xref: /openbmc/linux/drivers/s390/scsi/zfcp_fsf.c (revision 711aab1d)
1 /*
2  * zfcp device driver
3  *
4  * Implementation of FSF commands.
5  *
6  * Copyright IBM Corp. 2002, 2017
7  */
8 
9 #define KMSG_COMPONENT "zfcp"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 
12 #include <linux/blktrace_api.h>
13 #include <linux/slab.h>
14 #include <scsi/fc/fc_els.h>
15 #include "zfcp_ext.h"
16 #include "zfcp_fc.h"
17 #include "zfcp_dbf.h"
18 #include "zfcp_qdio.h"
19 #include "zfcp_reqlist.h"
20 
21 struct kmem_cache *zfcp_fsf_qtcb_cache;
22 
23 static void zfcp_fsf_request_timeout_handler(unsigned long data)
24 {
25 	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
26 	zfcp_qdio_siosl(adapter);
27 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
28 				"fsrth_1");
29 }
30 
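/*
 * Arm the per-request timer (set up with init_timer() in
 * zfcp_fsf_req_create()) using the adapter timeout handler above; on expiry
 * the adapter is signalled via SIOSL and reopened.
 */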
31 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
32 				 unsigned long timeout)
33 {
34 	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
35 	fsf_req->timer.data = (unsigned long) fsf_req->adapter;
36 	fsf_req->timer.expires = jiffies + timeout;
37 	add_timer(&fsf_req->timer);
38 }
39 
40 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
41 {
42 	BUG_ON(!fsf_req->erp_action);
43 	fsf_req->timer.function = zfcp_erp_timeout_handler;
44 	fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
45 	fsf_req->timer.expires = jiffies + 30 * HZ;
46 	add_timer(&fsf_req->timer);
47 }
48 
49 /* association between FSF command and FSF QTCB type */
50 static u32 fsf_qtcb_type[] = {
51 	[FSF_QTCB_FCP_CMND] =             FSF_IO_COMMAND,
52 	[FSF_QTCB_ABORT_FCP_CMND] =       FSF_SUPPORT_COMMAND,
53 	[FSF_QTCB_OPEN_PORT_WITH_DID] =   FSF_SUPPORT_COMMAND,
54 	[FSF_QTCB_OPEN_LUN] =             FSF_SUPPORT_COMMAND,
55 	[FSF_QTCB_CLOSE_LUN] =            FSF_SUPPORT_COMMAND,
56 	[FSF_QTCB_CLOSE_PORT] =           FSF_SUPPORT_COMMAND,
57 	[FSF_QTCB_CLOSE_PHYSICAL_PORT] =  FSF_SUPPORT_COMMAND,
58 	[FSF_QTCB_SEND_ELS] =             FSF_SUPPORT_COMMAND,
59 	[FSF_QTCB_SEND_GENERIC] =         FSF_SUPPORT_COMMAND,
60 	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
61 	[FSF_QTCB_EXCHANGE_PORT_DATA] =   FSF_PORT_COMMAND,
62 	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
63 	[FSF_QTCB_UPLOAD_CONTROL_FILE] =  FSF_SUPPORT_COMMAND
64 };
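/*
 * zfcp_fsf_req_create() below indexes this table with the FSF command code
 * to preset qtcb->prefix.qtcb_type; for example, FSF_QTCB_FCP_CMND maps to
 * FSF_IO_COMMAND, while the port, LUN and ELS/CT commands map to
 * FSF_SUPPORT_COMMAND.
 */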
65 
66 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
67 {
68 	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
69 		"operational because of an unsupported FC class\n");
70 	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
71 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
72 }
73 
74 /**
75  * zfcp_fsf_req_free - free memory used by fsf request
76  * @req: pointer to struct zfcp_fsf_req
77  */
78 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
79 {
80 	if (likely(req->pool)) {
81 		if (likely(req->qtcb))
82 			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
83 		mempool_free(req, req->pool);
84 		return;
85 	}
86 
87 	if (likely(req->qtcb))
88 		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
89 	kfree(req);
90 }
91 
92 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
93 {
94 	unsigned long flags;
95 	struct fsf_status_read_buffer *sr_buf = req->data;
96 	struct zfcp_adapter *adapter = req->adapter;
97 	struct zfcp_port *port;
98 	int d_id = ntoh24(sr_buf->d_id);
99 
100 	read_lock_irqsave(&adapter->port_list_lock, flags);
101 	list_for_each_entry(port, &adapter->port_list, list)
102 		if (port->d_id == d_id) {
103 			zfcp_erp_port_reopen(port, 0, "fssrpc1");
104 			break;
105 		}
106 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
107 }
108 
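/*
 * Evaluate a link-down condition: block the remote ports, report the decoded
 * reason once (the function returns early if LINK_UNPLUGGED is already set)
 * and mark the adapter as ERP_FAILED; the LINK UP handling below clears
 * these flags again.
 */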
109 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
110 					 struct fsf_link_down_info *link_down)
111 {
112 	struct zfcp_adapter *adapter = req->adapter;
113 
114 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
115 		return;
116 
117 	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
118 
119 	zfcp_scsi_schedule_rports_block(adapter);
120 
121 	if (!link_down)
122 		goto out;
123 
124 	switch (link_down->error_code) {
125 	case FSF_PSQ_LINK_NO_LIGHT:
126 		dev_warn(&req->adapter->ccw_device->dev,
127 			 "There is no light signal from the local "
128 			 "fibre channel cable\n");
129 		break;
130 	case FSF_PSQ_LINK_WRAP_PLUG:
131 		dev_warn(&req->adapter->ccw_device->dev,
132 			 "There is a wrap plug instead of a fibre "
133 			 "channel cable\n");
134 		break;
135 	case FSF_PSQ_LINK_NO_FCP:
136 		dev_warn(&req->adapter->ccw_device->dev,
137 			 "The adjacent fibre channel node does not "
138 			 "support FCP\n");
139 		break;
140 	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
141 		dev_warn(&req->adapter->ccw_device->dev,
142 			 "The FCP device is suspended because of a "
143 			 "firmware update\n");
144 		break;
145 	case FSF_PSQ_LINK_INVALID_WWPN:
146 		dev_warn(&req->adapter->ccw_device->dev,
147 			 "The FCP device detected a WWPN that is "
148 			 "duplicate or not valid\n");
149 		break;
150 	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
151 		dev_warn(&req->adapter->ccw_device->dev,
152 			 "The fibre channel fabric does not support NPIV\n");
153 		break;
154 	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
155 		dev_warn(&req->adapter->ccw_device->dev,
156 			 "The FCP adapter cannot support more NPIV ports\n");
157 		break;
158 	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
159 		dev_warn(&req->adapter->ccw_device->dev,
160 			 "The adjacent switch cannot support "
161 			 "more NPIV ports\n");
162 		break;
163 	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
164 		dev_warn(&req->adapter->ccw_device->dev,
165 			 "The FCP adapter could not log in to the "
166 			 "fibre channel fabric\n");
167 		break;
168 	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
169 		dev_warn(&req->adapter->ccw_device->dev,
170 			 "The WWPN assignment file on the FCP adapter "
171 			 "has been damaged\n");
172 		break;
173 	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
174 		dev_warn(&req->adapter->ccw_device->dev,
175 			 "The mode table on the FCP adapter "
176 			 "has been damaged\n");
177 		break;
178 	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
179 		dev_warn(&req->adapter->ccw_device->dev,
180 			 "All NPIV ports on the FCP adapter have "
181 			 "been assigned\n");
182 		break;
183 	default:
184 		dev_warn(&req->adapter->ccw_device->dev,
185 			 "The link between the FCP adapter and "
186 			 "the FC fabric is down\n");
187 	}
188 out:
189 	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
190 }
191 
192 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
193 {
194 	struct fsf_status_read_buffer *sr_buf = req->data;
195 	struct fsf_link_down_info *ldi =
196 		(struct fsf_link_down_info *) &sr_buf->payload;
197 
198 	switch (sr_buf->status_subtype) {
199 	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
200 	case FSF_STATUS_READ_SUB_FDISC_FAILED:
201 		zfcp_fsf_link_down_info_eval(req, ldi);
202 		break;
203 	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
204 		zfcp_fsf_link_down_info_eval(req, NULL);
205 	}
206 }
207 
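/*
 * Complete an unsolicited status read: dispatch on the reported status type,
 * release the status buffer and the request, and let the stat_miss/stat_work
 * mechanism issue a replacement status read request.
 */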
208 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
209 {
210 	struct zfcp_adapter *adapter = req->adapter;
211 	struct fsf_status_read_buffer *sr_buf = req->data;
212 
213 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
214 		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
215 		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
216 		zfcp_fsf_req_free(req);
217 		return;
218 	}
219 
220 	zfcp_dbf_hba_fsf_uss("fssrh_4", req);
221 
222 	switch (sr_buf->status_type) {
223 	case FSF_STATUS_READ_PORT_CLOSED:
224 		zfcp_fsf_status_read_port_closed(req);
225 		break;
226 	case FSF_STATUS_READ_INCOMING_ELS:
227 		zfcp_fc_incoming_els(req);
228 		break;
229 	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
230 		break;
231 	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
232 		dev_warn(&adapter->ccw_device->dev,
233 			 "The error threshold for checksum statistics "
234 			 "has been exceeded\n");
235 		zfcp_dbf_hba_bit_err("fssrh_3", req);
236 		break;
237 	case FSF_STATUS_READ_LINK_DOWN:
238 		zfcp_fsf_status_read_link_down(req);
239 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
240 		break;
241 	case FSF_STATUS_READ_LINK_UP:
242 		dev_info(&adapter->ccw_device->dev,
243 			 "The local link has been restored\n");
244 		/* All ports should be marked as ready to run again */
245 		zfcp_erp_set_adapter_status(adapter,
246 					    ZFCP_STATUS_COMMON_RUNNING);
247 		zfcp_erp_adapter_reopen(adapter,
248 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
249 					ZFCP_STATUS_COMMON_ERP_FAILED,
250 					"fssrh_2");
251 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
252 
253 		break;
254 	case FSF_STATUS_READ_NOTIFICATION_LOST:
255 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
256 			zfcp_fc_conditional_port_scan(adapter);
257 		break;
258 	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
259 		adapter->adapter_features = sr_buf->payload.word[0];
260 		break;
261 	}
262 
263 	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
264 	zfcp_fsf_req_free(req);
265 
266 	atomic_inc(&adapter->stat_miss);
267 	queue_work(adapter->work_queue, &adapter->stat_work);
268 }
269 
270 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
271 {
272 	switch (req->qtcb->header.fsf_status_qual.word[0]) {
273 	case FSF_SQ_FCP_RSP_AVAILABLE:
274 	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
275 	case FSF_SQ_NO_RETRY_POSSIBLE:
276 	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
277 		return;
278 	case FSF_SQ_COMMAND_ABORTED:
279 		break;
280 	case FSF_SQ_NO_RECOM:
281 		dev_err(&req->adapter->ccw_device->dev,
282 			"The FCP adapter reported a problem "
283 			"that cannot be recovered\n");
284 		zfcp_qdio_siosl(req->adapter);
285 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
286 		break;
287 	}
288 	/* all non-return cases set FSFREQ_ERROR */
289 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
290 }
291 
292 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
293 {
294 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
295 		return;
296 
297 	switch (req->qtcb->header.fsf_status) {
298 	case FSF_UNKNOWN_COMMAND:
299 		dev_err(&req->adapter->ccw_device->dev,
300 			"The FCP adapter does not recognize the command 0x%x\n",
301 			req->qtcb->header.fsf_command);
302 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
303 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
304 		break;
305 	case FSF_ADAPTER_STATUS_AVAILABLE:
306 		zfcp_fsf_fsfstatus_qual_eval(req);
307 		break;
308 	}
309 }
310 
311 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
312 {
313 	struct zfcp_adapter *adapter = req->adapter;
314 	struct fsf_qtcb *qtcb = req->qtcb;
315 	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
316 
317 	zfcp_dbf_hba_fsf_response(req);
318 
319 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
320 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
321 		return;
322 	}
323 
324 	switch (qtcb->prefix.prot_status) {
325 	case FSF_PROT_GOOD:
326 	case FSF_PROT_FSF_STATUS_PRESENTED:
327 		return;
328 	case FSF_PROT_QTCB_VERSION_ERROR:
329 		dev_err(&adapter->ccw_device->dev,
330 			"QTCB version 0x%x not supported by FCP adapter "
331 			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
332 			psq->word[0], psq->word[1]);
333 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
334 		break;
335 	case FSF_PROT_ERROR_STATE:
336 	case FSF_PROT_SEQ_NUMB_ERROR:
337 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
338 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
339 		break;
340 	case FSF_PROT_UNSUPP_QTCB_TYPE:
341 		dev_err(&adapter->ccw_device->dev,
342 			"The QTCB type is not supported by the FCP adapter\n");
343 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
344 		break;
345 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
346 		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
347 				&adapter->status);
348 		break;
349 	case FSF_PROT_DUPLICATE_REQUEST_ID:
350 		dev_err(&adapter->ccw_device->dev,
351 			"0x%Lx is an ambiguous request identifier\n",
352 			(unsigned long long)qtcb->bottom.support.req_handle);
353 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
354 		break;
355 	case FSF_PROT_LINK_DOWN:
356 		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
357 		/* go through reopen to flush pending requests */
358 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
359 		break;
360 	case FSF_PROT_REEST_QUEUE:
361 		/* All ports should be marked as ready to run again */
362 		zfcp_erp_set_adapter_status(adapter,
363 					    ZFCP_STATUS_COMMON_RUNNING);
364 		zfcp_erp_adapter_reopen(adapter,
365 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
366 					ZFCP_STATUS_COMMON_ERP_FAILED,
367 					"fspse_8");
368 		break;
369 	default:
370 		dev_err(&adapter->ccw_device->dev,
371 			"0x%x is not a valid transfer protocol status\n",
372 			qtcb->prefix.prot_status);
373 		zfcp_qdio_siosl(adapter);
374 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
375 	}
376 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
377 }
378 
379 /**
380  * zfcp_fsf_req_complete - process completion of an FSF request
381  * @req: The FSF request that has been completed.
382  *
383  * When a request has been completed, either by the FCP adapter or
384  * because it has been dismissed due to a queue shutdown, this function
385  * is called to process the completion status and trigger further
386  * events related to the FSF request.
387  */
388 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
389 {
390 	if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
391 		zfcp_fsf_status_read_handler(req);
392 		return;
393 	}
394 
395 	del_timer(&req->timer);
396 	zfcp_fsf_protstatus_eval(req);
397 	zfcp_fsf_fsfstatus_eval(req);
398 	req->handler(req);
399 
400 	if (req->erp_action)
401 		zfcp_erp_notify(req->erp_action, 0);
402 
403 	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
404 		zfcp_fsf_req_free(req);
405 	else
406 		complete(&req->completion);
407 }
408 
409 /**
410  * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
411  * @adapter: pointer to struct zfcp_adapter
412  *
413  * Never ever call this without shutting down the adapter first.
414  * Otherwise the adapter would continue using and corrupting s390 storage.
415  * A BUG_ON() call is included to ensure this is done.
416  * ERP is supposed to be the only user of this function.
417  */
418 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
419 {
420 	struct zfcp_fsf_req *req, *tmp;
421 	LIST_HEAD(remove_queue);
422 
423 	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
424 	zfcp_reqlist_move(adapter->req_list, &remove_queue);
425 
426 	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
427 		list_del(&req->list);
428 		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
429 		zfcp_fsf_req_complete(req);
430 	}
431 }
432 
433 #define ZFCP_FSF_PORTSPEED_1GBIT	(1 <<  0)
434 #define ZFCP_FSF_PORTSPEED_2GBIT	(1 <<  1)
435 #define ZFCP_FSF_PORTSPEED_4GBIT	(1 <<  2)
436 #define ZFCP_FSF_PORTSPEED_10GBIT	(1 <<  3)
437 #define ZFCP_FSF_PORTSPEED_8GBIT	(1 <<  4)
438 #define ZFCP_FSF_PORTSPEED_16GBIT	(1 <<  5)
439 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
440 
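/*
 * Translate the FSF port speed bit mask into the FC transport
 * FC_PORTSPEED_* mask; for example, an FSF value of
 * ZFCP_FSF_PORTSPEED_4GBIT | ZFCP_FSF_PORTSPEED_8GBIT yields
 * FC_PORTSPEED_4GBIT | FC_PORTSPEED_8GBIT for the fc_host attributes.
 */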
441 static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
442 {
443 	u32 fdmi_speed = 0;
444 	if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
445 		fdmi_speed |= FC_PORTSPEED_1GBIT;
446 	if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
447 		fdmi_speed |= FC_PORTSPEED_2GBIT;
448 	if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
449 		fdmi_speed |= FC_PORTSPEED_4GBIT;
450 	if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
451 		fdmi_speed |= FC_PORTSPEED_10GBIT;
452 	if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
453 		fdmi_speed |= FC_PORTSPEED_8GBIT;
454 	if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
455 		fdmi_speed |= FC_PORTSPEED_16GBIT;
456 	if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
457 		fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
458 	return fdmi_speed;
459 }
460 
461 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
462 {
463 	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
464 	struct zfcp_adapter *adapter = req->adapter;
465 	struct Scsi_Host *shost = adapter->scsi_host;
466 	struct fc_els_flogi *nsp, *plogi;
467 
468 	/* adjust pointers for missing command code */
469 	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
470 					- sizeof(u32));
471 	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
472 					- sizeof(u32));
473 
474 	if (req->data)
475 		memcpy(req->data, bottom, sizeof(*bottom));
476 
477 	fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
478 	fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
479 	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
480 
481 	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
482 	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
483 					 (u16)FSF_STATUS_READS_RECOM);
484 
485 	if (fc_host_permanent_port_name(shost) == -1)
486 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
487 
488 	zfcp_scsi_set_prot(adapter);
489 
490 	/* no error return above here, otherwise must fix call chains */
491 	/* do not evaluate invalid fields */
492 	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
493 		return 0;
494 
495 	fc_host_port_id(shost) = ntoh24(bottom->s_id);
496 	fc_host_speed(shost) =
497 		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
498 
499 	adapter->hydra_version = bottom->adapter_type;
500 
501 	switch (bottom->fc_topology) {
502 	case FSF_TOPO_P2P:
503 		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
504 		adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
505 		adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
506 		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
507 		break;
508 	case FSF_TOPO_FABRIC:
509 		if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
510 			fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
511 		else
512 			fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
513 		break;
514 	case FSF_TOPO_AL:
515 		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
516 		/* fall through */
517 	default:
518 		dev_err(&adapter->ccw_device->dev,
519 			"Unknown or unsupported arbitrated loop "
520 			"fibre channel topology detected\n");
521 		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
522 		return -EIO;
523 	}
524 
525 	return 0;
526 }
527 
528 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
529 {
530 	struct zfcp_adapter *adapter = req->adapter;
531 	struct fsf_qtcb *qtcb = req->qtcb;
532 	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
533 	struct Scsi_Host *shost = adapter->scsi_host;
534 
535 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
536 		return;
537 
538 	adapter->fsf_lic_version = bottom->lic_version;
539 	adapter->adapter_features = bottom->adapter_features;
540 	adapter->connection_features = bottom->connection_features;
541 	adapter->peer_wwpn = 0;
542 	adapter->peer_wwnn = 0;
543 	adapter->peer_d_id = 0;
544 
545 	switch (qtcb->header.fsf_status) {
546 	case FSF_GOOD:
547 		if (zfcp_fsf_exchange_config_evaluate(req))
548 			return;
549 
550 		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
551 			dev_err(&adapter->ccw_device->dev,
552 				"FCP adapter maximum QTCB size (%d bytes) "
553 				"is too small\n",
554 				bottom->max_qtcb_size);
555 			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
556 			return;
557 		}
558 		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
559 				&adapter->status);
560 		break;
561 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
562 		fc_host_node_name(shost) = 0;
563 		fc_host_port_name(shost) = 0;
564 		fc_host_port_id(shost) = 0;
565 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
566 		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
567 		adapter->hydra_version = 0;
568 
569 		/* avoid adapter shutdown so that events such as
570 		 * LINK UP can still be recognized */
571 		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
572 				&adapter->status);
573 		zfcp_fsf_link_down_info_eval(req,
574 			&qtcb->header.fsf_status_qual.link_down_info);
575 		if (zfcp_fsf_exchange_config_evaluate(req))
576 			return;
577 		break;
578 	default:
579 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
580 		return;
581 	}
582 
583 	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
584 		adapter->hardware_version = bottom->hardware_version;
585 		memcpy(fc_host_serial_number(shost), bottom->serial_number,
586 		       min(FC_SERIAL_NUMBER_SIZE, 17));
587 		EBCASC(fc_host_serial_number(shost),
588 		       min(FC_SERIAL_NUMBER_SIZE, 17));
589 	}
590 
591 	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
592 		dev_err(&adapter->ccw_device->dev,
593 			"The FCP adapter only supports newer "
594 			"control block versions\n");
595 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
596 		return;
597 	}
598 	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
599 		dev_err(&adapter->ccw_device->dev,
600 			"The FCP adapter only supports older "
601 			"control block versions\n");
602 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
603 	}
604 }
605 
606 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
607 {
608 	struct zfcp_adapter *adapter = req->adapter;
609 	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
610 	struct Scsi_Host *shost = adapter->scsi_host;
611 
612 	if (req->data)
613 		memcpy(req->data, bottom, sizeof(*bottom));
614 
615 	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
616 		fc_host_permanent_port_name(shost) = bottom->wwpn;
617 	else
618 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
619 	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
620 	fc_host_supported_speeds(shost) =
621 		zfcp_fsf_convert_portspeed(bottom->supported_speed);
622 	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
623 	       FC_FC4_LIST_SIZE);
624 	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
625 	       FC_FC4_LIST_SIZE);
626 }
627 
628 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
629 {
630 	struct fsf_qtcb *qtcb = req->qtcb;
631 
632 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
633 		return;
634 
635 	switch (qtcb->header.fsf_status) {
636 	case FSF_GOOD:
637 		zfcp_fsf_exchange_port_evaluate(req);
638 		break;
639 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
640 		zfcp_fsf_exchange_port_evaluate(req);
641 		zfcp_fsf_link_down_info_eval(req,
642 			&qtcb->header.fsf_status_qual.link_down_info);
643 		break;
644 	}
645 }
646 
647 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
648 {
649 	struct zfcp_fsf_req *req;
650 
651 	if (likely(pool))
652 		req = mempool_alloc(pool, GFP_ATOMIC);
653 	else
654 		req = kmalloc(sizeof(*req), GFP_ATOMIC);
655 
656 	if (unlikely(!req))
657 		return NULL;
658 
659 	memset(req, 0, sizeof(*req));
660 	req->pool = pool;
661 	return req;
662 }
663 
664 static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
665 {
666 	struct fsf_qtcb *qtcb;
667 
668 	if (likely(pool))
669 		qtcb = mempool_alloc(pool, GFP_ATOMIC);
670 	else
671 		qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
672 
673 	if (unlikely(!qtcb))
674 		return NULL;
675 
676 	memset(qtcb, 0, sizeof(*qtcb));
677 	return qtcb;
678 }
679 
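/*
 * Allocate and initialize an FSF request: take the request id from
 * adapter->req_no, allocate a QTCB for everything except unsolicited status
 * reads, preset the QTCB prefix and header, and initialize the associated
 * QDIO request.
 */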
680 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
681 						u32 fsf_cmd, u8 sbtype,
682 						mempool_t *pool)
683 {
684 	struct zfcp_adapter *adapter = qdio->adapter;
685 	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
686 
687 	if (unlikely(!req))
688 		return ERR_PTR(-ENOMEM);
689 
690 	if (adapter->req_no == 0)
691 		adapter->req_no++;
692 
693 	INIT_LIST_HEAD(&req->list);
694 	init_timer(&req->timer);
695 	init_completion(&req->completion);
696 
697 	req->adapter = adapter;
698 	req->fsf_command = fsf_cmd;
699 	req->req_id = adapter->req_no;
700 
701 	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
702 		if (likely(pool))
703 			req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
704 		else
705 			req->qtcb = zfcp_qtcb_alloc(NULL);
706 
707 		if (unlikely(!req->qtcb)) {
708 			zfcp_fsf_req_free(req);
709 			return ERR_PTR(-ENOMEM);
710 		}
711 
712 		req->seq_no = adapter->fsf_req_seq_no;
713 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
714 		req->qtcb->prefix.req_id = req->req_id;
715 		req->qtcb->prefix.ulp_info = 26;
716 		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
717 		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
718 		req->qtcb->header.req_handle = req->req_id;
719 		req->qtcb->header.fsf_command = req->fsf_command;
720 	}
721 
722 	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
723 			   req->qtcb, sizeof(struct fsf_qtcb));
724 
725 	return req;
726 }
727 
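/*
 * Register the request in the adapter's request list and hand it to QDIO;
 * if sending fails, the request is removed from the list again, the adapter
 * is reopened and the caller must free the request.
 */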
728 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
729 {
730 	struct zfcp_adapter *adapter = req->adapter;
731 	struct zfcp_qdio *qdio = adapter->qdio;
732 	int with_qtcb = (req->qtcb != NULL);
733 	int req_id = req->req_id;
734 
735 	zfcp_reqlist_add(adapter->req_list, req);
736 
737 	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
738 	req->issued = get_tod_clock();
739 	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
740 		del_timer(&req->timer);
741 		/* lookup request again, list might have changed */
742 		zfcp_reqlist_find_rm(adapter->req_list, req_id);
743 		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
744 		return -EIO;
745 	}
746 
747 	/* Don't increase for unsolicited status */
748 	if (with_qtcb)
749 		adapter->fsf_req_seq_no++;
750 	adapter->req_no++;
751 
752 	return 0;
753 }
754 
755 /**
756  * zfcp_fsf_status_read - send status read request
757  * @qdio: pointer to struct zfcp_qdio
759  * Returns: 0 on success, ERROR otherwise
760  */
761 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
762 {
763 	struct zfcp_adapter *adapter = qdio->adapter;
764 	struct zfcp_fsf_req *req;
765 	struct fsf_status_read_buffer *sr_buf;
766 	struct page *page;
767 	int retval = -EIO;
768 
769 	spin_lock_irq(&qdio->req_q_lock);
770 	if (zfcp_qdio_sbal_get(qdio))
771 		goto out;
772 
773 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
774 				  SBAL_SFLAGS0_TYPE_STATUS,
775 				  adapter->pool.status_read_req);
776 	if (IS_ERR(req)) {
777 		retval = PTR_ERR(req);
778 		goto out;
779 	}
780 
781 	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
782 	if (!page) {
783 		retval = -ENOMEM;
784 		goto failed_buf;
785 	}
786 	sr_buf = page_address(page);
787 	memset(sr_buf, 0, sizeof(*sr_buf));
788 	req->data = sr_buf;
789 
790 	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
791 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
792 
793 	retval = zfcp_fsf_req_send(req);
794 	if (retval)
795 		goto failed_req_send;
796 
797 	goto out;
798 
799 failed_req_send:
800 	req->data = NULL;
801 	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
802 failed_buf:
803 	zfcp_dbf_hba_fsf_uss("fssr__1", req);
804 	zfcp_fsf_req_free(req);
805 out:
806 	spin_unlock_irq(&qdio->req_q_lock);
807 	return retval;
808 }
809 
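/*
 * Evaluate the response to an abort request: flag the abort as successful or
 * not needed, or trigger adapter, port or LUN recovery for stale handles.
 */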
810 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
811 {
812 	struct scsi_device *sdev = req->data;
813 	struct zfcp_scsi_dev *zfcp_sdev;
814 	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
815 
816 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
817 		return;
818 
819 	zfcp_sdev = sdev_to_zfcp(sdev);
820 
821 	switch (req->qtcb->header.fsf_status) {
822 	case FSF_PORT_HANDLE_NOT_VALID:
823 		if (fsq->word[0] == fsq->word[1]) {
824 			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
825 						"fsafch1");
826 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
827 		}
828 		break;
829 	case FSF_LUN_HANDLE_NOT_VALID:
830 		if (fsq->word[0] == fsq->word[1]) {
831 			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
832 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
833 		}
834 		break;
835 	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
836 		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
837 		break;
838 	case FSF_PORT_BOXED:
839 		zfcp_erp_set_port_status(zfcp_sdev->port,
840 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
841 		zfcp_erp_port_reopen(zfcp_sdev->port,
842 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
843 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
844 		break;
845 	case FSF_LUN_BOXED:
846 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
847 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
848 				    "fsafch4");
849 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
850 		break;
851 	case FSF_ADAPTER_STATUS_AVAILABLE:
852 		switch (fsq->word[0]) {
853 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
854 			zfcp_fc_test_link(zfcp_sdev->port);
855 			/* fall through */
856 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
857 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
858 			break;
859 		}
860 		break;
861 	case FSF_GOOD:
862 		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
863 		break;
864 	}
865 }
866 
867 /**
868  * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
869  * @scmnd: The SCSI command to abort
870  * Returns: pointer to struct zfcp_fsf_req, or NULL on failure
871  */
873 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
874 {
875 	struct zfcp_fsf_req *req = NULL;
876 	struct scsi_device *sdev = scmnd->device;
877 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
878 	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
879 	unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
880 
881 	spin_lock_irq(&qdio->req_q_lock);
882 	if (zfcp_qdio_sbal_get(qdio))
883 		goto out;
884 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
885 				  SBAL_SFLAGS0_TYPE_READ,
886 				  qdio->adapter->pool.scsi_abort);
887 	if (IS_ERR(req)) {
888 		req = NULL;
889 		goto out;
890 	}
891 
892 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
893 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
894 		goto out_error_free;
895 
896 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
897 
898 	req->data = sdev;
899 	req->handler = zfcp_fsf_abort_fcp_command_handler;
900 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
901 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
902 	req->qtcb->bottom.support.req_handle = (u64) old_req_id;
903 
904 	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
905 	if (!zfcp_fsf_req_send(req))
906 		goto out;
907 
908 out_error_free:
909 	zfcp_fsf_req_free(req);
910 	req = NULL;
911 out:
912 	spin_unlock_irq(&qdio->req_q_lock);
913 	return req;
914 }
915 
916 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
917 {
918 	struct zfcp_adapter *adapter = req->adapter;
919 	struct zfcp_fsf_ct_els *ct = req->data;
920 	struct fsf_qtcb_header *header = &req->qtcb->header;
921 
922 	ct->status = -EINVAL;
923 
924 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
925 		goto skip_fsfstatus;
926 
927 	switch (header->fsf_status) {
928 	case FSF_GOOD:
929 		ct->status = 0;
930 		zfcp_dbf_san_res("fsscth2", req);
931 		break;
932 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
933 		zfcp_fsf_class_not_supp(req);
934 		break;
935 	case FSF_ADAPTER_STATUS_AVAILABLE:
936 		switch (header->fsf_status_qual.word[0]) {
937 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
938 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
939 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
940 			break;
941 		}
942 		break;
943 	case FSF_PORT_BOXED:
944 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
945 		break;
946 	case FSF_PORT_HANDLE_NOT_VALID:
947 		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
948 		/* fall through */
949 	case FSF_GENERIC_COMMAND_REJECTED:
950 	case FSF_PAYLOAD_SIZE_MISMATCH:
951 	case FSF_REQUEST_SIZE_TOO_LARGE:
952 	case FSF_RESPONSE_SIZE_TOO_LARGE:
953 	case FSF_SBAL_MISMATCH:
954 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
955 		break;
956 	}
957 
958 skip_fsfstatus:
959 	if (ct->handler)
960 		ct->handler(ct->handler_data);
961 }
962 
963 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
964 					    struct zfcp_qdio_req *q_req,
965 					    struct scatterlist *sg_req,
966 					    struct scatterlist *sg_resp)
967 {
968 	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
969 	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
970 	zfcp_qdio_set_sbale_last(qdio, q_req);
971 }
972 
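/*
 * Map the CT/ELS request and response scatterlists to SBALs: multibuffer
 * adapters take the scatterlists directly, a small transfer shares a single
 * unchained SBAL, and anything larger requires the chained-SBAL feature.
 */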
973 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
974 				       struct scatterlist *sg_req,
975 				       struct scatterlist *sg_resp)
976 {
977 	struct zfcp_adapter *adapter = req->adapter;
978 	struct zfcp_qdio *qdio = adapter->qdio;
979 	struct fsf_qtcb *qtcb = req->qtcb;
980 	u32 feat = adapter->adapter_features;
981 
982 	if (zfcp_adapter_multi_buffer_active(adapter)) {
983 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
984 			return -EIO;
985 		qtcb->bottom.support.req_buf_length =
986 			zfcp_qdio_real_bytes(sg_req);
987 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
988 			return -EIO;
989 		qtcb->bottom.support.resp_buf_length =
990 			zfcp_qdio_real_bytes(sg_resp);
991 
992 		zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
993 		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
994 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
995 		return 0;
996 	}
997 
998 	/* use single, unchained SBAL if it can hold the request */
999 	if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
1000 		zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
1001 						sg_req, sg_resp);
1002 		return 0;
1003 	}
1004 
1005 	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
1006 		return -EOPNOTSUPP;
1007 
1008 	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1009 		return -EIO;
1010 
1011 	qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
1012 
1013 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1014 	zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
1015 
1016 	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1017 		return -EIO;
1018 
1019 	qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
1020 
1021 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1022 
1023 	return 0;
1024 }
1025 
1026 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1027 				 struct scatterlist *sg_req,
1028 				 struct scatterlist *sg_resp,
1029 				 unsigned int timeout)
1030 {
1031 	int ret;
1032 
1033 	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
1034 	if (ret)
1035 		return ret;
1036 
1037 	/* common settings for ct/gs and els requests */
1038 	if (timeout > 255)
1039 		timeout = 255; /* max value accepted by hardware */
1040 	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1041 	req->qtcb->bottom.support.timeout = timeout;
1042 	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1043 
1044 	return 0;
1045 }
1046 
1047 /**
1048  * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
 * @wka_port: pointer to the zfcp WKA port the CT/GS request is sent to
1049  * @ct: pointer to struct zfcp_fsf_ct_els with data for the CT request
1050  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
 * @timeout: timeout in seconds the FCP channel uses for the request
1051  */
1052 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1053 		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1054 		     unsigned int timeout)
1055 {
1056 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1057 	struct zfcp_fsf_req *req;
1058 	int ret = -EIO;
1059 
1060 	spin_lock_irq(&qdio->req_q_lock);
1061 	if (zfcp_qdio_sbal_get(qdio))
1062 		goto out;
1063 
1064 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1065 				  SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1066 
1067 	if (IS_ERR(req)) {
1068 		ret = PTR_ERR(req);
1069 		goto out;
1070 	}
1071 
1072 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1073 	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1074 	if (ret)
1075 		goto failed_send;
1076 
1077 	req->handler = zfcp_fsf_send_ct_handler;
1078 	req->qtcb->header.port_handle = wka_port->handle;
1079 	ct->d_id = wka_port->d_id;
1080 	req->data = ct;
1081 
1082 	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1083 
1084 	ret = zfcp_fsf_req_send(req);
1085 	if (ret)
1086 		goto failed_send;
1087 
1088 	goto out;
1089 
1090 failed_send:
1091 	zfcp_fsf_req_free(req);
1092 out:
1093 	spin_unlock_irq(&qdio->req_q_lock);
1094 	return ret;
1095 }
1096 
1097 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1098 {
1099 	struct zfcp_fsf_ct_els *send_els = req->data;
1100 	struct fsf_qtcb_header *header = &req->qtcb->header;
1101 
1102 	send_els->status = -EINVAL;
1103 
1104 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1105 		goto skip_fsfstatus;
1106 
1107 	switch (header->fsf_status) {
1108 	case FSF_GOOD:
1109 		send_els->status = 0;
1110 		zfcp_dbf_san_res("fsselh1", req);
1111 		break;
1112 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1113 		zfcp_fsf_class_not_supp(req);
1114 		break;
1115 	case FSF_ADAPTER_STATUS_AVAILABLE:
1116 		switch (header->fsf_status_qual.word[0]) {
1117 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1118 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1119 		case FSF_SQ_RETRY_IF_POSSIBLE:
1120 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1121 			break;
1122 		}
1123 		break;
1124 	case FSF_ELS_COMMAND_REJECTED:
1125 	case FSF_PAYLOAD_SIZE_MISMATCH:
1126 	case FSF_REQUEST_SIZE_TOO_LARGE:
1127 	case FSF_RESPONSE_SIZE_TOO_LARGE:
1128 		break;
1129 	case FSF_SBAL_MISMATCH:
1130 		/* should never occur, avoided in zfcp_fsf_send_els */
1131 		/* fall through */
1132 	default:
1133 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1134 		break;
1135 	}
1136 skip_fsfstatus:
1137 	if (send_els->handler)
1138 		send_els->handler(send_els->handler_data);
1139 }
1140 
1141 /**
1142  * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
 * @adapter: pointer to the adapter that sends the ELS
 * @d_id: N_Port_ID of the destination port for the ELS
1143  * @els: pointer to struct zfcp_fsf_ct_els with data for the ELS command
 * @timeout: timeout in seconds the FCP channel uses for the request
1144  */
1145 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1146 		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
1147 {
1148 	struct zfcp_fsf_req *req;
1149 	struct zfcp_qdio *qdio = adapter->qdio;
1150 	int ret = -EIO;
1151 
1152 	spin_lock_irq(&qdio->req_q_lock);
1153 	if (zfcp_qdio_sbal_get(qdio))
1154 		goto out;
1155 
1156 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1157 				  SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1158 
1159 	if (IS_ERR(req)) {
1160 		ret = PTR_ERR(req);
1161 		goto out;
1162 	}
1163 
1164 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1165 
1166 	if (!zfcp_adapter_multi_buffer_active(adapter))
1167 		zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1168 
1169 	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1170 
1171 	if (ret)
1172 		goto failed_send;
1173 
1174 	hton24(req->qtcb->bottom.support.d_id, d_id);
1175 	req->handler = zfcp_fsf_send_els_handler;
1176 	els->d_id = d_id;
1177 	req->data = els;
1178 
1179 	zfcp_dbf_san_req("fssels1", req, d_id);
1180 
1181 	ret = zfcp_fsf_req_send(req);
1182 	if (ret)
1183 		goto failed_send;
1184 
1185 	goto out;
1186 
1187 failed_send:
1188 	zfcp_fsf_req_free(req);
1189 out:
1190 	spin_unlock_irq(&qdio->req_q_lock);
1191 	return ret;
1192 }
1193 
1194 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1195 {
1196 	struct zfcp_fsf_req *req;
1197 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1198 	int retval = -EIO;
1199 
1200 	spin_lock_irq(&qdio->req_q_lock);
1201 	if (zfcp_qdio_sbal_get(qdio))
1202 		goto out;
1203 
1204 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1205 				  SBAL_SFLAGS0_TYPE_READ,
1206 				  qdio->adapter->pool.erp_req);
1207 
1208 	if (IS_ERR(req)) {
1209 		retval = PTR_ERR(req);
1210 		goto out;
1211 	}
1212 
1213 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1214 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1215 
1216 	req->qtcb->bottom.config.feature_selection =
1217 			FSF_FEATURE_NOTIFICATION_LOST |
1218 			FSF_FEATURE_UPDATE_ALERT;
1219 	req->erp_action = erp_action;
1220 	req->handler = zfcp_fsf_exchange_config_data_handler;
1221 	erp_action->fsf_req_id = req->req_id;
1222 
1223 	zfcp_fsf_start_erp_timer(req);
1224 	retval = zfcp_fsf_req_send(req);
1225 	if (retval) {
1226 		zfcp_fsf_req_free(req);
1227 		erp_action->fsf_req_id = 0;
1228 	}
1229 out:
1230 	spin_unlock_irq(&qdio->req_q_lock);
1231 	return retval;
1232 }
1233 
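/*
 * Synchronous variant of exchange config data: sends the request without an
 * ERP action and waits for its completion; if data is given, the handler
 * copies the config bottom of the QTCB there.
 */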
1234 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1235 				       struct fsf_qtcb_bottom_config *data)
1236 {
1237 	struct zfcp_fsf_req *req = NULL;
1238 	int retval = -EIO;
1239 
1240 	spin_lock_irq(&qdio->req_q_lock);
1241 	if (zfcp_qdio_sbal_get(qdio))
1242 		goto out_unlock;
1243 
1244 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1245 				  SBAL_SFLAGS0_TYPE_READ, NULL);
1246 
1247 	if (IS_ERR(req)) {
1248 		retval = PTR_ERR(req);
1249 		goto out_unlock;
1250 	}
1251 
1252 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1253 	req->handler = zfcp_fsf_exchange_config_data_handler;
1254 
1255 	req->qtcb->bottom.config.feature_selection =
1256 			FSF_FEATURE_NOTIFICATION_LOST |
1257 			FSF_FEATURE_UPDATE_ALERT;
1258 
1259 	if (data)
1260 		req->data = data;
1261 
1262 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1263 	retval = zfcp_fsf_req_send(req);
1264 	spin_unlock_irq(&qdio->req_q_lock);
1265 	if (!retval)
1266 		wait_for_completion(&req->completion);
1267 
1268 	zfcp_fsf_req_free(req);
1269 	return retval;
1270 
1271 out_unlock:
1272 	spin_unlock_irq(&qdio->req_q_lock);
1273 	return retval;
1274 }
1275 
1276 /**
1277  * zfcp_fsf_exchange_port_data - request information about local port
1278  * @erp_action: ERP action for the adapter for which port data is requested
1279  * Returns: 0 on success, error otherwise
1280  */
1281 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1282 {
1283 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1284 	struct zfcp_fsf_req *req;
1285 	int retval = -EIO;
1286 
1287 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1288 		return -EOPNOTSUPP;
1289 
1290 	spin_lock_irq(&qdio->req_q_lock);
1291 	if (zfcp_qdio_sbal_get(qdio))
1292 		goto out;
1293 
1294 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1295 				  SBAL_SFLAGS0_TYPE_READ,
1296 				  qdio->adapter->pool.erp_req);
1297 
1298 	if (IS_ERR(req)) {
1299 		retval = PTR_ERR(req);
1300 		goto out;
1301 	}
1302 
1303 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1304 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1305 
1306 	req->handler = zfcp_fsf_exchange_port_data_handler;
1307 	req->erp_action = erp_action;
1308 	erp_action->fsf_req_id = req->req_id;
1309 
1310 	zfcp_fsf_start_erp_timer(req);
1311 	retval = zfcp_fsf_req_send(req);
1312 	if (retval) {
1313 		zfcp_fsf_req_free(req);
1314 		erp_action->fsf_req_id = 0;
1315 	}
1316 out:
1317 	spin_unlock_irq(&qdio->req_q_lock);
1318 	return retval;
1319 }
1320 
1321 /**
1322  * zfcp_fsf_exchange_port_data_sync - request information about local port
1323  * @qdio: pointer to struct zfcp_qdio
1324  * @data: pointer to struct fsf_qtcb_bottom_port
1325  * Returns: 0 on success, error otherwise
1326  */
1327 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1328 				     struct fsf_qtcb_bottom_port *data)
1329 {
1330 	struct zfcp_fsf_req *req = NULL;
1331 	int retval = -EIO;
1332 
1333 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1334 		return -EOPNOTSUPP;
1335 
1336 	spin_lock_irq(&qdio->req_q_lock);
1337 	if (zfcp_qdio_sbal_get(qdio))
1338 		goto out_unlock;
1339 
1340 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1341 				  SBAL_SFLAGS0_TYPE_READ, NULL);
1342 
1343 	if (IS_ERR(req)) {
1344 		retval = PTR_ERR(req);
1345 		goto out_unlock;
1346 	}
1347 
1348 	if (data)
1349 		req->data = data;
1350 
1351 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1352 
1353 	req->handler = zfcp_fsf_exchange_port_data_handler;
1354 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1355 	retval = zfcp_fsf_req_send(req);
1356 	spin_unlock_irq(&qdio->req_q_lock);
1357 
1358 	if (!retval)
1359 		wait_for_completion(&req->completion);
1360 
1361 	zfcp_fsf_req_free(req);
1362 
1363 	return retval;
1364 
1365 out_unlock:
1366 	spin_unlock_irq(&qdio->req_q_lock);
1367 	return retval;
1368 }
1369 
1370 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1371 {
1372 	struct zfcp_port *port = req->data;
1373 	struct fsf_qtcb_header *header = &req->qtcb->header;
1374 	struct fc_els_flogi *plogi;
1375 
1376 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1377 		goto out;
1378 
1379 	switch (header->fsf_status) {
1380 	case FSF_PORT_ALREADY_OPEN:
1381 		break;
1382 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1383 		dev_warn(&req->adapter->ccw_device->dev,
1384 			 "Not enough FCP adapter resources to open "
1385 			 "remote port 0x%016Lx\n",
1386 			 (unsigned long long)port->wwpn);
1387 		zfcp_erp_set_port_status(port,
1388 					 ZFCP_STATUS_COMMON_ERP_FAILED);
1389 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1390 		break;
1391 	case FSF_ADAPTER_STATUS_AVAILABLE:
1392 		switch (header->fsf_status_qual.word[0]) {
1393 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1394 			/* no zfcp_fc_test_link() with failed open port */
1395 			/* fall through */
1396 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1397 		case FSF_SQ_NO_RETRY_POSSIBLE:
1398 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1399 			break;
1400 		}
1401 		break;
1402 	case FSF_GOOD:
1403 		port->handle = header->port_handle;
1404 		atomic_or(ZFCP_STATUS_COMMON_OPEN |
1405 				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1406 		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
1407 		                  &port->status);
1408 		/* check whether D_ID has changed during open */
1409 		/*
1410 		 * FIXME: This check is not airtight, as the FCP channel does
1411 		 * not monitor closures of target port connections caused on
1412 		 * the remote side. Thus, they might miss out on invalidating
1413 		 * locally cached WWPNs (and other N_Port parameters) of gone
1414 		 * target ports. So, our heroic attempt to make things safe
1415 		 * could be undermined by 'open port' response data tagged with
1416 		 * obsolete WWPNs. Another reason to monitor potential
1417 		 * connection closures ourselves at least (by interpreting
1418 		 * incoming ELSes and unsolicited status). It just crosses my
1419 		 * mind that one should be able to cross-check by means of
1420 		 * another GID_PN straight after a port has been opened.
1421 		 * Alternatively, an ADISC/PDISC ELS should suffice as well.
1422 		 */
1423 		plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
1424 		if (req->qtcb->bottom.support.els1_length >=
1425 		    FSF_PLOGI_MIN_LEN)
1426 				zfcp_fc_plogi_evaluate(port, plogi);
1427 		break;
1428 	case FSF_UNKNOWN_OP_SUBTYPE:
1429 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1430 		break;
1431 	}
1432 
1433 out:
1434 	put_device(&port->dev);
1435 }
1436 
1437 /**
1438  * zfcp_fsf_open_port - create and send open port request
1439  * @erp_action: pointer to struct zfcp_erp_action
1440  * Returns: 0 on success, error otherwise
1441  */
1442 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1443 {
1444 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1445 	struct zfcp_port *port = erp_action->port;
1446 	struct zfcp_fsf_req *req;
1447 	int retval = -EIO;
1448 
1449 	spin_lock_irq(&qdio->req_q_lock);
1450 	if (zfcp_qdio_sbal_get(qdio))
1451 		goto out;
1452 
1453 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1454 				  SBAL_SFLAGS0_TYPE_READ,
1455 				  qdio->adapter->pool.erp_req);
1456 
1457 	if (IS_ERR(req)) {
1458 		retval = PTR_ERR(req);
1459 		goto out;
1460 	}
1461 
1462 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1463 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1464 
1465 	req->handler = zfcp_fsf_open_port_handler;
1466 	hton24(req->qtcb->bottom.support.d_id, port->d_id);
1467 	req->data = port;
1468 	req->erp_action = erp_action;
1469 	erp_action->fsf_req_id = req->req_id;
1470 	get_device(&port->dev);
1471 
1472 	zfcp_fsf_start_erp_timer(req);
1473 	retval = zfcp_fsf_req_send(req);
1474 	if (retval) {
1475 		zfcp_fsf_req_free(req);
1476 		erp_action->fsf_req_id = 0;
1477 		put_device(&port->dev);
1478 	}
1479 out:
1480 	spin_unlock_irq(&qdio->req_q_lock);
1481 	return retval;
1482 }
1483 
1484 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1485 {
1486 	struct zfcp_port *port = req->data;
1487 
1488 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1489 		return;
1490 
1491 	switch (req->qtcb->header.fsf_status) {
1492 	case FSF_PORT_HANDLE_NOT_VALID:
1493 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1494 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1495 		break;
1496 	case FSF_ADAPTER_STATUS_AVAILABLE:
1497 		break;
1498 	case FSF_GOOD:
1499 		zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1500 		break;
1501 	}
1502 }
1503 
1504 /**
1505  * zfcp_fsf_close_port - create and send close port request
1506  * @erp_action: pointer to struct zfcp_erp_action
1507  * Returns: 0 on success, error otherwise
1508  */
1509 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1510 {
1511 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1512 	struct zfcp_fsf_req *req;
1513 	int retval = -EIO;
1514 
1515 	spin_lock_irq(&qdio->req_q_lock);
1516 	if (zfcp_qdio_sbal_get(qdio))
1517 		goto out;
1518 
1519 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1520 				  SBAL_SFLAGS0_TYPE_READ,
1521 				  qdio->adapter->pool.erp_req);
1522 
1523 	if (IS_ERR(req)) {
1524 		retval = PTR_ERR(req);
1525 		goto out;
1526 	}
1527 
1528 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1529 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1530 
1531 	req->handler = zfcp_fsf_close_port_handler;
1532 	req->data = erp_action->port;
1533 	req->erp_action = erp_action;
1534 	req->qtcb->header.port_handle = erp_action->port->handle;
1535 	erp_action->fsf_req_id = req->req_id;
1536 
1537 	zfcp_fsf_start_erp_timer(req);
1538 	retval = zfcp_fsf_req_send(req);
1539 	if (retval) {
1540 		zfcp_fsf_req_free(req);
1541 		erp_action->fsf_req_id = 0;
1542 	}
1543 out:
1544 	spin_unlock_irq(&qdio->req_q_lock);
1545 	return retval;
1546 }
1547 
1548 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1549 {
1550 	struct zfcp_fc_wka_port *wka_port = req->data;
1551 	struct fsf_qtcb_header *header = &req->qtcb->header;
1552 
1553 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1554 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1555 		goto out;
1556 	}
1557 
1558 	switch (header->fsf_status) {
1559 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1560 		dev_warn(&req->adapter->ccw_device->dev,
1561 			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1562 		/* fall through */
1563 	case FSF_ADAPTER_STATUS_AVAILABLE:
1564 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1565 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1566 		break;
1567 	case FSF_GOOD:
1568 		wka_port->handle = header->port_handle;
1569 		/* fall through */
1570 	case FSF_PORT_ALREADY_OPEN:
1571 		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1572 	}
1573 out:
1574 	wake_up(&wka_port->completion_wq);
1575 }
1576 
1577 /**
1578  * zfcp_fsf_open_wka_port - create and send open wka-port request
1579  * @wka_port: pointer to struct zfcp_fc_wka_port
1580  * Returns: 0 on success, error otherwise
1581  */
1582 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1583 {
1584 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1585 	struct zfcp_fsf_req *req;
1586 	int retval = -EIO;
1587 
1588 	spin_lock_irq(&qdio->req_q_lock);
1589 	if (zfcp_qdio_sbal_get(qdio))
1590 		goto out;
1591 
1592 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1593 				  SBAL_SFLAGS0_TYPE_READ,
1594 				  qdio->adapter->pool.erp_req);
1595 
1596 	if (IS_ERR(req)) {
1597 		retval = PTR_ERR(req);
1598 		goto out;
1599 	}
1600 
1601 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1602 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1603 
1604 	req->handler = zfcp_fsf_open_wka_port_handler;
1605 	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1606 	req->data = wka_port;
1607 
1608 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1609 	retval = zfcp_fsf_req_send(req);
1610 	if (retval)
1611 		zfcp_fsf_req_free(req);
1612 out:
1613 	spin_unlock_irq(&qdio->req_q_lock);
1614 	if (!retval)
1615 		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
1616 	return retval;
1617 }
1618 
1619 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1620 {
1621 	struct zfcp_fc_wka_port *wka_port = req->data;
1622 
1623 	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1624 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1625 		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1626 	}
1627 
1628 	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1629 	wake_up(&wka_port->completion_wq);
1630 }
1631 
1632 /**
1633  * zfcp_fsf_close_wka_port - create and send close wka port request
1634  * @wka_port: WKA port to close
1635  * Returns: 0 on success, error otherwise
1636  */
1637 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1638 {
1639 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1640 	struct zfcp_fsf_req *req;
1641 	int retval = -EIO;
1642 
1643 	spin_lock_irq(&qdio->req_q_lock);
1644 	if (zfcp_qdio_sbal_get(qdio))
1645 		goto out;
1646 
1647 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1648 				  SBAL_SFLAGS0_TYPE_READ,
1649 				  qdio->adapter->pool.erp_req);
1650 
1651 	if (IS_ERR(req)) {
1652 		retval = PTR_ERR(req);
1653 		goto out;
1654 	}
1655 
1656 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1657 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1658 
1659 	req->handler = zfcp_fsf_close_wka_port_handler;
1660 	req->data = wka_port;
1661 	req->qtcb->header.port_handle = wka_port->handle;
1662 
1663 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1664 	retval = zfcp_fsf_req_send(req);
1665 	if (retval)
1666 		zfcp_fsf_req_free(req);
1667 out:
1668 	spin_unlock_irq(&qdio->req_q_lock);
1669 	if (!retval)
1670 		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
1671 	return retval;
1672 }
1673 
1674 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1675 {
1676 	struct zfcp_port *port = req->data;
1677 	struct fsf_qtcb_header *header = &req->qtcb->header;
1678 	struct scsi_device *sdev;
1679 
1680 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1681 		return;
1682 
1683 	switch (header->fsf_status) {
1684 	case FSF_PORT_HANDLE_NOT_VALID:
1685 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
1686 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1687 		break;
1688 	case FSF_PORT_BOXED:
1689 		/* can't use generic zfcp_erp_modify_port_status because
1690 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1691 		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1692 		shost_for_each_device(sdev, port->adapter->scsi_host)
1693 			if (sdev_to_zfcp(sdev)->port == port)
1694 				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
1695 						  &sdev_to_zfcp(sdev)->status);
1696 		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1697 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
1698 				     "fscpph2");
1699 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1700 		break;
1701 	case FSF_ADAPTER_STATUS_AVAILABLE:
1702 		switch (header->fsf_status_qual.word[0]) {
1703 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1704 			/* fall through */
1705 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1706 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1707 			break;
1708 		}
1709 		break;
1710 	case FSF_GOOD:
1711 		/* can't use generic zfcp_erp_modify_port_status because
1712 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1713 		 */
1714 		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1715 		shost_for_each_device(sdev, port->adapter->scsi_host)
1716 			if (sdev_to_zfcp(sdev)->port == port)
1717 				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
1718 						  &sdev_to_zfcp(sdev)->status);
1719 		break;
1720 	}
1721 }
1722 
1723 /**
1724  * zfcp_fsf_close_physical_port - close physical port
1725  * @erp_action: pointer to struct zfcp_erp_action
1726  * Returns: 0 on success
1727  */
1728 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1729 {
1730 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1731 	struct zfcp_fsf_req *req;
1732 	int retval = -EIO;
1733 
1734 	spin_lock_irq(&qdio->req_q_lock);
1735 	if (zfcp_qdio_sbal_get(qdio))
1736 		goto out;
1737 
1738 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1739 				  SBAL_SFLAGS0_TYPE_READ,
1740 				  qdio->adapter->pool.erp_req);
1741 
1742 	if (IS_ERR(req)) {
1743 		retval = PTR_ERR(req);
1744 		goto out;
1745 	}
1746 
1747 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1748 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1749 
1750 	req->data = erp_action->port;
1751 	req->qtcb->header.port_handle = erp_action->port->handle;
1752 	req->erp_action = erp_action;
1753 	req->handler = zfcp_fsf_close_physical_port_handler;
1754 	erp_action->fsf_req_id = req->req_id;
1755 
1756 	zfcp_fsf_start_erp_timer(req);
1757 	retval = zfcp_fsf_req_send(req);
1758 	if (retval) {
1759 		zfcp_fsf_req_free(req);
1760 		erp_action->fsf_req_id = 0;
1761 	}
1762 out:
1763 	spin_unlock_irq(&qdio->req_q_lock);
1764 	return retval;
1765 }
1766 
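/*
 * Evaluate the FSF status of a completed "open LUN" request: FSF_GOOD
 * stores the returned LUN handle and marks the LUN open, while boxed
 * ports, LUN sharing violations and exhausted LUN handles flag the
 * request as failed and, where appropriate, trigger port or LUN
 * recovery.
 */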
1767 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1768 {
1769 	struct zfcp_adapter *adapter = req->adapter;
1770 	struct scsi_device *sdev = req->data;
1771 	struct zfcp_scsi_dev *zfcp_sdev;
1772 	struct fsf_qtcb_header *header = &req->qtcb->header;
1773 	union fsf_status_qual *qual = &header->fsf_status_qual;
1774 
1775 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1776 		return;
1777 
1778 	zfcp_sdev = sdev_to_zfcp(sdev);
1779 
1780 	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1781 			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
1782 			  &zfcp_sdev->status);
1783 
1784 	switch (header->fsf_status) {
1785 
1786 	case FSF_PORT_HANDLE_NOT_VALID:
1787 		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
1788 		/* fall through */
1789 	case FSF_LUN_ALREADY_OPEN:
1790 		break;
1791 	case FSF_PORT_BOXED:
1792 		zfcp_erp_set_port_status(zfcp_sdev->port,
1793 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1794 		zfcp_erp_port_reopen(zfcp_sdev->port,
1795 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
1796 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1797 		break;
1798 	case FSF_LUN_SHARING_VIOLATION:
1799 		if (qual->word[0])
1800 			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
1801 				 "LUN 0x%Lx on port 0x%Lx is already in "
1802 				 "use by CSS%d, MIF Image ID %x\n",
1803 				 zfcp_scsi_dev_lun(sdev),
1804 				 (unsigned long long)zfcp_sdev->port->wwpn,
1805 				 qual->fsf_queue_designator.cssid,
1806 				 qual->fsf_queue_designator.hla);
1807 		zfcp_erp_set_lun_status(sdev,
1808 					ZFCP_STATUS_COMMON_ERP_FAILED |
1809 					ZFCP_STATUS_COMMON_ACCESS_DENIED);
1810 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1811 		break;
1812 	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1813 		dev_warn(&adapter->ccw_device->dev,
1814 			 "No handle is available for LUN "
1815 			 "0x%016Lx on port 0x%016Lx\n",
1816 			 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1817 			 (unsigned long long)zfcp_sdev->port->wwpn);
1818 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
1819 		/* fall through */
1820 	case FSF_INVALID_COMMAND_OPTION:
1821 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1822 		break;
1823 	case FSF_ADAPTER_STATUS_AVAILABLE:
1824 		switch (header->fsf_status_qual.word[0]) {
1825 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1826 			zfcp_fc_test_link(zfcp_sdev->port);
1827 			/* fall through */
1828 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1829 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1830 			break;
1831 		}
1832 		break;
1833 
1834 	case FSF_GOOD:
1835 		zfcp_sdev->lun_handle = header->lun_handle;
1836 		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1837 		break;
1838 	}
1839 }
1840 
1841 /**
1842  * zfcp_fsf_open_lun - open LUN
1843  * @erp_action: pointer to struct zfcp_erp_action
1844  * Returns: 0 on success, error otherwise
1845  */
1846 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1847 {
1848 	struct zfcp_adapter *adapter = erp_action->adapter;
1849 	struct zfcp_qdio *qdio = adapter->qdio;
1850 	struct zfcp_fsf_req *req;
1851 	int retval = -EIO;
1852 
1853 	spin_lock_irq(&qdio->req_q_lock);
1854 	if (zfcp_qdio_sbal_get(qdio))
1855 		goto out;
1856 
1857 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1858 				  SBAL_SFLAGS0_TYPE_READ,
1859 				  adapter->pool.erp_req);
1860 
1861 	if (IS_ERR(req)) {
1862 		retval = PTR_ERR(req);
1863 		goto out;
1864 	}
1865 
1866 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1867 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1868 
1869 	req->qtcb->header.port_handle = erp_action->port->handle;
1870 	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
1871 	req->handler = zfcp_fsf_open_lun_handler;
1872 	req->data = erp_action->sdev;
1873 	req->erp_action = erp_action;
1874 	erp_action->fsf_req_id = req->req_id;
1875 
1876 	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1877 		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1878 
1879 	zfcp_fsf_start_erp_timer(req);
1880 	retval = zfcp_fsf_req_send(req);
1881 	if (retval) {
1882 		zfcp_fsf_req_free(req);
1883 		erp_action->fsf_req_id = 0;
1884 	}
1885 out:
1886 	spin_unlock_irq(&qdio->req_q_lock);
1887 	return retval;
1888 }
1889 
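/*
 * Evaluate the FSF status of a completed "close LUN" request: stale port
 * or LUN handles trigger adapter or port recovery, a boxed port is
 * reopened, and FSF_GOOD clears the LUN's open status bit.
 */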
1890 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1891 {
1892 	struct scsi_device *sdev = req->data;
1893 	struct zfcp_scsi_dev *zfcp_sdev;
1894 
1895 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1896 		return;
1897 
1898 	zfcp_sdev = sdev_to_zfcp(sdev);
1899 
1900 	switch (req->qtcb->header.fsf_status) {
1901 	case FSF_PORT_HANDLE_NOT_VALID:
1902 		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
1903 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1904 		break;
1905 	case FSF_LUN_HANDLE_NOT_VALID:
1906 		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
1907 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1908 		break;
1909 	case FSF_PORT_BOXED:
1910 		zfcp_erp_set_port_status(zfcp_sdev->port,
1911 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1912 		zfcp_erp_port_reopen(zfcp_sdev->port,
1913 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
1914 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1915 		break;
1916 	case FSF_ADAPTER_STATUS_AVAILABLE:
1917 		switch (req->qtcb->header.fsf_status_qual.word[0]) {
1918 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1919 			zfcp_fc_test_link(zfcp_sdev->port);
1920 			/* fall through */
1921 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1922 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1923 			break;
1924 		}
1925 		break;
1926 	case FSF_GOOD:
1927 		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1928 		break;
1929 	}
1930 }
1931 
1932 /**
1933  * zfcp_fsf_close_lun - close LUN
1934  * @erp_action: pointer to erp_action triggering the "close LUN"
1935  * Returns: 0 on success, error otherwise
1936  */
1937 int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1938 {
1939 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1940 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1941 	struct zfcp_fsf_req *req;
1942 	int retval = -EIO;
1943 
1944 	spin_lock_irq(&qdio->req_q_lock);
1945 	if (zfcp_qdio_sbal_get(qdio))
1946 		goto out;
1947 
1948 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
1949 				  SBAL_SFLAGS0_TYPE_READ,
1950 				  qdio->adapter->pool.erp_req);
1951 
1952 	if (IS_ERR(req)) {
1953 		retval = PTR_ERR(req);
1954 		goto out;
1955 	}
1956 
1957 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1958 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1959 
1960 	req->qtcb->header.port_handle = erp_action->port->handle;
1961 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
1962 	req->handler = zfcp_fsf_close_lun_handler;
1963 	req->data = erp_action->sdev;
1964 	req->erp_action = erp_action;
1965 	erp_action->fsf_req_id = req->req_id;
1966 
1967 	zfcp_fsf_start_erp_timer(req);
1968 	retval = zfcp_fsf_req_send(req);
1969 	if (retval) {
1970 		zfcp_fsf_req_free(req);
1971 		erp_action->fsf_req_id = 0;
1972 	}
1973 out:
1974 	spin_unlock_irq(&qdio->req_q_lock);
1975 	return retval;
1976 }
1977 
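/* Fold one latency sample into the running sum, minimum and maximum. */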
1978 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
1979 {
1980 	lat_rec->sum += lat;
1981 	lat_rec->min = min(lat_rec->min, lat);
1982 	lat_rec->max = max(lat_rec->max, lat);
1983 }
1984 
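/*
 * Collect the channel and fabric latencies reported for a completed SCSI
 * command (scaled by the adapter's timer_ticks factor), update the
 * per-device latency statistics and hand a zfcp_blk_drv_data record to
 * blktrace.
 */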
1985 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
1986 {
1987 	struct fsf_qual_latency_info *lat_in;
1988 	struct latency_cont *lat = NULL;
1989 	struct zfcp_scsi_dev *zfcp_sdev;
1990 	struct zfcp_blk_drv_data blktrc;
1991 	int ticks = req->adapter->timer_ticks;
1992 
1993 	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
1994 
1995 	blktrc.flags = 0;
1996 	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
1997 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1998 		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
1999 	blktrc.inb_usage = 0;
2000 	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
2001 
2002 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
2003 	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2004 		zfcp_sdev = sdev_to_zfcp(scsi->device);
2005 		blktrc.flags |= ZFCP_BLK_LAT_VALID;
2006 		blktrc.channel_lat = lat_in->channel_lat * ticks;
2007 		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
2008 
2009 		switch (req->qtcb->bottom.io.data_direction) {
2010 		case FSF_DATADIR_DIF_READ_STRIP:
2011 		case FSF_DATADIR_DIF_READ_CONVERT:
2012 		case FSF_DATADIR_READ:
2013 			lat = &zfcp_sdev->latencies.read;
2014 			break;
2015 		case FSF_DATADIR_DIF_WRITE_INSERT:
2016 		case FSF_DATADIR_DIF_WRITE_CONVERT:
2017 		case FSF_DATADIR_WRITE:
2018 			lat = &zfcp_sdev->latencies.write;
2019 			break;
2020 		case FSF_DATADIR_CMND:
2021 			lat = &zfcp_sdev->latencies.cmd;
2022 			break;
2023 		}
2024 
2025 		if (lat) {
2026 			spin_lock(&zfcp_sdev->latencies.lock);
2027 			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2028 			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2029 			lat->counter++;
2030 			spin_unlock(&zfcp_sdev->latencies.lock);
2031 		}
2032 	}
2033 
2034 	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
2035 			    sizeof(blktrc));
2036 }
2037 
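/*
 * Handle the FSF status codes shared by FCP command and task management
 * responses: stale handles and boxed ports or LUNs trigger the matching
 * recovery action, while an incorrect data direction or CDB length leads
 * to an adapter shutdown.
 */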
2038 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
2039 {
2040 	struct scsi_cmnd *scmnd = req->data;
2041 	struct scsi_device *sdev = scmnd->device;
2042 	struct zfcp_scsi_dev *zfcp_sdev;
2043 	struct fsf_qtcb_header *header = &req->qtcb->header;
2044 
2045 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2046 		return;
2047 
2048 	zfcp_sdev = sdev_to_zfcp(sdev);
2049 
2050 	switch (header->fsf_status) {
2051 	case FSF_HANDLE_MISMATCH:
2052 	case FSF_PORT_HANDLE_NOT_VALID:
2053 		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
2054 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2055 		break;
2056 	case FSF_FCPLUN_NOT_VALID:
2057 	case FSF_LUN_HANDLE_NOT_VALID:
2058 		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
2059 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2060 		break;
2061 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2062 		zfcp_fsf_class_not_supp(req);
2063 		break;
2064 	case FSF_DIRECTION_INDICATOR_NOT_VALID:
2065 		dev_err(&req->adapter->ccw_device->dev,
2066 			"Incorrect direction %d, LUN 0x%016Lx on port "
2067 			"0x%016Lx closed\n",
2068 			req->qtcb->bottom.io.data_direction,
2069 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
2070 			(unsigned long long)zfcp_sdev->port->wwpn);
2071 		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2072 					  "fssfch3");
2073 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2074 		break;
2075 	case FSF_CMND_LENGTH_NOT_VALID:
2076 		dev_err(&req->adapter->ccw_device->dev,
2077 			"Incorrect CDB length %d, LUN 0x%016Lx on "
2078 			"port 0x%016Lx closed\n",
2079 			req->qtcb->bottom.io.fcp_cmnd_length,
2080 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
2081 			(unsigned long long)zfcp_sdev->port->wwpn);
2082 		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2083 					  "fssfch4");
2084 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2085 		break;
2086 	case FSF_PORT_BOXED:
2087 		zfcp_erp_set_port_status(zfcp_sdev->port,
2088 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2089 		zfcp_erp_port_reopen(zfcp_sdev->port,
2090 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
2091 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2092 		break;
2093 	case FSF_LUN_BOXED:
2094 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2095 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2096 				    "fssfch6");
2097 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2098 		break;
2099 	case FSF_ADAPTER_STATUS_AVAILABLE:
2100 		if (header->fsf_status_qual.word[0] ==
2101 		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2102 			zfcp_fc_test_link(zfcp_sdev->port);
2103 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2104 		break;
2105 	}
2106 }
2107 
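/*
 * Complete an FCP command request: evaluate the common and DIF-related
 * FSF status codes, translate the FCP response into the SCSI command's
 * result, emit trace data and finally call scsi_done() while still
 * holding the adapter's abort_lock.
 */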
2108 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2109 {
2110 	struct scsi_cmnd *scpnt;
2111 	struct fcp_resp_with_ext *fcp_rsp;
2112 	unsigned long flags;
2113 
2114 	read_lock_irqsave(&req->adapter->abort_lock, flags);
2115 
2116 	scpnt = req->data;
2117 	if (unlikely(!scpnt)) {
2118 		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2119 		return;
2120 	}
2121 
2122 	zfcp_fsf_fcp_handler_common(req);
2123 
2124 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2125 		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2126 		goto skip_fsfstatus;
2127 	}
2128 
2129 	switch (req->qtcb->header.fsf_status) {
2130 	case FSF_INCONSISTENT_PROT_DATA:
2131 	case FSF_INVALID_PROT_PARM:
2132 		set_host_byte(scpnt, DID_ERROR);
2133 		goto skip_fsfstatus;
2134 	case FSF_BLOCK_GUARD_CHECK_FAILURE:
2135 		zfcp_scsi_dif_sense_error(scpnt, 0x1);
2136 		goto skip_fsfstatus;
2137 	case FSF_APP_TAG_CHECK_FAILURE:
2138 		zfcp_scsi_dif_sense_error(scpnt, 0x2);
2139 		goto skip_fsfstatus;
2140 	case FSF_REF_TAG_CHECK_FAILURE:
2141 		zfcp_scsi_dif_sense_error(scpnt, 0x3);
2142 		goto skip_fsfstatus;
2143 	}
2144 	BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
2145 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
2146 	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2147 
2148 skip_fsfstatus:
2149 	zfcp_fsf_req_trace(req, scpnt);
2150 	zfcp_dbf_scsi_result(scpnt, req);
2151 
2152 	scpnt->host_scribble = NULL;
2153 	scpnt->scsi_done(scpnt);
2154 	/*
2155 	 * We must hold this lock until scsi_done has been called.
2156 	 * Otherwise we may call scsi_done after the abort for this
2157 	 * command has already completed.
2158 	 * Note: scsi_done must not block!
2159 	 */
2160 	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2161 }
2162 
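/*
 * Map the protection operation and data direction of a SCSI command to
 * the corresponding FSF_DATADIR_* value; returns -EINVAL for
 * combinations the FCP channel does not support, such as bidirectional
 * transfers.
 */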
2163 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2164 {
2165 	switch (scsi_get_prot_op(scsi_cmnd)) {
2166 	case SCSI_PROT_NORMAL:
2167 		switch (scsi_cmnd->sc_data_direction) {
2168 		case DMA_NONE:
2169 			*data_dir = FSF_DATADIR_CMND;
2170 			break;
2171 		case DMA_FROM_DEVICE:
2172 			*data_dir = FSF_DATADIR_READ;
2173 			break;
2174 		case DMA_TO_DEVICE:
2175 			*data_dir = FSF_DATADIR_WRITE;
2176 			break;
2177 		case DMA_BIDIRECTIONAL:
2178 			return -EINVAL;
2179 		}
2180 		break;
2181 
2182 	case SCSI_PROT_READ_STRIP:
2183 		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
2184 		break;
2185 	case SCSI_PROT_WRITE_INSERT:
2186 		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
2187 		break;
2188 	case SCSI_PROT_READ_PASS:
2189 		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
2190 		break;
2191 	case SCSI_PROT_WRITE_PASS:
2192 		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
2193 		break;
2194 	default:
2195 		return -EINVAL;
2196 	}
2197 
2198 	return 0;
2199 }
2200 
2201 /**
2202  * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2203  * @scsi_cmnd: scsi command to be sent
2204  */
2205 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2206 {
2207 	struct zfcp_fsf_req *req;
2208 	struct fcp_cmnd *fcp_cmnd;
2209 	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2210 	int retval = -EIO;
2211 	struct scsi_device *sdev = scsi_cmnd->device;
2212 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2213 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2214 	struct zfcp_qdio *qdio = adapter->qdio;
2215 	struct fsf_qtcb_bottom_io *io;
2216 	unsigned long flags;
2217 
2218 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2219 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
2220 		return -EBUSY;
2221 
2222 	spin_lock_irqsave(&qdio->req_q_lock, flags);
2223 	if (atomic_read(&qdio->req_q_free) <= 0) {
2224 		atomic_inc(&qdio->req_q_full);
2225 		goto out;
2226 	}
2227 
2228 	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2229 		sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2230 
2231 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2232 				  sbtype, adapter->pool.scsi_req);
2233 
2234 	if (IS_ERR(req)) {
2235 		retval = PTR_ERR(req);
2236 		goto out;
2237 	}
2238 
2239 	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2240 
2241 	io = &req->qtcb->bottom.io;
2242 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2243 	req->data = scsi_cmnd;
2244 	req->handler = zfcp_fsf_fcp_cmnd_handler;
2245 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2246 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2247 	io->service_class = FSF_CLASS_3;
2248 	io->fcp_cmnd_length = FCP_CMND_LEN;
2249 
2250 	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
2251 		io->data_block_length = scsi_cmnd->device->sector_size;
2252 		io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
2253 	}
2254 
2255 	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
2256 		goto failed_scsi_cmnd;
2257 
2258 	BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
2259 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
2260 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
2261 
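	/*
	 * Commands that carry separate DIF/DIX protection data get their
	 * protection scatter-gather list mapped first, using QDIO data
	 * division, before the regular data buffers are mapped below.
	 */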
2262 	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
2263 	    scsi_prot_sg_count(scsi_cmnd)) {
2264 		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2265 				       scsi_prot_sg_count(scsi_cmnd));
2266 		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2267 						 scsi_prot_sglist(scsi_cmnd));
2268 		if (retval)
2269 			goto failed_scsi_cmnd;
2270 		io->prot_data_length = zfcp_qdio_real_bytes(
2271 						scsi_prot_sglist(scsi_cmnd));
2272 	}
2273 
2274 	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2275 					 scsi_sglist(scsi_cmnd));
2276 	if (unlikely(retval))
2277 		goto failed_scsi_cmnd;
2278 
2279 	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2280 	if (zfcp_adapter_multi_buffer_active(adapter))
2281 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
2282 
2283 	retval = zfcp_fsf_req_send(req);
2284 	if (unlikely(retval))
2285 		goto failed_scsi_cmnd;
2286 
2287 	goto out;
2288 
2289 failed_scsi_cmnd:
2290 	zfcp_fsf_req_free(req);
2291 	scsi_cmnd->host_scribble = NULL;
2292 out:
2293 	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
2294 	return retval;
2295 }
2296 
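/*
 * Evaluate a completed task management request: any response code other
 * than FCP_TMF_CMPL, or a request-level error, marks the task management
 * function as failed.
 */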
2297 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2298 {
2299 	struct fcp_resp_with_ext *fcp_rsp;
2300 	struct fcp_resp_rsp_info *rsp_info;
2301 
2302 	zfcp_fsf_fcp_handler_common(req);
2303 
2304 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
2305 	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2306 
2307 	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2308 	     (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2309 		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2310 }
2311 
2312 /**
2313  * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
2314  * @scmnd: SCSI command to send the task management command for
2315  * @tm_flags: unsigned byte for task management flags
2316  * Returns: pointer to struct zfcp_fsf_req on success, NULL otherwise
2317  */
2318 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
2319 					    u8 tm_flags)
2320 {
2321 	struct zfcp_fsf_req *req = NULL;
2322 	struct fcp_cmnd *fcp_cmnd;
2323 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
2324 	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2325 
2326 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2327 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
2328 		return NULL;
2329 
2330 	spin_lock_irq(&qdio->req_q_lock);
2331 	if (zfcp_qdio_sbal_get(qdio))
2332 		goto out;
2333 
2334 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2335 				  SBAL_SFLAGS0_TYPE_WRITE,
2336 				  qdio->adapter->pool.scsi_req);
2337 
2338 	if (IS_ERR(req)) {
2339 		req = NULL;
2340 		goto out;
2341 	}
2342 
2343 	req->data = scmnd;
2344 	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2345 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2346 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2347 	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2348 	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2349 	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2350 
2351 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2352 
2353 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
2354 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
2355 
2356 	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2357 	if (!zfcp_fsf_req_send(req))
2358 		goto out;
2359 
2360 	zfcp_fsf_req_free(req);
2361 	req = NULL;
2362 out:
2363 	spin_unlock_irq(&qdio->req_q_lock);
2364 	return req;
2365 }
2366 
2367 /**
2368  * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2369  * @qdio: pointer to struct zfcp_qdio
2370  * @sbal_idx: response queue index of SBAL to be processed
2371  */
2372 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2373 {
2374 	struct zfcp_adapter *adapter = qdio->adapter;
2375 	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
2376 	struct qdio_buffer_element *sbale;
2377 	struct zfcp_fsf_req *fsf_req;
2378 	unsigned long req_id;
2379 	int idx;
2380 
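	/*
	 * Each used SBALE carries the request ID of the originating FSF
	 * request in its address field; look the request up, remove it from
	 * the adapter's request list and complete it.
	 */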
2381 	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2382 
2383 		sbale = &sbal->element[idx];
2384 		req_id = (unsigned long) sbale->addr;
2385 		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2386 
2387 		if (!fsf_req) {
2388 			/*
2389 			 * An unknown request ID means that memory is potentially
2390 			 * corrupted and we must stop the machine immediately.
2391 			 */
2392 			zfcp_qdio_siosl(adapter);
2393 			panic("error: unknown req_id (%lx) on adapter %s.\n",
2394 			      req_id, dev_name(&adapter->ccw_device->dev));
2395 		}
2396 
2397 		zfcp_fsf_req_complete(fsf_req);
2398 
2399 		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2400 			break;
2401 	}
2402 }
2403