xref: /openbmc/linux/drivers/s390/scsi/zfcp_fsf.c (revision fd589a8f)
1 /*
2  * zfcp device driver
3  *
4  * Implementation of FSF commands.
5  *
6  * Copyright IBM Corporation 2002, 2009
7  */
8 
9 #define KMSG_COMPONENT "zfcp"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 
12 #include <linux/blktrace_api.h>
13 #include "zfcp_ext.h"
14 #include "zfcp_dbf.h"
15 
16 static void zfcp_fsf_request_timeout_handler(unsigned long data)
17 {
18 	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
19 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
20 				"fsrth_1", NULL);
21 }
22 
23 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
24 				 unsigned long timeout)
25 {
26 	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
27 	fsf_req->timer.data = (unsigned long) fsf_req->adapter;
28 	fsf_req->timer.expires = jiffies + timeout;
29 	add_timer(&fsf_req->timer);
30 }
31 
32 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
33 {
34 	BUG_ON(!fsf_req->erp_action);
35 	fsf_req->timer.function = zfcp_erp_timeout_handler;
36 	fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
37 	fsf_req->timer.expires = jiffies + 30 * HZ;
38 	add_timer(&fsf_req->timer);
39 }
40 
41 /* association between FSF command and FSF QTCB type */
42 static u32 fsf_qtcb_type[] = {
43 	[FSF_QTCB_FCP_CMND] =             FSF_IO_COMMAND,
44 	[FSF_QTCB_ABORT_FCP_CMND] =       FSF_SUPPORT_COMMAND,
45 	[FSF_QTCB_OPEN_PORT_WITH_DID] =   FSF_SUPPORT_COMMAND,
46 	[FSF_QTCB_OPEN_LUN] =             FSF_SUPPORT_COMMAND,
47 	[FSF_QTCB_CLOSE_LUN] =            FSF_SUPPORT_COMMAND,
48 	[FSF_QTCB_CLOSE_PORT] =           FSF_SUPPORT_COMMAND,
49 	[FSF_QTCB_CLOSE_PHYSICAL_PORT] =  FSF_SUPPORT_COMMAND,
50 	[FSF_QTCB_SEND_ELS] =             FSF_SUPPORT_COMMAND,
51 	[FSF_QTCB_SEND_GENERIC] =         FSF_SUPPORT_COMMAND,
52 	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
53 	[FSF_QTCB_EXCHANGE_PORT_DATA] =   FSF_PORT_COMMAND,
54 	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
55 	[FSF_QTCB_UPLOAD_CONTROL_FILE] =  FSF_SUPPORT_COMMAND
56 };
57 
58 static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
59 {
60 	u16 subtable = table >> 16;
61 	u16 rule = table & 0xffff;
62 	const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
63 
64 	if (subtable && subtable < ARRAY_SIZE(act_type))
65 		dev_warn(&adapter->ccw_device->dev,
66 			 "Access denied according to ACT rule type %s, "
67 			 "rule %d\n", act_type[subtable], rule);
68 }
69 
/*
 * Handle an access-denied response for a remote port: warn, decode the
 * ACT rule hits from both status qualifier halfwords, put the port into
 * the "access denied" ERP state and fail the request.
 */
static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
					struct zfcp_port *port)
{
	struct fsf_qtcb_header *header = &req->qtcb->header;
	dev_warn(&req->adapter->ccw_device->dev,
		 "Access denied to port 0x%016Lx\n",
		 (unsigned long long)port->wwpn);
	zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
	zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
	zfcp_erp_port_access_denied(port, "fspad_1", req);
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
82 
/*
 * Handle an access-denied response for a LUN: warn, decode the ACT rule
 * hits from both status qualifier halfwords, put the unit into the
 * "access denied" ERP state and fail the request.
 */
static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
					struct zfcp_unit *unit)
{
	struct fsf_qtcb_header *header = &req->qtcb->header;
	dev_warn(&req->adapter->ccw_device->dev,
		 "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
		 (unsigned long long)unit->fcp_lun,
		 (unsigned long long)unit->port->wwpn);
	zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
	zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
	zfcp_erp_unit_access_denied(unit, "fsuad_1", req);
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
96 
97 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
98 {
99 	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
100 		"operational because of an unsupported FC class\n");
101 	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req);
102 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
103 }
104 
105 /**
106  * zfcp_fsf_req_free - free memory used by fsf request
107  * @fsf_req: pointer to struct zfcp_fsf_req
108  */
109 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
110 {
111 	if (likely(req->pool)) {
112 		if (likely(req->qtcb))
113 			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
114 		mempool_free(req, req->pool);
115 		return;
116 	}
117 
118 	if (likely(req->qtcb))
119 		kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
120 	kfree(req);
121 }
122 
/*
 * Unsolicited "port closed" notification: find the port with the
 * reported destination ID and schedule a reopen for it.
 */
static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *sr_buf = req->data;
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_port *port;
	int d_id = sr_buf->d_id & ZFCP_DID_MASK;
	unsigned long flags;

	read_lock_irqsave(&zfcp_data.config_lock, flags);
	list_for_each_entry(port, &adapter->port_list_head, list)
		if (port->d_id == d_id) {
			/* drop the config lock before triggering recovery */
			read_unlock_irqrestore(&zfcp_data.config_lock, flags);
			zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
			return;
		}
	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}
140 
/*
 * Evaluate "link down" information: mark the link unplugged, block all
 * remote ports, print a warning describing the specific cause (when the
 * adapter supplied one) and mark the adapter failed for ERP.
 *
 * Does nothing if the link is already known to be unplugged.
 */
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
					 struct fsf_link_down_info *link_down)
{
	struct zfcp_adapter *adapter = req->adapter;
	unsigned long flags;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
		return;

	atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);

	read_lock_irqsave(&zfcp_data.config_lock, flags);
	zfcp_scsi_schedule_rports_block(adapter);
	read_unlock_irqrestore(&zfcp_data.config_lock, flags);

	/* no detail record supplied: skip the specific diagnostics */
	if (!link_down)
		goto out;

	switch (link_down->error_code) {
	case FSF_PSQ_LINK_NO_LIGHT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "There is no light signal from the local "
			 "fibre channel cable\n");
		break;
	case FSF_PSQ_LINK_WRAP_PLUG:
		dev_warn(&req->adapter->ccw_device->dev,
			 "There is a wrap plug instead of a fibre "
			 "channel cable\n");
		break;
	case FSF_PSQ_LINK_NO_FCP:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The adjacent fibre channel node does not "
			 "support FCP\n");
		break;
	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP device is suspended because of a "
			 "firmware update\n");
		break;
	case FSF_PSQ_LINK_INVALID_WWPN:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP device detected a WWPN that is "
			 "duplicate or not valid\n");
		break;
	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The fibre channel fabric does not support NPIV\n");
		break;
	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP adapter cannot support more NPIV ports\n");
		break;
	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The adjacent switch cannot support "
			 "more NPIV ports\n");
		break;
	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP adapter could not log in to the "
			 "fibre channel fabric\n");
		break;
	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The WWPN assignment file on the FCP adapter "
			 "has been damaged\n");
		break;
	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The mode table on the FCP adapter "
			 "has been damaged\n");
		break;
	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "All NPIV ports on the FCP adapter have "
			 "been assigned\n");
		break;
	default:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The link between the FCP adapter and "
			 "the FC fabric is down\n");
	}
out:
	zfcp_erp_adapter_failed(adapter, id, req);
}
226 
227 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
228 {
229 	struct fsf_status_read_buffer *sr_buf = req->data;
230 	struct fsf_link_down_info *ldi =
231 		(struct fsf_link_down_info *) &sr_buf->payload;
232 
233 	switch (sr_buf->status_subtype) {
234 	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
235 		zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi);
236 		break;
237 	case FSF_STATUS_READ_SUB_FDISC_FAILED:
238 		zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi);
239 		break;
240 	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
241 		zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL);
242 	};
243 }
244 
/*
 * Completion handler for an unsolicited status read request: trace the
 * notification, dispatch it by status type, release the buffer and the
 * request, and account for the consumed status read so it gets refilled
 * via the adapter's stat work.
 */
static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_status_read_buffer *sr_buf = req->data;

	/* dismissed (queue shutdown): just trace and free, no dispatch */
	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
		zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf);
		mempool_free(sr_buf, adapter->pool.status_read_data);
		zfcp_fsf_req_free(req);
		return;
	}

	zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf);

	switch (sr_buf->status_type) {
	case FSF_STATUS_READ_PORT_CLOSED:
		zfcp_fsf_status_read_port_closed(req);
		break;
	case FSF_STATUS_READ_INCOMING_ELS:
		zfcp_fc_incoming_els(req);
		break;
	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
		break;
	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
		dev_warn(&adapter->ccw_device->dev,
			 "The error threshold for checksum statistics "
			 "has been exceeded\n");
		zfcp_dbf_hba_berr(adapter->dbf, req);
		break;
	case FSF_STATUS_READ_LINK_DOWN:
		zfcp_fsf_status_read_link_down(req);
		break;
	case FSF_STATUS_READ_LINK_UP:
		dev_info(&adapter->ccw_device->dev,
			 "The local link has been restored\n");
		/* All ports should be marked as ready to run again */
		zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL,
					       ZFCP_STATUS_COMMON_RUNNING,
					       ZFCP_SET);
		zfcp_erp_adapter_reopen(adapter,
					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
					ZFCP_STATUS_COMMON_ERP_FAILED,
					"fssrh_2", req);
		break;
	case FSF_STATUS_READ_NOTIFICATION_LOST:
		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
			zfcp_erp_adapter_access_changed(adapter, "fssrh_3",
							req);
		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
			schedule_work(&adapter->scan_work);
		break;
	case FSF_STATUS_READ_CFDC_UPDATED:
		zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req);
		break;
	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
		adapter->adapter_features = sr_buf->payload.word[0];
		break;
	}

	mempool_free(sr_buf, adapter->pool.status_read_data);
	zfcp_fsf_req_free(req);

	/* one status read consumed; stat work re-issues it */
	atomic_inc(&adapter->stat_miss);
	queue_work(adapter->work_queue, &adapter->stat_work);
}
310 
/*
 * Evaluate the FSF status qualifier of a request that completed with
 * FSF_ADAPTER_STATUS_AVAILABLE. The first group of qualifiers returns
 * without touching the request; every case that falls out of the switch
 * marks the request failed.
 */
static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
{
	switch (req->qtcb->header.fsf_status_qual.word[0]) {
	case FSF_SQ_FCP_RSP_AVAILABLE:
	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
	case FSF_SQ_NO_RETRY_POSSIBLE:
	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		return;
	case FSF_SQ_COMMAND_ABORTED:
		req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
		break;
	case FSF_SQ_NO_RECOM:
		dev_err(&req->adapter->ccw_device->dev,
			"The FCP adapter reported a problem "
			"that cannot be recovered\n");
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
		break;
	}
	/* all non-return stats set FSFREQ_ERROR*/
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
332 
333 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
334 {
335 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
336 		return;
337 
338 	switch (req->qtcb->header.fsf_status) {
339 	case FSF_UNKNOWN_COMMAND:
340 		dev_err(&req->adapter->ccw_device->dev,
341 			"The FCP adapter does not recognize the command 0x%x\n",
342 			req->qtcb->header.fsf_command);
343 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req);
344 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
345 		break;
346 	case FSF_ADAPTER_STATUS_AVAILABLE:
347 		zfcp_fsf_fsfstatus_qual_eval(req);
348 		break;
349 	}
350 }
351 
/*
 * Evaluate the QTCB protocol status of a completed request.
 * FSF_PROT_GOOD and FSF_PROT_FSF_STATUS_PRESENTED are the only
 * non-error outcomes; every other protocol status triggers adapter
 * recovery or shutdown and marks the request failed.
 */
static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_qtcb *qtcb = req->qtcb;
	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;

	zfcp_dbf_hba_fsf_response(req);

	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
		return;
	}

	switch (qtcb->prefix.prot_status) {
	case FSF_PROT_GOOD:
	case FSF_PROT_FSF_STATUS_PRESENTED:
		return;
	case FSF_PROT_QTCB_VERSION_ERROR:
		dev_err(&adapter->ccw_device->dev,
			"QTCB version 0x%x not supported by FCP adapter "
			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
			psq->word[0], psq->word[1]);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req);
		break;
	case FSF_PROT_ERROR_STATE:
	case FSF_PROT_SEQ_NUMB_ERROR:
		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
		req->status |= ZFCP_STATUS_FSFREQ_RETRY;
		break;
	case FSF_PROT_UNSUPP_QTCB_TYPE:
		dev_err(&adapter->ccw_device->dev,
			"The QTCB type is not supported by the FCP adapter\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req);
		break;
	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
				&adapter->status);
		break;
	case FSF_PROT_DUPLICATE_REQUEST_ID:
		dev_err(&adapter->ccw_device->dev,
			"0x%Lx is an ambiguous request identifier\n",
			(unsigned long long)qtcb->bottom.support.req_handle);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
		break;
	case FSF_PROT_LINK_DOWN:
		zfcp_fsf_link_down_info_eval(req, "fspse_5",
					     &psq->link_down_info);
		/* FIXME: reopening adapter now? better wait for link up */
		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
		break;
	case FSF_PROT_REEST_QUEUE:
		/* All ports should be marked as ready to run again */
		zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL,
					       ZFCP_STATUS_COMMON_RUNNING,
					       ZFCP_SET);
		zfcp_erp_adapter_reopen(adapter,
					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
					ZFCP_STATUS_COMMON_ERP_FAILED,
					"fspse_8", req);
		break;
	default:
		dev_err(&adapter->ccw_device->dev,
			"0x%x is not a valid transfer protocol status\n",
			qtcb->prefix.prot_status);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
	}
	/* reached only for the error cases above */
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
421 
/**
 * zfcp_fsf_req_complete - process completion of a FSF request
 * @req: The FSF request that has been completed.
 *
 * When a request has been completed either from the FCP adapter,
 * or it has been dismissed due to a queue shutdown, this function
 * is called to process the completion status and trigger further
 * events related to the FSF request.
 */
static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
{
	/* unsolicited status reads have no QTCB and their own handler */
	if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
		zfcp_fsf_status_read_handler(req);
		return;
	}

	del_timer(&req->timer);
	zfcp_fsf_protstatus_eval(req);
	zfcp_fsf_fsfstatus_eval(req);
	req->handler(req);

	if (req->erp_action)
		zfcp_erp_notify(req->erp_action, 0);

	/* cleanup-marked requests are freed here; others wake the waiter */
	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
		zfcp_fsf_req_free(req);
	else
		complete(&req->completion);
}
451 
/**
 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
 * @adapter: pointer to struct zfcp_adapter
 *
 * Never ever call this without shutting down the adapter first.
 * Otherwise the adapter would continue using and corrupting s390 storage.
 * Included BUG_ON() call to ensure this is done.
 * ERP is supposed to be the only user of this function.
 */
void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
{
	struct zfcp_fsf_req *req, *tmp;
	unsigned long flags;
	LIST_HEAD(remove_queue);
	unsigned int i;

	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
	/* move every hash bucket onto a private list under the lock ... */
	spin_lock_irqsave(&adapter->req_list_lock, flags);
	for (i = 0; i < REQUEST_LIST_SIZE; i++)
		list_splice_init(&adapter->req_list[i], &remove_queue);
	spin_unlock_irqrestore(&adapter->req_list_lock, flags);

	/* ... then complete each request as dismissed, lock-free */
	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
		list_del(&req->list);
		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
		zfcp_fsf_req_complete(req);
	}
}
480 
/*
 * Copy the exchange-config result into the fc_host attributes and the
 * adapter structure, and validate the reported FC topology.
 *
 * Returns 0 on success, -EIO for an unsupported topology (the adapter
 * is shut down in that case).
 */
static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
	struct fsf_qtcb_bottom_config *bottom;
	struct zfcp_adapter *adapter = req->adapter;
	struct Scsi_Host *shost = adapter->scsi_host;

	bottom = &req->qtcb->bottom.config;

	/* caller may have asked for a raw copy of the config record */
	if (req->data)
		memcpy(req->data, bottom, sizeof(*bottom));

	fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
	fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
	fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
	fc_host_speed(shost) = bottom->fc_link_speed;
	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;

	adapter->hydra_version = bottom->adapter_type;
	adapter->timer_ticks = bottom->timer_interval;

	if (fc_host_permanent_port_name(shost) == -1)
		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);

	switch (bottom->fc_topology) {
	case FSF_TOPO_P2P:
		adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
		adapter->peer_wwpn = bottom->plogi_payload.wwpn;
		adapter->peer_wwnn = bottom->plogi_payload.wwnn;
		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
		break;
	case FSF_TOPO_FABRIC:
		fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
		break;
	case FSF_TOPO_AL:
		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
		/* fall through: arbitrated loop is not supported */
	default:
		dev_err(&adapter->ccw_device->dev,
			"Unknown or unsupported arbitrated loop "
			"fibre channel topology detected\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req);
		return -EIO;
	}

	return 0;
}
527 
/*
 * Completion handler for an exchange-config-data request: record the
 * adapter capabilities, evaluate the config record (or reset the
 * fc_host attributes when the exchange was incomplete) and verify the
 * QTCB version range supported by the adapter.
 */
static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_qtcb *qtcb = req->qtcb;
	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
	struct Scsi_Host *shost = adapter->scsi_host;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	adapter->fsf_lic_version = bottom->lic_version;
	adapter->adapter_features = bottom->adapter_features;
	adapter->connection_features = bottom->connection_features;
	adapter->peer_wwpn = 0;
	adapter->peer_wwnn = 0;
	adapter->peer_d_id = 0;

	switch (qtcb->header.fsf_status) {
	case FSF_GOOD:
		if (zfcp_fsf_exchange_config_evaluate(req))
			return;

		/* our QTCB must fit into what the adapter can accept */
		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
			dev_err(&adapter->ccw_device->dev,
				"FCP adapter maximum QTCB size (%d bytes) "
				"is too small\n",
				bottom->max_qtcb_size);
			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req);
			return;
		}
		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
				&adapter->status);
		break;
	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
		/* no valid config record: clear the fc_host attributes */
		fc_host_node_name(shost) = 0;
		fc_host_port_name(shost) = 0;
		fc_host_port_id(shost) = 0;
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
		adapter->hydra_version = 0;

		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
				&adapter->status);

		zfcp_fsf_link_down_info_eval(req, "fsecdh2",
			&qtcb->header.fsf_status_qual.link_down_info);
		break;
	default:
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req);
		return;
	}

	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
		adapter->hardware_version = bottom->hardware_version;
		memcpy(fc_host_serial_number(shost), bottom->serial_number,
		       min(FC_SERIAL_NUMBER_SIZE, 17));
		/* serial number is delivered in EBCDIC */
		EBCASC(fc_host_serial_number(shost),
		       min(FC_SERIAL_NUMBER_SIZE, 17));
	}

	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
		dev_err(&adapter->ccw_device->dev,
			"The FCP adapter only supports newer "
			"control block versions\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req);
		return;
	}
	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
		dev_err(&adapter->ccw_device->dev,
			"The FCP adapter only supports older "
			"control block versions\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req);
	}
}
602 
603 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
604 {
605 	struct zfcp_adapter *adapter = req->adapter;
606 	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
607 	struct Scsi_Host *shost = adapter->scsi_host;
608 
609 	if (req->data)
610 		memcpy(req->data, bottom, sizeof(*bottom));
611 
612 	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
613 		fc_host_permanent_port_name(shost) = bottom->wwpn;
614 		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
615 	} else
616 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
617 	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
618 	fc_host_supported_speeds(shost) = bottom->supported_speed;
619 }
620 
621 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
622 {
623 	struct fsf_qtcb *qtcb = req->qtcb;
624 
625 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
626 		return;
627 
628 	switch (qtcb->header.fsf_status) {
629 	case FSF_GOOD:
630 		zfcp_fsf_exchange_port_evaluate(req);
631 		break;
632 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
633 		zfcp_fsf_exchange_port_evaluate(req);
634 		zfcp_fsf_link_down_info_eval(req, "fsepdh1",
635 			&qtcb->header.fsf_status_qual.link_down_info);
636 		break;
637 	}
638 }
639 
/*
 * Check for a free SBAL on the request queue.
 *
 * NOTE: asymmetric locking is intentional. Returns 1 with
 * qdio->req_q_lock held (bottom halves disabled) when at least one
 * SBAL is available, so the caller continues under the lock; returns
 * 0 with the lock released (used as wait_event condition in
 * zfcp_fsf_req_sbal_get).
 */
static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio)
{
	struct zfcp_qdio_queue *req_q = &qdio->req_q;

	spin_lock_bh(&qdio->req_q_lock);
	if (atomic_read(&req_q->count))
		return 1;	/* keep the lock held for the caller */
	spin_unlock_bh(&qdio->req_q_lock);
	return 0;
}
650 
/*
 * Wait up to 5 seconds for a free SBAL on the request queue.
 *
 * Called with qdio->req_q_lock held; drops it while waiting and
 * returns with it held again — via zfcp_fsf_sbal_check() on success,
 * or re-acquired explicitly on failure.
 *
 * Returns 0 when an SBAL is available, -EIO on timeout or signal.
 */
static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	long ret;

	spin_unlock_bh(&qdio->req_q_lock);
	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
			       zfcp_fsf_sbal_check(qdio), 5 * HZ);
	if (ret > 0)
		return 0;
	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
	}

	spin_lock_bh(&qdio->req_q_lock);
	return -EIO;
}
670 
671 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
672 {
673 	struct zfcp_fsf_req *req;
674 
675 	if (likely(pool))
676 		req = mempool_alloc(pool, GFP_ATOMIC);
677 	else
678 		req = kmalloc(sizeof(*req), GFP_ATOMIC);
679 
680 	if (unlikely(!req))
681 		return NULL;
682 
683 	memset(req, 0, sizeof(*req));
684 	req->pool = pool;
685 	return req;
686 }
687 
688 static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
689 {
690 	struct fsf_qtcb *qtcb;
691 
692 	if (likely(pool))
693 		qtcb = mempool_alloc(pool, GFP_ATOMIC);
694 	else
695 		qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
696 
697 	if (unlikely(!qtcb))
698 		return NULL;
699 
700 	memset(qtcb, 0, sizeof(*qtcb));
701 	return qtcb;
702 }
703 
704 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
705 						u32 fsf_cmd, mempool_t *pool)
706 {
707 	struct qdio_buffer_element *sbale;
708 	struct zfcp_qdio_queue *req_q = &qdio->req_q;
709 	struct zfcp_adapter *adapter = qdio->adapter;
710 	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
711 
712 	if (unlikely(!req))
713 		return ERR_PTR(-ENOMEM);
714 
715 	if (adapter->req_no == 0)
716 		adapter->req_no++;
717 
718 	INIT_LIST_HEAD(&req->list);
719 	init_timer(&req->timer);
720 	init_completion(&req->completion);
721 
722 	req->adapter = adapter;
723 	req->fsf_command = fsf_cmd;
724 	req->req_id = adapter->req_no;
725 	req->queue_req.sbal_number = 1;
726 	req->queue_req.sbal_first = req_q->first;
727 	req->queue_req.sbal_last = req_q->first;
728 	req->queue_req.sbale_curr = 1;
729 
730 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
731 	sbale[0].addr = (void *) req->req_id;
732 	sbale[0].flags |= SBAL_FLAGS0_COMMAND;
733 
734 	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
735 		if (likely(pool))
736 			req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
737 		else
738 			req->qtcb = zfcp_qtcb_alloc(NULL);
739 
740 		if (unlikely(!req->qtcb)) {
741 			zfcp_fsf_req_free(req);
742 			return ERR_PTR(-ENOMEM);
743 		}
744 
745 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
746 		req->qtcb->prefix.req_id = req->req_id;
747 		req->qtcb->prefix.ulp_info = 26;
748 		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
749 		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
750 		req->qtcb->header.req_handle = req->req_id;
751 		req->qtcb->header.fsf_command = req->fsf_command;
752 		req->seq_no = adapter->fsf_req_seq_no;
753 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
754 		sbale[1].addr = (void *) req->qtcb;
755 		sbale[1].length = sizeof(struct fsf_qtcb);
756 	}
757 
758 	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
759 		zfcp_fsf_req_free(req);
760 		return ERR_PTR(-EIO);
761 	}
762 
763 	return req;
764 }
765 
/*
 * Hand an initialized FSF request to the QDIO outbound queue.
 *
 * On success the request is tracked in the adapter's request hash and
 * is owned by the hardware/interrupt path from then on. On QDIO
 * failure the request is removed from the hash again (if still
 * present) and adapter recovery is started.
 *
 * Returns 0 on success, -EIO when the QDIO send failed.
 */
static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	unsigned long	     flags;
	int		     idx;
	int		     with_qtcb = (req->qtcb != NULL);

	/* put allocated FSF request into hash table */
	spin_lock_irqsave(&adapter->req_list_lock, flags);
	idx = zfcp_reqlist_hash(req->req_id);
	list_add_tail(&req->list, &adapter->req_list[idx]);
	spin_unlock_irqrestore(&adapter->req_list_lock, flags);

	req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
	req->issued = get_clock();
	if (zfcp_qdio_send(qdio, &req->queue_req)) {
		del_timer(&req->timer);
		spin_lock_irqsave(&adapter->req_list_lock, flags);
		/* lookup request again, list might have changed */
		if (zfcp_reqlist_find_safe(adapter, req))
			zfcp_reqlist_remove(adapter, req);
		spin_unlock_irqrestore(&adapter->req_list_lock, flags);
		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
		return -EIO;
	}

	/* Don't increase for unsolicited status */
	if (with_qtcb)
		adapter->fsf_req_seq_no++;
	adapter->req_no++;

	return 0;
}
800 
/**
 * zfcp_fsf_status_read - send status read request
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, ERROR otherwise
 */
int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct zfcp_fsf_req *req;
	struct fsf_status_read_buffer *sr_buf;
	struct qdio_buffer_element *sbale;
	int retval = -EIO;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
				  adapter->pool.status_read_req);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	/* status reads carry no QTCB; the buffer goes into SBALE 2 */
	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
	req->queue_req.sbale_curr = 2;

	sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
	if (!sr_buf) {
		retval = -ENOMEM;
		goto failed_buf;
	}
	memset(sr_buf, 0, sizeof(*sr_buf));
	req->data = sr_buf;
	sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req);
	sbale->addr = (void *) sr_buf;
	sbale->length = sizeof(*sr_buf);

	retval = zfcp_fsf_req_send(req);
	if (retval)
		goto failed_req_send;

	goto out;

failed_req_send:
	mempool_free(sr_buf, adapter->pool.status_read_data);
failed_buf:
	zfcp_fsf_req_free(req);
	zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return retval;
}
856 
/*
 * Completion handler for an abort-FCP-command request. Stale handles
 * trigger adapter or port recovery; equal status qualifier words
 * indicate the handle is invalid everywhere, not just raced.
 */
static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_unit *unit = req->data;
	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		if (fsq->word[0] == fsq->word[1]) {
			zfcp_erp_adapter_reopen(unit->port->adapter, 0,
						"fsafch1", req);
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		}
		break;
	case FSF_LUN_HANDLE_NOT_VALID:
		if (fsq->word[0] == fsq->word[1]) {
			zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req);
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		}
		break;
	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
		/* command already gone: abort is effectively done */
		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_port_boxed(unit->port, "fsafch3", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			       ZFCP_STATUS_FSFREQ_RETRY;
		break;
	case FSF_LUN_BOXED:
		zfcp_erp_unit_boxed(unit, "fsafch4", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			       ZFCP_STATUS_FSFREQ_RETRY;
                break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (fsq->word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(unit->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
		break;
	}
}
907 
/**
 * zfcp_fsf_abort_fcp_command - abort running SCSI command
 * @old_req_id: request id of the SCSI command to be aborted
 * @unit: pointer to struct zfcp_unit the command was sent to
 * Returns: pointer to struct zfcp_fsf_req, NULL on failure
 */

struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
						struct zfcp_unit *unit)
{
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *req = NULL;
	struct zfcp_qdio *qdio = unit->port->adapter->qdio;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out;
	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
				  qdio->adapter->pool.scsi_abort);
	if (IS_ERR(req)) {
		req = NULL;
		goto out;
	}

	/* unit must be unblocked, otherwise the abort cannot be issued */
	if (unlikely(!(atomic_read(&unit->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		goto out_error_free;

	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

	req->data = unit;
	req->handler = zfcp_fsf_abort_fcp_command_handler;
	req->qtcb->header.lun_handle = unit->handle;
	req->qtcb->header.port_handle = unit->port->handle;
	req->qtcb->bottom.support.req_handle = (u64) old_req_id;

	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
	if (!zfcp_fsf_req_send(req))
		goto out;

out_error_free:
	zfcp_fsf_req_free(req);
	req = NULL;
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return req;
}
957 
958 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
959 {
960 	struct zfcp_adapter *adapter = req->adapter;
961 	struct zfcp_send_ct *send_ct = req->data;
962 	struct fsf_qtcb_header *header = &req->qtcb->header;
963 
964 	send_ct->status = -EINVAL;
965 
966 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
967 		goto skip_fsfstatus;
968 
969 	switch (header->fsf_status) {
970         case FSF_GOOD:
971 		zfcp_dbf_san_ct_response(req);
972 		send_ct->status = 0;
973 		break;
974         case FSF_SERVICE_CLASS_NOT_SUPPORTED:
975 		zfcp_fsf_class_not_supp(req);
976 		break;
977         case FSF_ADAPTER_STATUS_AVAILABLE:
978                 switch (header->fsf_status_qual.word[0]){
979                 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
980                 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
981 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
982 			break;
983                 }
984                 break;
985 	case FSF_ACCESS_DENIED:
986 		break;
987         case FSF_PORT_BOXED:
988 		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
989 			       ZFCP_STATUS_FSFREQ_RETRY;
990 		break;
991 	case FSF_PORT_HANDLE_NOT_VALID:
992 		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
993 		/* fall through */
994 	case FSF_GENERIC_COMMAND_REJECTED:
995 	case FSF_PAYLOAD_SIZE_MISMATCH:
996 	case FSF_REQUEST_SIZE_TOO_LARGE:
997 	case FSF_RESPONSE_SIZE_TOO_LARGE:
998 	case FSF_SBAL_MISMATCH:
999 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1000 		break;
1001 	}
1002 
1003 skip_fsfstatus:
1004 	if (send_ct->handler)
1005 		send_ct->handler(send_ct->handler_data);
1006 }
1007 
1008 static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
1009 					    struct scatterlist *sg_req,
1010 					    struct scatterlist *sg_resp)
1011 {
1012 	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
1013 	sbale[2].addr   = sg_virt(sg_req);
1014 	sbale[2].length = sg_req->length;
1015 	sbale[3].addr   = sg_virt(sg_resp);
1016 	sbale[3].length = sg_resp->length;
1017 	sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1018 }
1019 
1020 static int zfcp_fsf_one_sbal(struct scatterlist *sg)
1021 {
1022 	return sg_is_last(sg) && sg->length <= PAGE_SIZE;
1023 }
1024 
1025 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1026 				       struct scatterlist *sg_req,
1027 				       struct scatterlist *sg_resp,
1028 				       int max_sbals)
1029 {
1030 	struct zfcp_adapter *adapter = req->adapter;
1031 	struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
1032 							       &req->queue_req);
1033 	u32 feat = adapter->adapter_features;
1034 	int bytes;
1035 
1036 	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
1037 		if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
1038 			return -EOPNOTSUPP;
1039 
1040 		zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
1041 		return 0;
1042 	}
1043 
1044 	/* use single, unchained SBAL if it can hold the request */
1045 	if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
1046 		zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
1047 		return 0;
1048 	}
1049 
1050 	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
1051 					SBAL_FLAGS0_TYPE_WRITE_READ,
1052 					sg_req, max_sbals);
1053 	if (bytes <= 0)
1054 		return -EIO;
1055 	req->qtcb->bottom.support.req_buf_length = bytes;
1056 	req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1057 
1058 	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
1059 					SBAL_FLAGS0_TYPE_WRITE_READ,
1060 					sg_resp, max_sbals);
1061 	if (bytes <= 0)
1062 		return -EIO;
1063 
1064 	/* common settings for ct/gs and els requests */
1065 	req->qtcb->bottom.support.resp_buf_length = bytes;
1066 	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1067 	req->qtcb->bottom.support.timeout = 2 * R_A_TOV;
1068 	zfcp_fsf_start_timer(req, 2 * R_A_TOV + 10);
1069 
1070 	return 0;
1071 }
1072 
1073 /**
1074  * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1075  * @ct: pointer to struct zfcp_send_ct with data for request
1076  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1077  */
1078 int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
1079 {
1080 	struct zfcp_wka_port *wka_port = ct->wka_port;
1081 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1082 	struct zfcp_fsf_req *req;
1083 	int ret = -EIO;
1084 
1085 	spin_lock_bh(&qdio->req_q_lock);
1086 	if (zfcp_fsf_req_sbal_get(qdio))
1087 		goto out;
1088 
1089 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool);
1090 
1091 	if (IS_ERR(req)) {
1092 		ret = PTR_ERR(req);
1093 		goto out;
1094 	}
1095 
1096 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1097 	ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp,
1098 					  FSF_MAX_SBALS_PER_REQ);
1099 	if (ret)
1100 		goto failed_send;
1101 
1102 	req->handler = zfcp_fsf_send_ct_handler;
1103 	req->qtcb->header.port_handle = wka_port->handle;
1104 	req->data = ct;
1105 
1106 	zfcp_dbf_san_ct_request(req);
1107 
1108 	ret = zfcp_fsf_req_send(req);
1109 	if (ret)
1110 		goto failed_send;
1111 
1112 	goto out;
1113 
1114 failed_send:
1115 	zfcp_fsf_req_free(req);
1116 out:
1117 	spin_unlock_bh(&qdio->req_q_lock);
1118 	return ret;
1119 }
1120 
/* evaluate the FSF status of a completed ELS request and notify caller */
static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_send_els *send_els = req->data;
	struct zfcp_port *port = send_els->port;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	/* pessimistic default, replaced with 0 only on FSF_GOOD */
	send_els->status = -EINVAL;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto skip_fsfstatus;

	switch (header->fsf_status) {
	case FSF_GOOD:
		zfcp_dbf_san_els_response(req);
		send_els->status = 0;
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]){
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* an ADISC is itself a link test, do not nest one */
			if (port && (send_els->ls_code != ZFCP_LS_ADISC))
				zfcp_fc_test_link(port);
			/*fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		case FSF_SQ_RETRY_IF_POSSIBLE:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_ELS_COMMAND_REJECTED:
	case FSF_PAYLOAD_SIZE_MISMATCH:
	case FSF_REQUEST_SIZE_TOO_LARGE:
	case FSF_RESPONSE_SIZE_TOO_LARGE:
		break;
	case FSF_ACCESS_DENIED:
		/* port may be NULL for ELS to a WKA address */
		if (port)
			zfcp_fsf_access_denied_port(req, port);
		break;
	case FSF_SBAL_MISMATCH:
		/* should never occur, avoided in zfcp_fsf_send_els */
		/* fall through */
	default:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}
skip_fsfstatus:
	if (send_els->handler)
		send_els->handler(send_els->handler_data);
}
1172 
1173 /**
1174  * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1175  * @els: pointer to struct zfcp_send_els with data for the command
1176  */
1177 int zfcp_fsf_send_els(struct zfcp_send_els *els)
1178 {
1179 	struct zfcp_fsf_req *req;
1180 	struct zfcp_qdio *qdio = els->adapter->qdio;
1181 	int ret = -EIO;
1182 
1183 	spin_lock_bh(&qdio->req_q_lock);
1184 	if (zfcp_fsf_req_sbal_get(qdio))
1185 		goto out;
1186 
1187 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL);
1188 
1189 	if (IS_ERR(req)) {
1190 		ret = PTR_ERR(req);
1191 		goto out;
1192 	}
1193 
1194 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1195 	ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2);
1196 
1197 	if (ret)
1198 		goto failed_send;
1199 
1200 	req->qtcb->bottom.support.d_id = els->d_id;
1201 	req->handler = zfcp_fsf_send_els_handler;
1202 	req->data = els;
1203 
1204 	zfcp_dbf_san_els_request(req);
1205 
1206 	ret = zfcp_fsf_req_send(req);
1207 	if (ret)
1208 		goto failed_send;
1209 
1210 	goto out;
1211 
1212 failed_send:
1213 	zfcp_fsf_req_free(req);
1214 out:
1215 	spin_unlock_bh(&qdio->req_q_lock);
1216 	return ret;
1217 }
1218 
1219 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1220 {
1221 	struct qdio_buffer_element *sbale;
1222 	struct zfcp_fsf_req *req;
1223 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1224 	int retval = -EIO;
1225 
1226 	spin_lock_bh(&qdio->req_q_lock);
1227 	if (zfcp_fsf_req_sbal_get(qdio))
1228 		goto out;
1229 
1230 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1231 				  qdio->adapter->pool.erp_req);
1232 
1233 	if (IS_ERR(req)) {
1234 		retval = PTR_ERR(req);
1235 		goto out;
1236 	}
1237 
1238 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1239 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1240 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1241 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1242 
1243 	req->qtcb->bottom.config.feature_selection =
1244 			FSF_FEATURE_CFDC |
1245 			FSF_FEATURE_LUN_SHARING |
1246 			FSF_FEATURE_NOTIFICATION_LOST |
1247 			FSF_FEATURE_UPDATE_ALERT;
1248 	req->erp_action = erp_action;
1249 	req->handler = zfcp_fsf_exchange_config_data_handler;
1250 	erp_action->fsf_req = req;
1251 
1252 	zfcp_fsf_start_erp_timer(req);
1253 	retval = zfcp_fsf_req_send(req);
1254 	if (retval) {
1255 		zfcp_fsf_req_free(req);
1256 		erp_action->fsf_req = NULL;
1257 	}
1258 out:
1259 	spin_unlock_bh(&qdio->req_q_lock);
1260 	return retval;
1261 }
1262 
/**
 * zfcp_fsf_exchange_config_data_sync - get config data, wait for completion
 * @qdio: pointer to struct zfcp_qdio
 * @data: optional buffer for the returned config bottom; may be NULL
 * Returns: 0 on success, error otherwise
 *
 * Unlike the erp variant above, this sends the request without an
 * erp action and blocks until the response arrives.
 */
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
				       struct fsf_qtcb_bottom_config *data)
{
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out_unlock;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out_unlock;
	}

	/* config data is returned in the QTCB; no extra data buffers */
	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
	req->handler = zfcp_fsf_exchange_config_data_handler;

	/* advertise the optional FSF features this driver supports */
	req->qtcb->bottom.config.feature_selection =
			FSF_FEATURE_CFDC |
			FSF_FEATURE_LUN_SHARING |
			FSF_FEATURE_NOTIFICATION_LOST |
			FSF_FEATURE_UPDATE_ALERT;

	if (data)
		req->data = data;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	/* drop the lock before sleeping; only wait if the send succeeded */
	spin_unlock_bh(&qdio->req_q_lock);
	if (!retval)
		wait_for_completion(&req->completion);

	zfcp_fsf_req_free(req);
	return retval;

out_unlock:
	spin_unlock_bh(&qdio->req_q_lock);
	return retval;
}
1308 
1309 /**
1310  * zfcp_fsf_exchange_port_data - request information about local port
1311  * @erp_action: ERP action for the adapter for which port data is requested
1312  * Returns: 0 on success, error otherwise
1313  */
1314 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1315 {
1316 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1317 	struct qdio_buffer_element *sbale;
1318 	struct zfcp_fsf_req *req;
1319 	int retval = -EIO;
1320 
1321 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1322 		return -EOPNOTSUPP;
1323 
1324 	spin_lock_bh(&qdio->req_q_lock);
1325 	if (zfcp_fsf_req_sbal_get(qdio))
1326 		goto out;
1327 
1328 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1329 				  qdio->adapter->pool.erp_req);
1330 
1331 	if (IS_ERR(req)) {
1332 		retval = PTR_ERR(req);
1333 		goto out;
1334 	}
1335 
1336 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1337 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1338 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1339 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1340 
1341 	req->handler = zfcp_fsf_exchange_port_data_handler;
1342 	req->erp_action = erp_action;
1343 	erp_action->fsf_req = req;
1344 
1345 	zfcp_fsf_start_erp_timer(req);
1346 	retval = zfcp_fsf_req_send(req);
1347 	if (retval) {
1348 		zfcp_fsf_req_free(req);
1349 		erp_action->fsf_req = NULL;
1350 	}
1351 out:
1352 	spin_unlock_bh(&qdio->req_q_lock);
1353 	return retval;
1354 }
1355 
1356 /**
1357  * zfcp_fsf_exchange_port_data_sync - request information about local port
1358  * @qdio: pointer to struct zfcp_qdio
1359  * @data: pointer to struct fsf_qtcb_bottom_port
1360  * Returns: 0 on success, error otherwise
1361  */
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
				     struct fsf_qtcb_bottom_port *data)
{
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	/* the command only exists with HBA API management support */
	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
		return -EOPNOTSUPP;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out_unlock;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out_unlock;
	}

	/* optional caller buffer for the returned port bottom */
	if (data)
		req->data = data;

	/* port data is returned in the QTCB; no extra data buffers */
	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

	req->handler = zfcp_fsf_exchange_port_data_handler;
	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	/* drop the lock before sleeping; only wait if the send succeeded */
	spin_unlock_bh(&qdio->req_q_lock);

	if (!retval)
		wait_for_completion(&req->completion);

	zfcp_fsf_req_free(req);

	return retval;

out_unlock:
	spin_unlock_bh(&qdio->req_q_lock);
	return retval;
}
1406 
/* evaluate the response to an open-port request and update port state */
static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct fsf_plogi *plogi;

	/* on a dismissed request only the port reference is dropped */
	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto out;

	switch (header->fsf_status) {
	case FSF_PORT_ALREADY_OPEN:
		break;
	case FSF_ACCESS_DENIED:
		zfcp_fsf_access_denied_port(req, port);
		break;
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "Not enough FCP adapter resources to open "
			 "remote port 0x%016Lx\n",
			 (unsigned long long)port->wwpn);
		zfcp_erp_port_failed(port, "fsoph_1", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		case FSF_SQ_NO_RETRY_POSSIBLE:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		/* the handle identifies the open port in later requests */
		port->handle = header->port_handle;
		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
		                  ZFCP_STATUS_COMMON_ACCESS_BOXED,
		                  &port->status);
		/* check whether D_ID has changed during open */
		/*
		 * FIXME: This check is not airtight, as the FCP channel does
		 * not monitor closures of target port connections caused on
		 * the remote side. Thus, they might miss out on invalidating
		 * locally cached WWPNs (and other N_Port parameters) of gone
		 * target ports. So, our heroic attempt to make things safe
		 * could be undermined by 'open port' response data tagged with
		 * obsolete WWPNs. Another reason to monitor potential
		 * connection closures ourself at least (by interpreting
		 * incoming ELS' and unsolicited status). It just crosses my
		 * mind that one should be able to cross-check by means of
		 * another GID_PN straight after a port has been opened.
		 * Alternately, an ADISC/PDISC ELS should suffice, as well.
		 */
		plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
		if (req->qtcb->bottom.support.els1_length >=
		    FSF_PLOGI_MIN_LEN) {
			/* a WWPN mismatch invalidates the cached D_ID */
			if (plogi->serv_param.wwpn != port->wwpn)
				port->d_id = 0;
			else {
				port->wwnn = plogi->serv_param.wwnn;
				zfcp_fc_plogi_evaluate(port, plogi);
			}
		}
		break;
	case FSF_UNKNOWN_OP_SUBTYPE:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}

out:
	/* balances the zfcp_port_get taken in zfcp_fsf_open_port */
	zfcp_port_put(port);
}
1480 
1481 /**
1482  * zfcp_fsf_open_port - create and send open port request
1483  * @erp_action: pointer to struct zfcp_erp_action
1484  * Returns: 0 on success, error otherwise
1485  */
1486 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1487 {
1488 	struct qdio_buffer_element *sbale;
1489 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1490 	struct zfcp_port *port = erp_action->port;
1491 	struct zfcp_fsf_req *req;
1492 	int retval = -EIO;
1493 
1494 	spin_lock_bh(&qdio->req_q_lock);
1495 	if (zfcp_fsf_req_sbal_get(qdio))
1496 		goto out;
1497 
1498 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1499 				  qdio->adapter->pool.erp_req);
1500 
1501 	if (IS_ERR(req)) {
1502 		retval = PTR_ERR(req);
1503 		goto out;
1504 	}
1505 
1506 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1507 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1508         sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1509         sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1510 
1511 	req->handler = zfcp_fsf_open_port_handler;
1512 	req->qtcb->bottom.support.d_id = port->d_id;
1513 	req->data = port;
1514 	req->erp_action = erp_action;
1515 	erp_action->fsf_req = req;
1516 	zfcp_port_get(port);
1517 
1518 	zfcp_fsf_start_erp_timer(req);
1519 	retval = zfcp_fsf_req_send(req);
1520 	if (retval) {
1521 		zfcp_fsf_req_free(req);
1522 		erp_action->fsf_req = NULL;
1523 		zfcp_port_put(port);
1524 	}
1525 out:
1526 	spin_unlock_bh(&qdio->req_q_lock);
1527 	return retval;
1528 }
1529 
1530 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1531 {
1532 	struct zfcp_port *port = req->data;
1533 
1534 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1535 		return;
1536 
1537 	switch (req->qtcb->header.fsf_status) {
1538 	case FSF_PORT_HANDLE_NOT_VALID:
1539 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req);
1540 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1541 		break;
1542 	case FSF_ADAPTER_STATUS_AVAILABLE:
1543 		break;
1544 	case FSF_GOOD:
1545 		zfcp_erp_modify_port_status(port, "fscph_2", req,
1546 					    ZFCP_STATUS_COMMON_OPEN,
1547 					    ZFCP_CLEAR);
1548 		break;
1549 	}
1550 }
1551 
1552 /**
1553  * zfcp_fsf_close_port - create and send close port request
1554  * @erp_action: pointer to struct zfcp_erp_action
1555  * Returns: 0 on success, error otherwise
1556  */
1557 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1558 {
1559 	struct qdio_buffer_element *sbale;
1560 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1561 	struct zfcp_fsf_req *req;
1562 	int retval = -EIO;
1563 
1564 	spin_lock_bh(&qdio->req_q_lock);
1565 	if (zfcp_fsf_req_sbal_get(qdio))
1566 		goto out;
1567 
1568 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1569 				  qdio->adapter->pool.erp_req);
1570 
1571 	if (IS_ERR(req)) {
1572 		retval = PTR_ERR(req);
1573 		goto out;
1574 	}
1575 
1576 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1577 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1578 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1579 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1580 
1581 	req->handler = zfcp_fsf_close_port_handler;
1582 	req->data = erp_action->port;
1583 	req->erp_action = erp_action;
1584 	req->qtcb->header.port_handle = erp_action->port->handle;
1585 	erp_action->fsf_req = req;
1586 
1587 	zfcp_fsf_start_erp_timer(req);
1588 	retval = zfcp_fsf_req_send(req);
1589 	if (retval) {
1590 		zfcp_fsf_req_free(req);
1591 		erp_action->fsf_req = NULL;
1592 	}
1593 out:
1594 	spin_unlock_bh(&qdio->req_q_lock);
1595 	return retval;
1596 }
1597 
/* evaluate the response to an open request for a well-known-address port */
static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_wka_port *wka_port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
		wka_port->status = ZFCP_WKA_PORT_OFFLINE;
		goto out;
	}

	/* note the deliberate fall-through cascades below:
	 * exceeded -> available -> denied all end up offline,
	 * good -> already-open both end up online */
	switch (header->fsf_status) {
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
		/* fall through */
	case FSF_ADAPTER_STATUS_AVAILABLE:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		/* fall through */
	case FSF_ACCESS_DENIED:
		wka_port->status = ZFCP_WKA_PORT_OFFLINE;
		break;
	case FSF_GOOD:
		/* the handle identifies the open WKA port in later requests */
		wka_port->handle = header->port_handle;
		/* fall through */
	case FSF_PORT_ALREADY_OPEN:
		wka_port->status = ZFCP_WKA_PORT_ONLINE;
	}
out:
	/* wake up waiters for the port status transition */
	wake_up(&wka_port->completion_wq);
}
1628 
1629 /**
1630  * zfcp_fsf_open_wka_port - create and send open wka-port request
1631  * @wka_port: pointer to struct zfcp_wka_port
1632  * Returns: 0 on success, error otherwise
1633  */
1634 int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
1635 {
1636 	struct qdio_buffer_element *sbale;
1637 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1638 	struct zfcp_fsf_req *req;
1639 	int retval = -EIO;
1640 
1641 	spin_lock_bh(&qdio->req_q_lock);
1642 	if (zfcp_fsf_req_sbal_get(qdio))
1643 		goto out;
1644 
1645 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1646 				  qdio->adapter->pool.erp_req);
1647 
1648 	if (unlikely(IS_ERR(req))) {
1649 		retval = PTR_ERR(req);
1650 		goto out;
1651 	}
1652 
1653 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1654 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1655 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1656 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1657 
1658 	req->handler = zfcp_fsf_open_wka_port_handler;
1659 	req->qtcb->bottom.support.d_id = wka_port->d_id;
1660 	req->data = wka_port;
1661 
1662 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1663 	retval = zfcp_fsf_req_send(req);
1664 	if (retval)
1665 		zfcp_fsf_req_free(req);
1666 out:
1667 	spin_unlock_bh(&qdio->req_q_lock);
1668 	return retval;
1669 }
1670 
1671 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1672 {
1673 	struct zfcp_wka_port *wka_port = req->data;
1674 
1675 	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1676 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1677 		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
1678 	}
1679 
1680 	wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1681 	wake_up(&wka_port->completion_wq);
1682 }
1683 
1684 /**
1685  * zfcp_fsf_close_wka_port - create and send close wka port request
 * @wka_port: pointer to struct zfcp_wka_port
1687  * Returns: 0 on success, error otherwise
1688  */
1689 int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
1690 {
1691 	struct qdio_buffer_element *sbale;
1692 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1693 	struct zfcp_fsf_req *req;
1694 	int retval = -EIO;
1695 
1696 	spin_lock_bh(&qdio->req_q_lock);
1697 	if (zfcp_fsf_req_sbal_get(qdio))
1698 		goto out;
1699 
1700 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1701 				  qdio->adapter->pool.erp_req);
1702 
1703 	if (unlikely(IS_ERR(req))) {
1704 		retval = PTR_ERR(req);
1705 		goto out;
1706 	}
1707 
1708 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1709 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1710 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1711 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1712 
1713 	req->handler = zfcp_fsf_close_wka_port_handler;
1714 	req->data = wka_port;
1715 	req->qtcb->header.port_handle = wka_port->handle;
1716 
1717 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1718 	retval = zfcp_fsf_req_send(req);
1719 	if (retval)
1720 		zfcp_fsf_req_free(req);
1721 out:
1722 	spin_unlock_bh(&qdio->req_q_lock);
1723 	return retval;
1724 }
1725 
/* evaluate the response to a close-physical-port request */
static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct zfcp_unit *unit;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (header->fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		/* stale port handle, recover the whole adapter */
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ACCESS_DENIED:
		zfcp_fsf_access_denied_port(req, port);
		break;
	case FSF_PORT_BOXED:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		list_for_each_entry(unit, &port->unit_list_head, list)
			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
					  &unit->status);
		zfcp_erp_port_boxed(port, "fscpph2", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			       ZFCP_STATUS_FSFREQ_RETRY;

		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
		 */
		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		list_for_each_entry(unit, &port->unit_list_head, list)
			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
					  &unit->status);
		break;
	}
}
1775 
1776 /**
1777  * zfcp_fsf_close_physical_port - close physical port
1778  * @erp_action: pointer to struct zfcp_erp_action
1779  * Returns: 0 on success
1780  */
1781 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1782 {
1783 	struct qdio_buffer_element *sbale;
1784 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1785 	struct zfcp_fsf_req *req;
1786 	int retval = -EIO;
1787 
1788 	spin_lock_bh(&qdio->req_q_lock);
1789 	if (zfcp_fsf_req_sbal_get(qdio))
1790 		goto out;
1791 
1792 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1793 				  qdio->adapter->pool.erp_req);
1794 
1795 	if (IS_ERR(req)) {
1796 		retval = PTR_ERR(req);
1797 		goto out;
1798 	}
1799 
1800 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1801 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1802 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1803 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1804 
1805 	req->data = erp_action->port;
1806 	req->qtcb->header.port_handle = erp_action->port->handle;
1807 	req->erp_action = erp_action;
1808 	req->handler = zfcp_fsf_close_physical_port_handler;
1809 	erp_action->fsf_req = req;
1810 
1811 	zfcp_fsf_start_erp_timer(req);
1812 	retval = zfcp_fsf_req_send(req);
1813 	if (retval) {
1814 		zfcp_fsf_req_free(req);
1815 		erp_action->fsf_req = NULL;
1816 	}
1817 out:
1818 	spin_unlock_bh(&qdio->req_q_lock);
1819 	return retval;
1820 }
1821 
/* evaluate the response to an open-LUN request and update unit state */
static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_unit *unit = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
	struct fsf_queue_designator *queue_designator =
				&header->fsf_status_qual.fsf_queue_designator;
	int exclusive, readwrite;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	/* start from a clean slate; flags are re-set below as needed */
	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
			  ZFCP_STATUS_COMMON_ACCESS_BOXED |
			  ZFCP_STATUS_UNIT_SHARED |
			  ZFCP_STATUS_UNIT_READONLY,
			  &unit->status);

	switch (header->fsf_status) {

	case FSF_PORT_HANDLE_NOT_VALID:
		/* stale port handle, recover the whole adapter */
		zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req);
		/* fall through */
	case FSF_LUN_ALREADY_OPEN:
		break;
	case FSF_ACCESS_DENIED:
		zfcp_fsf_access_denied_unit(req, unit);
		atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
		atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_port_boxed(unit->port, "fsouh_2", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			       ZFCP_STATUS_FSFREQ_RETRY;
		break;
	case FSF_LUN_SHARING_VIOLATION:
		/* word[0] set: another CSS/MIF image holds the LUN */
		if (header->fsf_status_qual.word[0])
			dev_warn(&adapter->ccw_device->dev,
				 "LUN 0x%Lx on port 0x%Lx is already in "
				 "use by CSS%d, MIF Image ID %x\n",
				 (unsigned long long)unit->fcp_lun,
				 (unsigned long long)unit->port->wwpn,
				 queue_designator->cssid,
				 queue_designator->hla);
		else
			zfcp_act_eval_err(adapter,
					  header->fsf_status_qual.word[2]);
		zfcp_erp_unit_access_denied(unit, "fsouh_3", req);
		atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
		atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
		dev_warn(&adapter->ccw_device->dev,
			 "No handle is available for LUN "
			 "0x%016Lx on port 0x%016Lx\n",
			 (unsigned long long)unit->fcp_lun,
			 (unsigned long long)unit->port->wwpn);
		zfcp_erp_unit_failed(unit, "fsouh_4", req);
		/* fall through */
	case FSF_INVALID_COMMAND_OPTION:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(unit->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;

	case FSF_GOOD:
		/* the handle identifies the open LUN in later requests */
		unit->handle = header->lun_handle;
		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);

		/* without NPIV the LUN may be shared between images;
		 * evaluate the access mode reported by the channel */
		if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
		    (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
		    !zfcp_ccw_priv_sch(adapter)) {
			exclusive = (bottom->lun_access_info &
					FSF_UNIT_ACCESS_EXCLUSIVE);
			readwrite = (bottom->lun_access_info &
					FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);

			if (!exclusive)
		                atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
						&unit->status);

			if (!readwrite) {
                		atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
						&unit->status);
				dev_info(&adapter->ccw_device->dev,
					 "SCSI device at LUN 0x%016Lx on port "
					 "0x%016Lx opened read-only\n",
					 (unsigned long long)unit->fcp_lun,
					 (unsigned long long)unit->port->wwpn);
        		}

			/* the two remaining mode combinations are unusable */
        		if (exclusive && !readwrite) {
				dev_err(&adapter->ccw_device->dev,
					"Exclusive read-only access not "
					"supported (unit 0x%016Lx, "
					"port 0x%016Lx)\n",
					(unsigned long long)unit->fcp_lun,
					(unsigned long long)unit->port->wwpn);
				zfcp_erp_unit_failed(unit, "fsouh_5", req);
				req->status |= ZFCP_STATUS_FSFREQ_ERROR;
				zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req);
        		} else if (!exclusive && readwrite) {
				dev_err(&adapter->ccw_device->dev,
					"Shared read-write access not "
					"supported (unit 0x%016Lx, port "
					"0x%016Lx)\n",
					(unsigned long long)unit->fcp_lun,
					(unsigned long long)unit->port->wwpn);
				zfcp_erp_unit_failed(unit, "fsouh_7", req);
				req->status |= ZFCP_STATUS_FSFREQ_ERROR;
				zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req);
        		}
		}
		break;
	}
}
1948 
1949 /**
1950  * zfcp_fsf_open_unit - open unit
1951  * @erp_action: pointer to struct zfcp_erp_action
1952  * Returns: 0 on success, error otherwise
1953  */
1954 int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1955 {
1956 	struct qdio_buffer_element *sbale;
1957 	struct zfcp_adapter *adapter = erp_action->adapter;
1958 	struct zfcp_qdio *qdio = adapter->qdio;
1959 	struct zfcp_fsf_req *req;
1960 	int retval = -EIO;
1961 
1962 	spin_lock_bh(&qdio->req_q_lock);
1963 	if (zfcp_fsf_req_sbal_get(qdio))
1964 		goto out;
1965 
1966 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1967 				  adapter->pool.erp_req);
1968 
1969 	if (IS_ERR(req)) {
1970 		retval = PTR_ERR(req);
1971 		goto out;
1972 	}
1973 
1974 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1975 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1976         sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1977         sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1978 
1979 	req->qtcb->header.port_handle = erp_action->port->handle;
1980 	req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
1981 	req->handler = zfcp_fsf_open_unit_handler;
1982 	req->data = erp_action->unit;
1983 	req->erp_action = erp_action;
1984 	erp_action->fsf_req = req;
1985 
1986 	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1987 		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1988 
1989 	zfcp_fsf_start_erp_timer(req);
1990 	retval = zfcp_fsf_req_send(req);
1991 	if (retval) {
1992 		zfcp_fsf_req_free(req);
1993 		erp_action->fsf_req = NULL;
1994 	}
1995 out:
1996 	spin_unlock_bh(&qdio->req_q_lock);
1997 	return retval;
1998 }
1999 
2000 static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
2001 {
2002 	struct zfcp_unit *unit = req->data;
2003 
2004 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2005 		return;
2006 
2007 	switch (req->qtcb->header.fsf_status) {
2008 	case FSF_PORT_HANDLE_NOT_VALID:
2009 		zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req);
2010 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2011 		break;
2012 	case FSF_LUN_HANDLE_NOT_VALID:
2013 		zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req);
2014 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2015 		break;
2016 	case FSF_PORT_BOXED:
2017 		zfcp_erp_port_boxed(unit->port, "fscuh_3", req);
2018 		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2019 			       ZFCP_STATUS_FSFREQ_RETRY;
2020 		break;
2021 	case FSF_ADAPTER_STATUS_AVAILABLE:
2022 		switch (req->qtcb->header.fsf_status_qual.word[0]) {
2023 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2024 			zfcp_fc_test_link(unit->port);
2025 			/* fall through */
2026 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2027 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2028 			break;
2029 		}
2030 		break;
2031 	case FSF_GOOD:
2032 		atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
2033 		break;
2034 	}
2035 }
2036 
2037 /**
2038  * zfcp_fsf_close_unit - close zfcp unit
2039  * @erp_action: pointer to struct zfcp_unit
2040  * Returns: 0 on success, error otherwise
2041  */
2042 int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
2043 {
2044 	struct qdio_buffer_element *sbale;
2045 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
2046 	struct zfcp_fsf_req *req;
2047 	int retval = -EIO;
2048 
2049 	spin_lock_bh(&qdio->req_q_lock);
2050 	if (zfcp_fsf_req_sbal_get(qdio))
2051 		goto out;
2052 
2053 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
2054 				  qdio->adapter->pool.erp_req);
2055 
2056 	if (IS_ERR(req)) {
2057 		retval = PTR_ERR(req);
2058 		goto out;
2059 	}
2060 
2061 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2062 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
2063 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2064 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2065 
2066 	req->qtcb->header.port_handle = erp_action->port->handle;
2067 	req->qtcb->header.lun_handle = erp_action->unit->handle;
2068 	req->handler = zfcp_fsf_close_unit_handler;
2069 	req->data = erp_action->unit;
2070 	req->erp_action = erp_action;
2071 	erp_action->fsf_req = req;
2072 
2073 	zfcp_fsf_start_erp_timer(req);
2074 	retval = zfcp_fsf_req_send(req);
2075 	if (retval) {
2076 		zfcp_fsf_req_free(req);
2077 		erp_action->fsf_req = NULL;
2078 	}
2079 out:
2080 	spin_unlock_bh(&qdio->req_q_lock);
2081 	return retval;
2082 }
2083 
2084 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
2085 {
2086 	lat_rec->sum += lat;
2087 	lat_rec->min = min(lat_rec->min, lat);
2088 	lat_rec->max = max(lat_rec->max, lat);
2089 }
2090 
2091 static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
2092 {
2093 	struct fsf_qual_latency_info *lat_inf;
2094 	struct latency_cont *lat;
2095 	struct zfcp_unit *unit = req->unit;
2096 
2097 	lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info;
2098 
2099 	switch (req->qtcb->bottom.io.data_direction) {
2100 	case FSF_DATADIR_READ:
2101 		lat = &unit->latencies.read;
2102 		break;
2103 	case FSF_DATADIR_WRITE:
2104 		lat = &unit->latencies.write;
2105 		break;
2106 	case FSF_DATADIR_CMND:
2107 		lat = &unit->latencies.cmd;
2108 		break;
2109 	default:
2110 		return;
2111 	}
2112 
2113 	spin_lock(&unit->latencies.lock);
2114 	zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
2115 	zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
2116 	lat->counter++;
2117 	spin_unlock(&unit->latencies.lock);
2118 }
2119 
#ifdef CONFIG_BLK_DEV_IO_TRACE
/* Attach zfcp driver data (latencies, queue usage, error flag) to the
 * blktrace stream of the block request behind this SCSI command. */
static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
{
	struct fsf_qual_latency_info *lat_inf;
	struct scsi_cmnd *scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
	struct request *req = scsi_cmnd->request;
	struct zfcp_blk_drv_data trace;
	int ticks = fsf_req->adapter->timer_ticks;

	trace.flags = 0;
	trace.magic = ZFCP_BLK_DRV_DATA_MAGIC;
	if (fsf_req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
		/* latencies are reported by hardware in adapter timer ticks */
		trace.flags |= ZFCP_BLK_LAT_VALID;
		lat_inf = &fsf_req->qtcb->prefix.prot_status_qual.latency_info;
		trace.channel_lat = lat_inf->channel_lat * ticks;
		trace.fabric_lat = lat_inf->fabric_lat * ticks;
	}
	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
		trace.flags |= ZFCP_BLK_REQ_ERROR;
	trace.inb_usage = fsf_req->queue_req.qdio_inb_usage;
	trace.outb_usage = fsf_req->queue_req.qdio_outb_usage;

	blk_add_driver_data(req->q, req, &trace, sizeof(trace));
}
#else
/* blktrace support not configured: no-op stub */
static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
{
}
#endif
2149 
2150 static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2151 {
2152 	struct scsi_cmnd *scpnt;
2153 	struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
2154 	    &(req->qtcb->bottom.io.fcp_rsp);
2155 	u32 sns_len;
2156 	char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
2157 	unsigned long flags;
2158 
2159 	read_lock_irqsave(&req->adapter->abort_lock, flags);
2160 
2161 	scpnt = req->data;
2162 	if (unlikely(!scpnt)) {
2163 		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2164 		return;
2165 	}
2166 
2167 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
2168 		set_host_byte(scpnt, DID_SOFT_ERROR);
2169 		goto skip_fsfstatus;
2170 	}
2171 
2172 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2173 		set_host_byte(scpnt, DID_ERROR);
2174 		goto skip_fsfstatus;
2175 	}
2176 
2177 	set_msg_byte(scpnt, COMMAND_COMPLETE);
2178 
2179 	scpnt->result |= fcp_rsp_iu->scsi_status;
2180 
2181 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
2182 		zfcp_fsf_req_latency(req);
2183 
2184 	zfcp_fsf_trace_latency(req);
2185 
2186 	if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
2187 		if (fcp_rsp_info[3] == RSP_CODE_GOOD)
2188 			set_host_byte(scpnt, DID_OK);
2189 		else {
2190 			set_host_byte(scpnt, DID_ERROR);
2191 			goto skip_fsfstatus;
2192 		}
2193 	}
2194 
2195 	if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) {
2196 		sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
2197 			  fcp_rsp_iu->fcp_rsp_len;
2198 		sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
2199 		sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
2200 
2201 		memcpy(scpnt->sense_buffer,
2202 		       zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
2203 	}
2204 
2205 	if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
2206 		scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
2207 		if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
2208 		    scpnt->underflow)
2209 			set_host_byte(scpnt, DID_ERROR);
2210 	}
2211 skip_fsfstatus:
2212 	if (scpnt->result != 0)
2213 		zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
2214 	else if (scpnt->retries > 0)
2215 		zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
2216 	else
2217 		zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);
2218 
2219 	scpnt->host_scribble = NULL;
2220 	(scpnt->scsi_done) (scpnt);
2221 	/*
2222 	 * We must hold this lock until scsi_done has been called.
2223 	 * Otherwise we may call scsi_done after abort regarding this
2224 	 * command has completed.
2225 	 * Note: scsi_done must not block!
2226 	 */
2227 	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2228 }
2229 
2230 static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
2231 {
2232 	struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
2233 	    &(req->qtcb->bottom.io.fcp_rsp);
2234 	char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
2235 
2236 	if ((fcp_rsp_info[3] != RSP_CODE_GOOD) ||
2237 	     (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2238 		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2239 }
2240 
2241 
/* Common completion handler for FCP commands and task management
 * requests: evaluate the FSF status, trigger error recovery where
 * needed, then dispatch to the task or TM specific handler. */
static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_unit *unit;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	/* TM requests carry the unit in req->data, others in req->unit */
	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
		unit = req->data;
	else
		unit = req->unit;

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
		goto skip_fsfstatus;

	switch (header->fsf_status) {
	case FSF_HANDLE_MISMATCH:
	case FSF_PORT_HANDLE_NOT_VALID:
		/* stale handles: recover the whole adapter */
		zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_FCPLUN_NOT_VALID:
	case FSF_LUN_HANDLE_NOT_VALID:
		/* stale LUN information: recover the port */
		zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_ACCESS_DENIED:
		zfcp_fsf_access_denied_unit(req, unit);
		break;
	case FSF_DIRECTION_INDICATOR_NOT_VALID:
		/* driver bug or corruption: take the adapter down */
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect direction %d, unit 0x%016Lx on port "
			"0x%016Lx closed\n",
			req->qtcb->bottom.io.data_direction,
			(unsigned long long)unit->fcp_lun,
			(unsigned long long)unit->port->wwpn);
		zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3",
					  req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_CMND_LENGTH_NOT_VALID:
		/* malformed FCP_CMND length: take the adapter down */
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect CDB length %d, unit 0x%016Lx on "
			"port 0x%016Lx closed\n",
			req->qtcb->bottom.io.fcp_cmnd_length,
			(unsigned long long)unit->fcp_lun,
			(unsigned long long)unit->port->wwpn);
		zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4",
					  req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_port_boxed(unit->port, "fssfch5", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			       ZFCP_STATUS_FSFREQ_RETRY;
		break;
	case FSF_LUN_BOXED:
		zfcp_erp_unit_boxed(unit, "fssfch6", req);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
			       ZFCP_STATUS_FSFREQ_RETRY;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		if (header->fsf_status_qual.word[0] ==
		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
			zfcp_fc_test_link(unit->port);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}
skip_fsfstatus:
	if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
		zfcp_fsf_send_fcp_ctm_handler(req);
	else {
		zfcp_fsf_send_fcp_command_task_handler(req);
		/* drop the unit reference taken when the command was sent */
		req->unit = NULL;
		zfcp_unit_put(unit);
	}
}
2320 
2321 static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
2322 {
2323 	u32 *fcp_dl_ptr;
2324 
2325 	/*
2326 	 * fcp_dl_addr = start address of fcp_cmnd structure +
2327 	 * size of fixed part + size of dynamically sized add_dcp_cdb field
2328 	 * SEE FCP-2 documentation
2329 	 */
2330 	fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
2331 			(fcp_cmd->add_fcp_cdb_length << 2));
2332 	*fcp_dl_ptr = fcp_dl;
2333 }
2334 
/**
 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
 * @unit: unit where command is sent to
 * @scsi_cmnd: scsi command to be sent
 *
 * Returns 0 on success, -EBUSY if the unit is blocked, -EINVAL if the
 * scatter-gather list exceeds the request capacity, or another negative
 * error code if the request could not be created or sent.
 */
int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
				   struct scsi_cmnd *scsi_cmnd)
{
	struct zfcp_fsf_req *req;
	struct fcp_cmnd_iu *fcp_cmnd_iu;
	unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
	int real_bytes, retval = -EIO;
	struct zfcp_adapter *adapter = unit->port->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;

	/* reject new commands while the unit is blocked (e.g. recovery) */
	if (unlikely(!(atomic_read(&unit->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return -EBUSY;

	spin_lock(&qdio->req_q_lock);
	if (atomic_read(&qdio->req_q.count) <= 0) {
		/* no free SBAL available; account queue-full condition */
		atomic_inc(&qdio->req_q_full);
		goto out;
	}

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  adapter->pool.scsi_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	/* hold a unit reference until command completion */
	zfcp_unit_get(unit);
	req->unit = unit;
	req->data = scsi_cmnd;
	req->handler = zfcp_fsf_send_fcp_command_handler;
	req->qtcb->header.lun_handle = unit->handle;
	req->qtcb->header.port_handle = unit->port->handle;
	req->qtcb->bottom.io.service_class = FSF_CLASS_3;

	/* remember the request id so an abort can find this request */
	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;

	fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
	fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
	/*
	 * set depending on data direction:
	 *      data direction bits in SBALE (SB Type)
	 *      data direction bits in QTCB
	 *      data direction bits in FCP_CMND IU
	 */
	switch (scsi_cmnd->sc_data_direction) {
	case DMA_NONE:
		req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
		break;
	case DMA_FROM_DEVICE:
		req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
		fcp_cmnd_iu->rddata = 1;
		break;
	case DMA_TO_DEVICE:
		req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
		sbtype = SBAL_FLAGS0_TYPE_WRITE;
		fcp_cmnd_iu->wddata = 1;
		break;
	case DMA_BIDIRECTIONAL:
		/* bidirectional transfers are not supported */
		goto failed_scsi_cmnd;
	}

	if (likely((scsi_cmnd->device->simple_tags) ||
		   ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) &&
		    (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
		fcp_cmnd_iu->task_attribute = SIMPLE_Q;
	else
		fcp_cmnd_iu->task_attribute = UNTAGGED;

	/* CDBs longer than the fixed part go into the additional CDB field,
	 * length counted in 4-byte words */
	if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
		fcp_cmnd_iu->add_fcp_cdb_length =
			(scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;

	memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);

	/* fixed part + additional CDB + trailing fcp_dl field */
	req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
		fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);

	real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
					     scsi_sglist(scsi_cmnd),
					     FSF_MAX_SBALS_PER_REQ);
	if (unlikely(real_bytes < 0)) {
		if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
			/* data does not fit into one request */
			dev_err(&adapter->ccw_device->dev,
				"Oversize data package, unit 0x%016Lx "
				"on port 0x%016Lx closed\n",
				(unsigned long long)unit->fcp_lun,
				(unsigned long long)unit->port->wwpn);
			zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req);
			retval = -EINVAL;
		}
		goto failed_scsi_cmnd;
	}

	zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);

	retval = zfcp_fsf_req_send(req);
	if (unlikely(retval))
		goto failed_scsi_cmnd;

	goto out;

failed_scsi_cmnd:
	/* undo reference and bookkeeping taken above */
	zfcp_unit_put(unit);
	zfcp_fsf_req_free(req);
	scsi_cmnd->host_scribble = NULL;
out:
	spin_unlock(&qdio->req_q_lock);
	return retval;
}
2452 
/**
 * zfcp_fsf_send_fcp_ctm - send SCSI task management command
 * @unit: pointer to struct zfcp_unit
 * @tm_flags: unsigned byte for task management flags
 * Returns: on success pointer to struct fsf_req, NULL otherwise
 */
struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
{
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *req = NULL;
	struct fcp_cmnd_iu *fcp_cmnd_iu;
	struct zfcp_qdio *qdio = unit->port->adapter->qdio;

	/* no TM request while the unit is blocked (e.g. during recovery) */
	if (unlikely(!(atomic_read(&unit->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return NULL;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  qdio->adapter->pool.scsi_req);

	if (IS_ERR(req)) {
		req = NULL;
		goto out;
	}

	/* mark as TM so the common handler dispatches to the ctm handler */
	req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
	req->data = unit;
	req->handler = zfcp_fsf_send_fcp_command_handler;
	req->qtcb->header.lun_handle = unit->handle;
	req->qtcb->header.port_handle = unit->port->handle;
	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
	req->qtcb->bottom.io.fcp_cmnd_length = 	sizeof(struct fcp_cmnd_iu) +
						sizeof(u32);

	/* no data transfer: single write-type SBALE */
	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;

	fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd;
	fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
	fcp_cmnd_iu->task_management_flags = tm_flags;

	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
	if (!zfcp_fsf_req_send(req))
		goto out;

	/* send failed: release the request and report failure via NULL */
	zfcp_fsf_req_free(req);
	req = NULL;
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return req;
}
2510 
/* Intentionally empty: zfcp_fsf_control_file() waits on req->completion
 * and evaluates the finished request itself, so nothing is done here. */
static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
{
}
2514 
/**
 * zfcp_fsf_control_file - control file upload/download
 * @adapter: pointer to struct zfcp_adapter
 * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
 * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
 */
struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
					   struct zfcp_fsf_cfdc *fsf_cfdc)
{
	struct qdio_buffer_element *sbale;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct zfcp_fsf_req *req = NULL;
	struct fsf_qtcb_bottom_support *bottom;
	int direction, retval = -EIO, bytes;

	if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
		return ERR_PTR(-EOPNOTSUPP);

	/* map the CFDC command to the SBAL transfer direction */
	switch (fsf_cfdc->command) {
	case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
		direction = SBAL_FLAGS0_TYPE_WRITE;
		break;
	case FSF_QTCB_UPLOAD_CONTROL_FILE:
		direction = SBAL_FLAGS0_TYPE_READ;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_fsf_req_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL);
	if (IS_ERR(req)) {
		retval = -EPERM;
		goto out;
	}

	req->handler = zfcp_fsf_control_file_handler;

	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
	sbale[0].flags |= direction;

	bottom = &req->qtcb->bottom.support;
	bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
	bottom->option = fsf_cfdc->option;

	bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req,
					direction, fsf_cfdc->sg,
					FSF_MAX_SBALS_PER_REQ);
	/* the control file buffer must map to exactly ZFCP_CFDC_MAX_SIZE */
	if (bytes != ZFCP_CFDC_MAX_SIZE) {
		zfcp_fsf_req_free(req);
		goto out;
	}

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	/* NOTE(review): if zfcp_fsf_req_send() fails here, req does not
	 * appear to be freed before returning ERR_PTR, unlike the other
	 * send paths in this file — verify for a potential leak. */
	retval = zfcp_fsf_req_send(req);
out:
	spin_unlock_bh(&qdio->req_q_lock);

	if (!retval) {
		/* the handler is a no-op; wait for completion here and
		 * hand the finished request back to the caller, who frees
		 * it after evaluating the QTCB */
		wait_for_completion(&req->completion);
		return req;
	}
	return ERR_PTR(retval);
}
2582 
/**
 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @sbal_idx: response queue index of SBAL to be processed
 *
 * Walks the SBAL elements, looks up each request id in the adapter's
 * request list, and completes the matching requests.  An unknown id
 * indicates memory corruption and halts the machine.
 */
void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *fsf_req;
	unsigned long flags, req_id;
	int idx;

	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {

		sbale = &sbal->element[idx];
		/* the SBALE address field carries the request id */
		req_id = (unsigned long) sbale->addr;
		spin_lock_irqsave(&adapter->req_list_lock, flags);
		fsf_req = zfcp_reqlist_find(adapter, req_id);

		if (!fsf_req)
			/*
			 * Unknown request means that we have potentially memory
			 * corruption and must stop the machine immediately.
			 */
			panic("error: unknown req_id (%lx) on adapter %s.\n",
			      req_id, dev_name(&adapter->ccw_device->dev));

		/* remove from the pending list while still holding the lock */
		list_del(&fsf_req->list);
		spin_unlock_irqrestore(&adapter->req_list_lock, flags);

		fsf_req->queue_req.sbal_response = sbal_idx;
		fsf_req->queue_req.qdio_inb_usage =
			atomic_read(&qdio->resp_q.count);
		zfcp_fsf_req_complete(fsf_req);

		/* last entry flag terminates the SBAL element chain */
		if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
			break;
	}
}
2624