xref: /openbmc/linux/drivers/s390/scsi/zfcp_fsf.c (revision 7dd65feb)
1 /*
2  * zfcp device driver
3  *
4  * Implementation of FSF commands.
5  *
6  * Copyright IBM Corporation 2002, 2009
7  */
8 
9 #define KMSG_COMPONENT "zfcp"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 
12 #include <linux/blktrace_api.h>
13 #include <scsi/fc/fc_els.h>
14 #include "zfcp_ext.h"
15 #include "zfcp_fc.h"
16 #include "zfcp_dbf.h"
17 
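/* called when an FSF request has timed out: recover the adapter via ERP */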
18 static void zfcp_fsf_request_timeout_handler(unsigned long data)
19 {
20 	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
21 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
22 				"fsrth_1", NULL);
23 }
24 
25 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
26 				 unsigned long timeout)
27 {
28 	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
29 	fsf_req->timer.data = (unsigned long) fsf_req->adapter;
30 	fsf_req->timer.expires = jiffies + timeout;
31 	add_timer(&fsf_req->timer);
32 }
33 
34 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
35 {
36 	BUG_ON(!fsf_req->erp_action);
37 	fsf_req->timer.function = zfcp_erp_timeout_handler;
38 	fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
39 	fsf_req->timer.expires = jiffies + 30 * HZ;
40 	add_timer(&fsf_req->timer);
41 }
42 
43 /* association between FSF command and FSF QTCB type */
44 static u32 fsf_qtcb_type[] = {
45 	[FSF_QTCB_FCP_CMND] =             FSF_IO_COMMAND,
46 	[FSF_QTCB_ABORT_FCP_CMND] =       FSF_SUPPORT_COMMAND,
47 	[FSF_QTCB_OPEN_PORT_WITH_DID] =   FSF_SUPPORT_COMMAND,
48 	[FSF_QTCB_OPEN_LUN] =             FSF_SUPPORT_COMMAND,
49 	[FSF_QTCB_CLOSE_LUN] =            FSF_SUPPORT_COMMAND,
50 	[FSF_QTCB_CLOSE_PORT] =           FSF_SUPPORT_COMMAND,
51 	[FSF_QTCB_CLOSE_PHYSICAL_PORT] =  FSF_SUPPORT_COMMAND,
52 	[FSF_QTCB_SEND_ELS] =             FSF_SUPPORT_COMMAND,
53 	[FSF_QTCB_SEND_GENERIC] =         FSF_SUPPORT_COMMAND,
54 	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
55 	[FSF_QTCB_EXCHANGE_PORT_DATA] =   FSF_PORT_COMMAND,
56 	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
57 	[FSF_QTCB_UPLOAD_CONTROL_FILE] =  FSF_SUPPORT_COMMAND
58 };
59 
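/* decode an access control table (ACT) subtable/rule reference from a status qualifier word */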
60 static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
61 {
62 	u16 subtable = table >> 16;
63 	u16 rule = table & 0xffff;
64 	const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
65 
66 	if (subtable && subtable < ARRAY_SIZE(act_type))
67 		dev_warn(&adapter->ccw_device->dev,
68 			 "Access denied according to ACT rule type %s, "
69 			 "rule %d\n", act_type[subtable], rule);
70 }
71 
72 static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
73 					struct zfcp_port *port)
74 {
75 	struct fsf_qtcb_header *header = &req->qtcb->header;
76 	dev_warn(&req->adapter->ccw_device->dev,
77 		 "Access denied to port 0x%016Lx\n",
78 		 (unsigned long long)port->wwpn);
79 	zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
80 	zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
81 	zfcp_erp_port_access_denied(port, "fspad_1", req);
82 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
83 }
84 
85 static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
86 					struct zfcp_unit *unit)
87 {
88 	struct fsf_qtcb_header *header = &req->qtcb->header;
89 	dev_warn(&req->adapter->ccw_device->dev,
90 		 "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
91 		 (unsigned long long)unit->fcp_lun,
92 		 (unsigned long long)unit->port->wwpn);
93 	zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
94 	zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
95 	zfcp_erp_unit_access_denied(unit, "fsuad_1", req);
96 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
97 }
98 
99 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
100 {
101 	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
102 		"operational because of an unsupported FC class\n");
103 	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req);
104 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
105 }
106 
107 /**
108  * zfcp_fsf_req_free - free memory used by fsf request
109  * @req: pointer to struct zfcp_fsf_req to be freed
110  */
111 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
112 {
113 	if (likely(req->pool)) {
114 		if (likely(req->qtcb))
115 			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
116 		mempool_free(req, req->pool);
117 		return;
118 	}
119 
120 	if (likely(req->qtcb))
121 		kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
122 	kfree(req);
123 }
124 
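/* unsolicited status "port closed": look up the port by its D_ID and reopen it */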
125 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
126 {
127 	unsigned long flags;
128 	struct fsf_status_read_buffer *sr_buf = req->data;
129 	struct zfcp_adapter *adapter = req->adapter;
130 	struct zfcp_port *port;
131 	int d_id = ntoh24(sr_buf->d_id);
132 
133 	read_lock_irqsave(&adapter->port_list_lock, flags);
134 	list_for_each_entry(port, &adapter->port_list, list)
135 		if (port->d_id == d_id) {
136 			zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
137 			break;
138 		}
139 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
140 }
141 
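/*
 * Evaluate link down information: block the adapter and its rports,
 * warn about the reported cause and mark the adapter as failed for ERP.
 */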
142 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
143 					 struct fsf_link_down_info *link_down)
144 {
145 	struct zfcp_adapter *adapter = req->adapter;
146 
147 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
148 		return;
149 
150 	atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
151 
152 	zfcp_scsi_schedule_rports_block(adapter);
153 
154 	if (!link_down)
155 		goto out;
156 
157 	switch (link_down->error_code) {
158 	case FSF_PSQ_LINK_NO_LIGHT:
159 		dev_warn(&req->adapter->ccw_device->dev,
160 			 "There is no light signal from the local "
161 			 "fibre channel cable\n");
162 		break;
163 	case FSF_PSQ_LINK_WRAP_PLUG:
164 		dev_warn(&req->adapter->ccw_device->dev,
165 			 "There is a wrap plug instead of a fibre "
166 			 "channel cable\n");
167 		break;
168 	case FSF_PSQ_LINK_NO_FCP:
169 		dev_warn(&req->adapter->ccw_device->dev,
170 			 "The adjacent fibre channel node does not "
171 			 "support FCP\n");
172 		break;
173 	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
174 		dev_warn(&req->adapter->ccw_device->dev,
175 			 "The FCP device is suspended because of a "
176 			 "firmware update\n");
177 		break;
178 	case FSF_PSQ_LINK_INVALID_WWPN:
179 		dev_warn(&req->adapter->ccw_device->dev,
180 			 "The FCP device detected a WWPN that is "
181 			 "duplicate or not valid\n");
182 		break;
183 	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
184 		dev_warn(&req->adapter->ccw_device->dev,
185 			 "The fibre channel fabric does not support NPIV\n");
186 		break;
187 	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
188 		dev_warn(&req->adapter->ccw_device->dev,
189 			 "The FCP adapter cannot support more NPIV ports\n");
190 		break;
191 	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
192 		dev_warn(&req->adapter->ccw_device->dev,
193 			 "The adjacent switch cannot support "
194 			 "more NPIV ports\n");
195 		break;
196 	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
197 		dev_warn(&req->adapter->ccw_device->dev,
198 			 "The FCP adapter could not log in to the "
199 			 "fibre channel fabric\n");
200 		break;
201 	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
202 		dev_warn(&req->adapter->ccw_device->dev,
203 			 "The WWPN assignment file on the FCP adapter "
204 			 "has been damaged\n");
205 		break;
206 	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
207 		dev_warn(&req->adapter->ccw_device->dev,
208 			 "The mode table on the FCP adapter "
209 			 "has been damaged\n");
210 		break;
211 	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
212 		dev_warn(&req->adapter->ccw_device->dev,
213 			 "All NPIV ports on the FCP adapter have "
214 			 "been assigned\n");
215 		break;
216 	default:
217 		dev_warn(&req->adapter->ccw_device->dev,
218 			 "The link between the FCP adapter and "
219 			 "the FC fabric is down\n");
220 	}
221 out:
222 	zfcp_erp_adapter_failed(adapter, id, req);
223 }
224 
225 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
226 {
227 	struct fsf_status_read_buffer *sr_buf = req->data;
228 	struct fsf_link_down_info *ldi =
229 		(struct fsf_link_down_info *) &sr_buf->payload;
230 
231 	switch (sr_buf->status_subtype) {
232 	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
233 		zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi);
234 		break;
235 	case FSF_STATUS_READ_SUB_FDISC_FAILED:
236 		zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi);
237 		break;
238 	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
239 		zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL);
240 	}
241 }
242 
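/* dispatch a completed unsolicited status read buffer according to its status type */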
243 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
244 {
245 	struct zfcp_adapter *adapter = req->adapter;
246 	struct fsf_status_read_buffer *sr_buf = req->data;
247 
248 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
249 		zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf);
250 		mempool_free(sr_buf, adapter->pool.status_read_data);
251 		zfcp_fsf_req_free(req);
252 		return;
253 	}
254 
255 	zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf);
256 
257 	switch (sr_buf->status_type) {
258 	case FSF_STATUS_READ_PORT_CLOSED:
259 		zfcp_fsf_status_read_port_closed(req);
260 		break;
261 	case FSF_STATUS_READ_INCOMING_ELS:
262 		zfcp_fc_incoming_els(req);
263 		break;
264 	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
265 		break;
266 	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
267 		dev_warn(&adapter->ccw_device->dev,
268 			 "The error threshold for checksum statistics "
269 			 "has been exceeded\n");
270 		zfcp_dbf_hba_berr(adapter->dbf, req);
271 		break;
272 	case FSF_STATUS_READ_LINK_DOWN:
273 		zfcp_fsf_status_read_link_down(req);
274 		break;
275 	case FSF_STATUS_READ_LINK_UP:
276 		dev_info(&adapter->ccw_device->dev,
277 			 "The local link has been restored\n");
278 		/* All ports should be marked as ready to run again */
279 		zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL,
280 					       ZFCP_STATUS_COMMON_RUNNING,
281 					       ZFCP_SET);
282 		zfcp_erp_adapter_reopen(adapter,
283 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
284 					ZFCP_STATUS_COMMON_ERP_FAILED,
285 					"fssrh_2", req);
286 		break;
287 	case FSF_STATUS_READ_NOTIFICATION_LOST:
288 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
289 			zfcp_erp_adapter_access_changed(adapter, "fssrh_3",
290 							req);
291 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
292 			queue_work(adapter->work_queue, &adapter->scan_work);
293 		break;
294 	case FSF_STATUS_READ_CFDC_UPDATED:
295 		zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req);
296 		break;
297 	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
298 		adapter->adapter_features = sr_buf->payload.word[0];
299 		break;
300 	}
301 
302 	mempool_free(sr_buf, adapter->pool.status_read_data);
303 	zfcp_fsf_req_free(req);
304 
305 	atomic_inc(&adapter->stat_miss);
306 	queue_work(adapter->work_queue, &adapter->stat_work);
307 }
308 
309 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
310 {
311 	switch (req->qtcb->header.fsf_status_qual.word[0]) {
312 	case FSF_SQ_FCP_RSP_AVAILABLE:
313 	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
314 	case FSF_SQ_NO_RETRY_POSSIBLE:
315 	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
316 		return;
317 	case FSF_SQ_COMMAND_ABORTED:
318 		break;
319 	case FSF_SQ_NO_RECOM:
320 		dev_err(&req->adapter->ccw_device->dev,
321 			"The FCP adapter reported a problem "
322 			"that cannot be recovered\n");
323 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
324 		break;
325 	}
326 	/* all non-returning status cases set FSFREQ_ERROR */
327 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
328 }
329 
330 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
331 {
332 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
333 		return;
334 
335 	switch (req->qtcb->header.fsf_status) {
336 	case FSF_UNKNOWN_COMMAND:
337 		dev_err(&req->adapter->ccw_device->dev,
338 			"The FCP adapter does not recognize the command 0x%x\n",
339 			req->qtcb->header.fsf_command);
340 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req);
341 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
342 		break;
343 	case FSF_ADAPTER_STATUS_AVAILABLE:
344 		zfcp_fsf_fsfstatus_qual_eval(req);
345 		break;
346 	}
347 }
348 
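/* evaluate the QTCB prefix protocol status and trigger adapter recovery on errors */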
349 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
350 {
351 	struct zfcp_adapter *adapter = req->adapter;
352 	struct fsf_qtcb *qtcb = req->qtcb;
353 	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
354 
355 	zfcp_dbf_hba_fsf_response(req);
356 
357 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
358 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
359 		return;
360 	}
361 
362 	switch (qtcb->prefix.prot_status) {
363 	case FSF_PROT_GOOD:
364 	case FSF_PROT_FSF_STATUS_PRESENTED:
365 		return;
366 	case FSF_PROT_QTCB_VERSION_ERROR:
367 		dev_err(&adapter->ccw_device->dev,
368 			"QTCB version 0x%x not supported by FCP adapter "
369 			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
370 			psq->word[0], psq->word[1]);
371 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req);
372 		break;
373 	case FSF_PROT_ERROR_STATE:
374 	case FSF_PROT_SEQ_NUMB_ERROR:
375 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
376 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
377 		break;
378 	case FSF_PROT_UNSUPP_QTCB_TYPE:
379 		dev_err(&adapter->ccw_device->dev,
380 			"The QTCB type is not supported by the FCP adapter\n");
381 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req);
382 		break;
383 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
384 		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
385 				&adapter->status);
386 		break;
387 	case FSF_PROT_DUPLICATE_REQUEST_ID:
388 		dev_err(&adapter->ccw_device->dev,
389 			"0x%Lx is an ambiguous request identifier\n",
390 			(unsigned long long)qtcb->bottom.support.req_handle);
391 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
392 		break;
393 	case FSF_PROT_LINK_DOWN:
394 		zfcp_fsf_link_down_info_eval(req, "fspse_5",
395 					     &psq->link_down_info);
396 		/* FIXME: reopening adapter now? better wait for link up */
397 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
398 		break;
399 	case FSF_PROT_REEST_QUEUE:
400 		/* All ports should be marked as ready to run again */
401 		zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL,
402 					       ZFCP_STATUS_COMMON_RUNNING,
403 					       ZFCP_SET);
404 		zfcp_erp_adapter_reopen(adapter,
405 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
406 					ZFCP_STATUS_COMMON_ERP_FAILED,
407 					"fspse_8", req);
408 		break;
409 	default:
410 		dev_err(&adapter->ccw_device->dev,
411 			"0x%x is not a valid transfer protocol status\n",
412 			qtcb->prefix.prot_status);
413 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
414 	}
415 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
416 }
417 
418 /**
419  * zfcp_fsf_req_complete - process completion of a FSF request
420  * @req: The FSF request that has been completed.
421  *
422  * When a request has been completed either from the FCP adapter,
423  * or it has been dismissed due to a queue shutdown, this function
424  * is called to process the completion status and trigger further
425  * events related to the FSF request.
426  */
427 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
428 {
429 	if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
430 		zfcp_fsf_status_read_handler(req);
431 		return;
432 	}
433 
434 	del_timer(&req->timer);
435 	zfcp_fsf_protstatus_eval(req);
436 	zfcp_fsf_fsfstatus_eval(req);
437 	req->handler(req);
438 
439 	if (req->erp_action)
440 		zfcp_erp_notify(req->erp_action, 0);
441 
442 	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
443 		zfcp_fsf_req_free(req);
444 	else
445 		complete(&req->completion);
446 }
447 
448 /**
449  * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
450  * @adapter: pointer to struct zfcp_adapter
451  *
452  * Never ever call this without shutting down the adapter first.
453  * Otherwise the adapter would continue using and corrupting s390 storage.
454  * A BUG_ON() call is included to ensure this is done.
455  * ERP is supposed to be the only user of this function.
456  */
457 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
458 {
459 	struct zfcp_fsf_req *req, *tmp;
460 	unsigned long flags;
461 	LIST_HEAD(remove_queue);
462 	unsigned int i;
463 
464 	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
465 	spin_lock_irqsave(&adapter->req_list_lock, flags);
466 	for (i = 0; i < REQUEST_LIST_SIZE; i++)
467 		list_splice_init(&adapter->req_list[i], &remove_queue);
468 	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
469 
470 	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
471 		list_del(&req->list);
472 		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
473 		zfcp_fsf_req_complete(req);
474 	}
475 }
476 
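/* copy exchange config data into adapter and scsi_host (fc_host) attributes */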
477 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
478 {
479 	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
480 	struct zfcp_adapter *adapter = req->adapter;
481 	struct Scsi_Host *shost = adapter->scsi_host;
482 	struct fc_els_flogi *nsp, *plogi;
483 
484 	/* adjust pointers for missing command code */
485 	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
486 					- sizeof(u32));
487 	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
488 					- sizeof(u32));
489 
490 	if (req->data)
491 		memcpy(req->data, bottom, sizeof(*bottom));
492 
493 	fc_host_port_name(shost) = nsp->fl_wwpn;
494 	fc_host_node_name(shost) = nsp->fl_wwnn;
495 	fc_host_port_id(shost) = ntoh24(bottom->s_id);
496 	fc_host_speed(shost) = bottom->fc_link_speed;
497 	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
498 	fc_host_supported_fc4s(shost)[2] = 1; /* FCP */
499 	fc_host_active_fc4s(shost)[2] = 1; /* FCP */
500 
501 	adapter->hydra_version = bottom->adapter_type;
502 	adapter->timer_ticks = bottom->timer_interval;
503 
504 	if (fc_host_permanent_port_name(shost) == -1)
505 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
506 
507 	switch (bottom->fc_topology) {
508 	case FSF_TOPO_P2P:
509 		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
510 		adapter->peer_wwpn = plogi->fl_wwpn;
511 		adapter->peer_wwnn = plogi->fl_wwnn;
512 		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
513 		break;
514 	case FSF_TOPO_FABRIC:
515 		fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
516 		break;
517 	case FSF_TOPO_AL:
518 		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
519 		/* fall through */
520 	default:
521 		dev_err(&adapter->ccw_device->dev,
522 			"Unknown or unsupported arbitrated loop "
523 			"fibre channel topology detected\n");
524 		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req);
525 		return -EIO;
526 	}
527 
528 	return 0;
529 }
530 
531 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
532 {
533 	struct zfcp_adapter *adapter = req->adapter;
534 	struct fsf_qtcb *qtcb = req->qtcb;
535 	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
536 	struct Scsi_Host *shost = adapter->scsi_host;
537 
538 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
539 		return;
540 
541 	adapter->fsf_lic_version = bottom->lic_version;
542 	adapter->adapter_features = bottom->adapter_features;
543 	adapter->connection_features = bottom->connection_features;
544 	adapter->peer_wwpn = 0;
545 	adapter->peer_wwnn = 0;
546 	adapter->peer_d_id = 0;
547 
548 	switch (qtcb->header.fsf_status) {
549 	case FSF_GOOD:
550 		if (zfcp_fsf_exchange_config_evaluate(req))
551 			return;
552 
553 		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
554 			dev_err(&adapter->ccw_device->dev,
555 				"FCP adapter maximum QTCB size (%d bytes) "
556 				"is too small\n",
557 				bottom->max_qtcb_size);
558 			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req);
559 			return;
560 		}
561 		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
562 				&adapter->status);
563 		break;
564 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
565 		fc_host_node_name(shost) = 0;
566 		fc_host_port_name(shost) = 0;
567 		fc_host_port_id(shost) = 0;
568 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
569 		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
570 		adapter->hydra_version = 0;
571 
572 		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
573 				&adapter->status);
574 
575 		zfcp_fsf_link_down_info_eval(req, "fsecdh2",
576 			&qtcb->header.fsf_status_qual.link_down_info);
577 		break;
578 	default:
579 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req);
580 		return;
581 	}
582 
583 	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
584 		adapter->hardware_version = bottom->hardware_version;
585 		memcpy(fc_host_serial_number(shost), bottom->serial_number,
586 		       min(FC_SERIAL_NUMBER_SIZE, 17));
587 		EBCASC(fc_host_serial_number(shost),
588 		       min(FC_SERIAL_NUMBER_SIZE, 17));
589 	}
590 
591 	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
592 		dev_err(&adapter->ccw_device->dev,
593 			"The FCP adapter only supports newer "
594 			"control block versions\n");
595 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req);
596 		return;
597 	}
598 	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
599 		dev_err(&adapter->ccw_device->dev,
600 			"The FCP adapter only supports older "
601 			"control block versions\n");
602 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req);
603 	}
604 }
605 
606 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
607 {
608 	struct zfcp_adapter *adapter = req->adapter;
609 	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
610 	struct Scsi_Host *shost = adapter->scsi_host;
611 
612 	if (req->data)
613 		memcpy(req->data, bottom, sizeof(*bottom));
614 
615 	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
616 		fc_host_permanent_port_name(shost) = bottom->wwpn;
617 		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
618 	} else
619 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
620 	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
621 	fc_host_supported_speeds(shost) = bottom->supported_speed;
622 }
623 
624 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
625 {
626 	struct fsf_qtcb *qtcb = req->qtcb;
627 
628 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
629 		return;
630 
631 	switch (qtcb->header.fsf_status) {
632 	case FSF_GOOD:
633 		zfcp_fsf_exchange_port_evaluate(req);
634 		break;
635 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
636 		zfcp_fsf_exchange_port_evaluate(req);
637 		zfcp_fsf_link_down_info_eval(req, "fsepdh1",
638 			&qtcb->header.fsf_status_qual.link_down_info);
639 		break;
640 	}
641 }
642 
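/* check for a free SBAL in the request queue; returns with req_q_lock held on success */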
643 static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio)
644 {
645 	struct zfcp_qdio_queue *req_q = &qdio->req_q;
646 
647 	spin_lock_bh(&qdio->req_q_lock);
648 	if (atomic_read(&req_q->count))
649 		return 1;
650 	spin_unlock_bh(&qdio->req_q_lock);
651 	return 0;
652 }
653 
654 static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio)
655 {
656 	struct zfcp_adapter *adapter = qdio->adapter;
657 	long ret;
658 
659 	spin_unlock_bh(&qdio->req_q_lock);
660 	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
661 			       zfcp_fsf_sbal_check(qdio), 5 * HZ);
662 	if (ret > 0)
663 		return 0;
664 	if (!ret) {
665 		atomic_inc(&qdio->req_q_full);
666 		/* assume hanging outbound queue, try queue recovery */
667 		zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
668 	}
669 
670 	spin_lock_bh(&qdio->req_q_lock);
671 	return -EIO;
672 }
673 
674 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
675 {
676 	struct zfcp_fsf_req *req;
677 
678 	if (likely(pool))
679 		req = mempool_alloc(pool, GFP_ATOMIC);
680 	else
681 		req = kmalloc(sizeof(*req), GFP_ATOMIC);
682 
683 	if (unlikely(!req))
684 		return NULL;
685 
686 	memset(req, 0, sizeof(*req));
687 	req->pool = pool;
688 	return req;
689 }
690 
691 static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
692 {
693 	struct fsf_qtcb *qtcb;
694 
695 	if (likely(pool))
696 		qtcb = mempool_alloc(pool, GFP_ATOMIC);
697 	else
698 		qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
699 
700 	if (unlikely(!qtcb))
701 		return NULL;
702 
703 	memset(qtcb, 0, sizeof(*qtcb));
704 	return qtcb;
705 }
706 
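/*
 * Allocate and initialize an FSF request, including its first SBALE and,
 * for everything but unsolicited status reads, an attached QTCB.
 */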
707 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
708 						u32 fsf_cmd, mempool_t *pool)
709 {
710 	struct qdio_buffer_element *sbale;
711 	struct zfcp_qdio_queue *req_q = &qdio->req_q;
712 	struct zfcp_adapter *adapter = qdio->adapter;
713 	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
714 
715 	if (unlikely(!req))
716 		return ERR_PTR(-ENOMEM);
717 
718 	if (adapter->req_no == 0)
719 		adapter->req_no++;
720 
721 	INIT_LIST_HEAD(&req->list);
722 	init_timer(&req->timer);
723 	init_completion(&req->completion);
724 
725 	req->adapter = adapter;
726 	req->fsf_command = fsf_cmd;
727 	req->req_id = adapter->req_no;
728 	req->queue_req.sbal_number = 1;
729 	req->queue_req.sbal_first = req_q->first;
730 	req->queue_req.sbal_last = req_q->first;
731 	req->queue_req.sbale_curr = 1;
732 
733 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
734 	sbale[0].addr = (void *) req->req_id;
735 	sbale[0].flags |= SBAL_FLAGS0_COMMAND;
736 
737 	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
738 		if (likely(pool))
739 			req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
740 		else
741 			req->qtcb = zfcp_qtcb_alloc(NULL);
742 
743 		if (unlikely(!req->qtcb)) {
744 			zfcp_fsf_req_free(req);
745 			return ERR_PTR(-ENOMEM);
746 		}
747 
748 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
749 		req->qtcb->prefix.req_id = req->req_id;
750 		req->qtcb->prefix.ulp_info = 26;
751 		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
752 		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
753 		req->qtcb->header.req_handle = req->req_id;
754 		req->qtcb->header.fsf_command = req->fsf_command;
755 		req->seq_no = adapter->fsf_req_seq_no;
756 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
757 		sbale[1].addr = (void *) req->qtcb;
758 		sbale[1].length = sizeof(struct fsf_qtcb);
759 	}
760 
761 	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
762 		zfcp_fsf_req_free(req);
763 		return ERR_PTR(-EIO);
764 	}
765 
766 	return req;
767 }
768 
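/* register the request in the adapter's request hash and hand it to the QDIO outbound queue */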
769 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
770 {
771 	struct zfcp_adapter *adapter = req->adapter;
772 	struct zfcp_qdio *qdio = adapter->qdio;
773 	unsigned long	     flags;
774 	int		     idx;
775 	int		     with_qtcb = (req->qtcb != NULL);
776 
777 	/* put allocated FSF request into hash table */
778 	spin_lock_irqsave(&adapter->req_list_lock, flags);
779 	idx = zfcp_reqlist_hash(req->req_id);
780 	list_add_tail(&req->list, &adapter->req_list[idx]);
781 	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
782 
783 	req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
784 	req->issued = get_clock();
785 	if (zfcp_qdio_send(qdio, &req->queue_req)) {
786 		del_timer(&req->timer);
787 		spin_lock_irqsave(&adapter->req_list_lock, flags);
788 		/* lookup request again, list might have changed */
789 		if (zfcp_reqlist_find_safe(adapter, req))
790 			zfcp_reqlist_remove(adapter, req);
791 		spin_unlock_irqrestore(&adapter->req_list_lock, flags);
792 		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
793 		return -EIO;
794 	}
795 
796 	/* Don't increase for unsolicited status */
797 	if (with_qtcb)
798 		adapter->fsf_req_seq_no++;
799 	adapter->req_no++;
800 
801 	return 0;
802 }
803 
804 /**
805  * zfcp_fsf_status_read - send status read request
806  * @adapter: pointer to struct zfcp_adapter
807  * @req_flags: request flags
808  * Returns: 0 on success, ERROR otherwise
809  */
810 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
811 {
812 	struct zfcp_adapter *adapter = qdio->adapter;
813 	struct zfcp_fsf_req *req;
814 	struct fsf_status_read_buffer *sr_buf;
815 	struct qdio_buffer_element *sbale;
816 	int retval = -EIO;
817 
818 	spin_lock_bh(&qdio->req_q_lock);
819 	if (zfcp_fsf_req_sbal_get(qdio))
820 		goto out;
821 
822 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
823 				  adapter->pool.status_read_req);
824 	if (IS_ERR(req)) {
825 		retval = PTR_ERR(req);
826 		goto out;
827 	}
828 
829 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
830 	sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
831 	req->queue_req.sbale_curr = 2;
832 
833 	sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
834 	if (!sr_buf) {
835 		retval = -ENOMEM;
836 		goto failed_buf;
837 	}
838 	memset(sr_buf, 0, sizeof(*sr_buf));
839 	req->data = sr_buf;
840 	sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req);
841 	sbale->addr = (void *) sr_buf;
842 	sbale->length = sizeof(*sr_buf);
843 
844 	retval = zfcp_fsf_req_send(req);
845 	if (retval)
846 		goto failed_req_send;
847 
848 	goto out;
849 
850 failed_req_send:
851 	mempool_free(sr_buf, adapter->pool.status_read_data);
852 failed_buf:
853 	zfcp_fsf_req_free(req);
854 	zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
855 out:
856 	spin_unlock_bh(&qdio->req_q_lock);
857 	return retval;
858 }
859 
860 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
861 {
862 	struct zfcp_unit *unit = req->data;
863 	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
864 
865 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
866 		return;
867 
868 	switch (req->qtcb->header.fsf_status) {
869 	case FSF_PORT_HANDLE_NOT_VALID:
870 		if (fsq->word[0] == fsq->word[1]) {
871 			zfcp_erp_adapter_reopen(unit->port->adapter, 0,
872 						"fsafch1", req);
873 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
874 		}
875 		break;
876 	case FSF_LUN_HANDLE_NOT_VALID:
877 		if (fsq->word[0] == fsq->word[1]) {
878 			zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req);
879 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
880 		}
881 		break;
882 	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
883 		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
884 		break;
885 	case FSF_PORT_BOXED:
886 		zfcp_erp_port_boxed(unit->port, "fsafch3", req);
887 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
888 		break;
889 	case FSF_LUN_BOXED:
890 		zfcp_erp_unit_boxed(unit, "fsafch4", req);
891 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
892 		break;
893 	case FSF_ADAPTER_STATUS_AVAILABLE:
894 		switch (fsq->word[0]) {
895 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
896 			zfcp_fc_test_link(unit->port);
897 			/* fall through */
898 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
899 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
900 			break;
901 		}
902 		break;
903 	case FSF_GOOD:
904 		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
905 		break;
906 	}
907 }
908 
909 /**
910  * zfcp_fsf_abort_fcp_command - abort running SCSI command
911  * @old_req_id: request id of the FCP command that shall be aborted
912  * @unit: pointer to struct zfcp_unit
913  * Returns: pointer to struct zfcp_fsf_req
914  */
915 
916 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
917 						struct zfcp_unit *unit)
918 {
919 	struct qdio_buffer_element *sbale;
920 	struct zfcp_fsf_req *req = NULL;
921 	struct zfcp_qdio *qdio = unit->port->adapter->qdio;
922 
923 	spin_lock_bh(&qdio->req_q_lock);
924 	if (zfcp_fsf_req_sbal_get(qdio))
925 		goto out;
926 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
927 				  qdio->adapter->pool.scsi_abort);
928 	if (IS_ERR(req)) {
929 		req = NULL;
930 		goto out;
931 	}
932 
933 	if (unlikely(!(atomic_read(&unit->status) &
934 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
935 		goto out_error_free;
936 
937 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
938 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
939 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
940 
941 	req->data = unit;
942 	req->handler = zfcp_fsf_abort_fcp_command_handler;
943 	req->qtcb->header.lun_handle = unit->handle;
944 	req->qtcb->header.port_handle = unit->port->handle;
945 	req->qtcb->bottom.support.req_handle = (u64) old_req_id;
946 
947 	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
948 	if (!zfcp_fsf_req_send(req))
949 		goto out;
950 
951 out_error_free:
952 	zfcp_fsf_req_free(req);
953 	req = NULL;
954 out:
955 	spin_unlock_bh(&qdio->req_q_lock);
956 	return req;
957 }
958 
959 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
960 {
961 	struct zfcp_adapter *adapter = req->adapter;
962 	struct zfcp_fsf_ct_els *ct = req->data;
963 	struct fsf_qtcb_header *header = &req->qtcb->header;
964 
965 	ct->status = -EINVAL;
966 
967 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
968 		goto skip_fsfstatus;
969 
970 	switch (header->fsf_status) {
971 	case FSF_GOOD:
972 		zfcp_dbf_san_ct_response(req);
973 		ct->status = 0;
974 		break;
975 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
976 		zfcp_fsf_class_not_supp(req);
977 		break;
978 	case FSF_ADAPTER_STATUS_AVAILABLE:
979 		switch (header->fsf_status_qual.word[0]) {
980 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
981 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
982 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
983 			break;
984 		}
985 		break;
986 	case FSF_ACCESS_DENIED:
987 		break;
988 	case FSF_PORT_BOXED:
989 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
990 		break;
991 	case FSF_PORT_HANDLE_NOT_VALID:
992 		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
993 		/* fall through */
994 	case FSF_GENERIC_COMMAND_REJECTED:
995 	case FSF_PAYLOAD_SIZE_MISMATCH:
996 	case FSF_REQUEST_SIZE_TOO_LARGE:
997 	case FSF_RESPONSE_SIZE_TOO_LARGE:
998 	case FSF_SBAL_MISMATCH:
999 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1000 		break;
1001 	}
1002 
1003 skip_fsfstatus:
1004 	if (ct->handler)
1005 		ct->handler(ct->handler_data);
1006 }
1007 
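/* map single-element CT/ELS request and response buffers into one unchained SBAL */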
1008 static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
1009 					    struct scatterlist *sg_req,
1010 					    struct scatterlist *sg_resp)
1011 {
1012 	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
1013 	sbale[2].addr   = sg_virt(sg_req);
1014 	sbale[2].length = sg_req->length;
1015 	sbale[3].addr   = sg_virt(sg_resp);
1016 	sbale[3].length = sg_resp->length;
1017 	sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1018 }
1019 
1020 static int zfcp_fsf_one_sbal(struct scatterlist *sg)
1021 {
1022 	return sg_is_last(sg) && sg->length <= PAGE_SIZE;
1023 }
1024 
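/*
 * Set up SBALs for a CT/ELS request: use one unchained SBAL where possible,
 * otherwise build chained SBALs from the scatterlists.
 */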
1025 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1026 				       struct scatterlist *sg_req,
1027 				       struct scatterlist *sg_resp,
1028 				       int max_sbals)
1029 {
1030 	struct zfcp_adapter *adapter = req->adapter;
1031 	struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
1032 							       &req->queue_req);
1033 	u32 feat = adapter->adapter_features;
1034 	int bytes;
1035 
1036 	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
1037 		if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
1038 			return -EOPNOTSUPP;
1039 
1040 		zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
1041 		return 0;
1042 	}
1043 
1044 	/* use single, unchained SBAL if it can hold the request */
1045 	if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
1046 		zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
1047 		return 0;
1048 	}
1049 
1050 	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
1051 					SBAL_FLAGS0_TYPE_WRITE_READ,
1052 					sg_req, max_sbals);
1053 	if (bytes <= 0)
1054 		return -EIO;
1055 	req->qtcb->bottom.support.req_buf_length = bytes;
1056 	req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1057 
1058 	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
1059 					SBAL_FLAGS0_TYPE_WRITE_READ,
1060 					sg_resp, max_sbals);
1061 	req->qtcb->bottom.support.resp_buf_length = bytes;
1062 	if (bytes <= 0)
1063 		return -EIO;
1064 
1065 	return 0;
1066 }
1067 
1068 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1069 				 struct scatterlist *sg_req,
1070 				 struct scatterlist *sg_resp,
1071 				 int max_sbals)
1072 {
1073 	int ret;
1074 	unsigned int fcp_chan_timeout;
1075 
1076 	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals);
1077 	if (ret)
1078 		return ret;
1079 
1080 	/* common settings for ct/gs and els requests */
1081 	fcp_chan_timeout = 2 * FC_DEF_R_A_TOV / 1000;
1082 	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1083 	req->qtcb->bottom.support.timeout = fcp_chan_timeout;
1084 	zfcp_fsf_start_timer(req, (fcp_chan_timeout + 10) * HZ);
1085 
1086 	return 0;
1087 }
1088 
1089 /**
1090  * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1091  * @ct: pointer to struct zfcp_fsf_ct_els with data for the CT request
1092  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1093  */
1094 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1095 		     struct zfcp_fsf_ct_els *ct, mempool_t *pool)
1096 {
1097 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1098 	struct zfcp_fsf_req *req;
1099 	int ret = -EIO;
1100 
1101 	spin_lock_bh(&qdio->req_q_lock);
1102 	if (zfcp_fsf_req_sbal_get(qdio))
1103 		goto out;
1104 
1105 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool);
1106 
1107 	if (IS_ERR(req)) {
1108 		ret = PTR_ERR(req);
1109 		goto out;
1110 	}
1111 
1112 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1113 	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
1114 				    FSF_MAX_SBALS_PER_REQ);
1115 	if (ret)
1116 		goto failed_send;
1117 
1118 	req->handler = zfcp_fsf_send_ct_handler;
1119 	req->qtcb->header.port_handle = wka_port->handle;
1120 	req->data = ct;
1121 
1122 	zfcp_dbf_san_ct_request(req, wka_port->d_id);
1123 
1124 	ret = zfcp_fsf_req_send(req);
1125 	if (ret)
1126 		goto failed_send;
1127 
1128 	goto out;
1129 
1130 failed_send:
1131 	zfcp_fsf_req_free(req);
1132 out:
1133 	spin_unlock_bh(&qdio->req_q_lock);
1134 	return ret;
1135 }
1136 
1137 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1138 {
1139 	struct zfcp_fsf_ct_els *send_els = req->data;
1140 	struct zfcp_port *port = send_els->port;
1141 	struct fsf_qtcb_header *header = &req->qtcb->header;
1142 
1143 	send_els->status = -EINVAL;
1144 
1145 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1146 		goto skip_fsfstatus;
1147 
1148 	switch (header->fsf_status) {
1149 	case FSF_GOOD:
1150 		zfcp_dbf_san_els_response(req);
1151 		send_els->status = 0;
1152 		break;
1153 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1154 		zfcp_fsf_class_not_supp(req);
1155 		break;
1156 	case FSF_ADAPTER_STATUS_AVAILABLE:
1157 		switch (header->fsf_status_qual.word[0]){
1158 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1159 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1160 		case FSF_SQ_RETRY_IF_POSSIBLE:
1161 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1162 			break;
1163 		}
1164 		break;
1165 	case FSF_ELS_COMMAND_REJECTED:
1166 	case FSF_PAYLOAD_SIZE_MISMATCH:
1167 	case FSF_REQUEST_SIZE_TOO_LARGE:
1168 	case FSF_RESPONSE_SIZE_TOO_LARGE:
1169 		break;
1170 	case FSF_ACCESS_DENIED:
1171 		if (port)
1172 			zfcp_fsf_access_denied_port(req, port);
1173 		break;
1174 	case FSF_SBAL_MISMATCH:
1175 		/* should never occur, avoided in zfcp_fsf_send_els */
1176 		/* fall through */
1177 	default:
1178 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1179 		break;
1180 	}
1181 skip_fsfstatus:
1182 	if (send_els->handler)
1183 		send_els->handler(send_els->handler_data);
1184 }
1185 
1186 /**
1187  * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1188  * @els: pointer to struct zfcp_fsf_ct_els with data for the ELS command
1189  */
1190 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1191 		      struct zfcp_fsf_ct_els *els)
1192 {
1193 	struct zfcp_fsf_req *req;
1194 	struct zfcp_qdio *qdio = adapter->qdio;
1195 	int ret = -EIO;
1196 
1197 	spin_lock_bh(&qdio->req_q_lock);
1198 	if (zfcp_fsf_req_sbal_get(qdio))
1199 		goto out;
1200 
1201 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL);
1202 
1203 	if (IS_ERR(req)) {
1204 		ret = PTR_ERR(req);
1205 		goto out;
1206 	}
1207 
1208 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1209 	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2);
1210 
1211 	if (ret)
1212 		goto failed_send;
1213 
1214 	hton24(req->qtcb->bottom.support.d_id, d_id);
1215 	req->handler = zfcp_fsf_send_els_handler;
1216 	req->data = els;
1217 
1218 	zfcp_dbf_san_els_request(req);
1219 
1220 	ret = zfcp_fsf_req_send(req);
1221 	if (ret)
1222 		goto failed_send;
1223 
1224 	goto out;
1225 
1226 failed_send:
1227 	zfcp_fsf_req_free(req);
1228 out:
1229 	spin_unlock_bh(&qdio->req_q_lock);
1230 	return ret;
1231 }
1232 
1233 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1234 {
1235 	struct qdio_buffer_element *sbale;
1236 	struct zfcp_fsf_req *req;
1237 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1238 	int retval = -EIO;
1239 
1240 	spin_lock_bh(&qdio->req_q_lock);
1241 	if (zfcp_fsf_req_sbal_get(qdio))
1242 		goto out;
1243 
1244 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1245 				  qdio->adapter->pool.erp_req);
1246 
1247 	if (IS_ERR(req)) {
1248 		retval = PTR_ERR(req);
1249 		goto out;
1250 	}
1251 
1252 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1253 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1254 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1255 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1256 
1257 	req->qtcb->bottom.config.feature_selection =
1258 			FSF_FEATURE_CFDC |
1259 			FSF_FEATURE_LUN_SHARING |
1260 			FSF_FEATURE_NOTIFICATION_LOST |
1261 			FSF_FEATURE_UPDATE_ALERT;
1262 	req->erp_action = erp_action;
1263 	req->handler = zfcp_fsf_exchange_config_data_handler;
1264 	erp_action->fsf_req = req;
1265 
1266 	zfcp_fsf_start_erp_timer(req);
1267 	retval = zfcp_fsf_req_send(req);
1268 	if (retval) {
1269 		zfcp_fsf_req_free(req);
1270 		erp_action->fsf_req = NULL;
1271 	}
1272 out:
1273 	spin_unlock_bh(&qdio->req_q_lock);
1274 	return retval;
1275 }
1276 
1277 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1278 				       struct fsf_qtcb_bottom_config *data)
1279 {
1280 	struct qdio_buffer_element *sbale;
1281 	struct zfcp_fsf_req *req = NULL;
1282 	int retval = -EIO;
1283 
1284 	spin_lock_bh(&qdio->req_q_lock);
1285 	if (zfcp_fsf_req_sbal_get(qdio))
1286 		goto out_unlock;
1287 
1288 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
1289 
1290 	if (IS_ERR(req)) {
1291 		retval = PTR_ERR(req);
1292 		goto out_unlock;
1293 	}
1294 
1295 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1296 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1297 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1298 	req->handler = zfcp_fsf_exchange_config_data_handler;
1299 
1300 	req->qtcb->bottom.config.feature_selection =
1301 			FSF_FEATURE_CFDC |
1302 			FSF_FEATURE_LUN_SHARING |
1303 			FSF_FEATURE_NOTIFICATION_LOST |
1304 			FSF_FEATURE_UPDATE_ALERT;
1305 
1306 	if (data)
1307 		req->data = data;
1308 
1309 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1310 	retval = zfcp_fsf_req_send(req);
1311 	spin_unlock_bh(&qdio->req_q_lock);
1312 	if (!retval)
1313 		wait_for_completion(&req->completion);
1314 
1315 	zfcp_fsf_req_free(req);
1316 	return retval;
1317 
1318 out_unlock:
1319 	spin_unlock_bh(&qdio->req_q_lock);
1320 	return retval;
1321 }
1322 
1323 /**
1324  * zfcp_fsf_exchange_port_data - request information about local port
1325  * @erp_action: ERP action for the adapter for which port data is requested
1326  * Returns: 0 on success, error otherwise
1327  */
1328 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1329 {
1330 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1331 	struct qdio_buffer_element *sbale;
1332 	struct zfcp_fsf_req *req;
1333 	int retval = -EIO;
1334 
1335 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1336 		return -EOPNOTSUPP;
1337 
1338 	spin_lock_bh(&qdio->req_q_lock);
1339 	if (zfcp_fsf_req_sbal_get(qdio))
1340 		goto out;
1341 
1342 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1343 				  qdio->adapter->pool.erp_req);
1344 
1345 	if (IS_ERR(req)) {
1346 		retval = PTR_ERR(req);
1347 		goto out;
1348 	}
1349 
1350 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1351 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1352 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1353 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1354 
1355 	req->handler = zfcp_fsf_exchange_port_data_handler;
1356 	req->erp_action = erp_action;
1357 	erp_action->fsf_req = req;
1358 
1359 	zfcp_fsf_start_erp_timer(req);
1360 	retval = zfcp_fsf_req_send(req);
1361 	if (retval) {
1362 		zfcp_fsf_req_free(req);
1363 		erp_action->fsf_req = NULL;
1364 	}
1365 out:
1366 	spin_unlock_bh(&qdio->req_q_lock);
1367 	return retval;
1368 }
1369 
1370 /**
1371  * zfcp_fsf_exchange_port_data_sync - request information about local port
1372  * @qdio: pointer to struct zfcp_qdio
1373  * @data: pointer to struct fsf_qtcb_bottom_port
1374  * Returns: 0 on success, error otherwise
1375  */
1376 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1377 				     struct fsf_qtcb_bottom_port *data)
1378 {
1379 	struct qdio_buffer_element *sbale;
1380 	struct zfcp_fsf_req *req = NULL;
1381 	int retval = -EIO;
1382 
1383 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1384 		return -EOPNOTSUPP;
1385 
1386 	spin_lock_bh(&qdio->req_q_lock);
1387 	if (zfcp_fsf_req_sbal_get(qdio))
1388 		goto out_unlock;
1389 
1390 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
1391 
1392 	if (IS_ERR(req)) {
1393 		retval = PTR_ERR(req);
1394 		goto out_unlock;
1395 	}
1396 
1397 	if (data)
1398 		req->data = data;
1399 
1400 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1401 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1402 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1403 
1404 	req->handler = zfcp_fsf_exchange_port_data_handler;
1405 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1406 	retval = zfcp_fsf_req_send(req);
1407 	spin_unlock_bh(&qdio->req_q_lock);
1408 
1409 	if (!retval)
1410 		wait_for_completion(&req->completion);
1411 
1412 	zfcp_fsf_req_free(req);
1413 
1414 	return retval;
1415 
1416 out_unlock:
1417 	spin_unlock_bh(&qdio->req_q_lock);
1418 	return retval;
1419 }
1420 
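/* evaluate the response to an open port request and save the FSF port handle */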
1421 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1422 {
1423 	struct zfcp_port *port = req->data;
1424 	struct fsf_qtcb_header *header = &req->qtcb->header;
1425 	struct fc_els_flogi *plogi;
1426 
1427 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1428 		goto out;
1429 
1430 	switch (header->fsf_status) {
1431 	case FSF_PORT_ALREADY_OPEN:
1432 		break;
1433 	case FSF_ACCESS_DENIED:
1434 		zfcp_fsf_access_denied_port(req, port);
1435 		break;
1436 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1437 		dev_warn(&req->adapter->ccw_device->dev,
1438 			 "Not enough FCP adapter resources to open "
1439 			 "remote port 0x%016Lx\n",
1440 			 (unsigned long long)port->wwpn);
1441 		zfcp_erp_port_failed(port, "fsoph_1", req);
1442 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1443 		break;
1444 	case FSF_ADAPTER_STATUS_AVAILABLE:
1445 		switch (header->fsf_status_qual.word[0]) {
1446 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1447 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1448 		case FSF_SQ_NO_RETRY_POSSIBLE:
1449 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1450 			break;
1451 		}
1452 		break;
1453 	case FSF_GOOD:
1454 		port->handle = header->port_handle;
1455 		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
1456 				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1457 		atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1458 		                  ZFCP_STATUS_COMMON_ACCESS_BOXED,
1459 		                  &port->status);
1460 		/* check whether D_ID has changed during open */
1461 		/*
1462 		 * FIXME: This check is not airtight, as the FCP channel does
1463 		 * not monitor closures of target port connections caused on
1464 		 * the remote side. Thus, it might miss out on invalidating
1465 		 * locally cached WWPNs (and other N_Port parameters) of gone
1466 		 * target ports. So, our heroic attempt to make things safe
1467 		 * could be undermined by 'open port' response data tagged with
1468 		 * obsolete WWPNs. Another reason to monitor potential
1469 		 * connection closures ourselves at least (by interpreting
1470 		 * incoming ELS' and unsolicited status). It just crosses my
1471 		 * mind that one should be able to cross-check by means of
1472 		 * another GID_PN straight after a port has been opened.
1473 		 * Alternatively, an ADISC/PDISC ELS should suffice as well.
1474 		 */
1475 		plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
1476 		if (req->qtcb->bottom.support.els1_length >=
1477 		    FSF_PLOGI_MIN_LEN)
1478 			zfcp_fc_plogi_evaluate(port, plogi);
1479 		break;
1480 	case FSF_UNKNOWN_OP_SUBTYPE:
1481 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1482 		break;
1483 	}
1484 
1485 out:
1486 	put_device(&port->sysfs_device);
1487 }
1488 
1489 /**
1490  * zfcp_fsf_open_port - create and send open port request
1491  * @erp_action: pointer to struct zfcp_erp_action
1492  * Returns: 0 on success, error otherwise
1493  */
1494 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1495 {
1496 	struct qdio_buffer_element *sbale;
1497 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1498 	struct zfcp_port *port = erp_action->port;
1499 	struct zfcp_fsf_req *req;
1500 	int retval = -EIO;
1501 
1502 	spin_lock_bh(&qdio->req_q_lock);
1503 	if (zfcp_fsf_req_sbal_get(qdio))
1504 		goto out;
1505 
1506 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1507 				  qdio->adapter->pool.erp_req);
1508 
1509 	if (IS_ERR(req)) {
1510 		retval = PTR_ERR(req);
1511 		goto out;
1512 	}
1513 
1514 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1515 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1516 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1517 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1518 
1519 	req->handler = zfcp_fsf_open_port_handler;
1520 	hton24(req->qtcb->bottom.support.d_id, port->d_id);
1521 	req->data = port;
1522 	req->erp_action = erp_action;
1523 	erp_action->fsf_req = req;
1524 	get_device(&port->sysfs_device);
1525 
1526 	zfcp_fsf_start_erp_timer(req);
1527 	retval = zfcp_fsf_req_send(req);
1528 	if (retval) {
1529 		zfcp_fsf_req_free(req);
1530 		erp_action->fsf_req = NULL;
1531 		put_device(&port->sysfs_device);
1532 	}
1533 out:
1534 	spin_unlock_bh(&qdio->req_q_lock);
1535 	return retval;
1536 }
1537 
1538 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1539 {
1540 	struct zfcp_port *port = req->data;
1541 
1542 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1543 		return;
1544 
1545 	switch (req->qtcb->header.fsf_status) {
1546 	case FSF_PORT_HANDLE_NOT_VALID:
1547 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req);
1548 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1549 		break;
1550 	case FSF_ADAPTER_STATUS_AVAILABLE:
1551 		break;
1552 	case FSF_GOOD:
1553 		zfcp_erp_modify_port_status(port, "fscph_2", req,
1554 					    ZFCP_STATUS_COMMON_OPEN,
1555 					    ZFCP_CLEAR);
1556 		break;
1557 	}
1558 }
1559 
1560 /**
1561  * zfcp_fsf_close_port - create and send close port request
1562  * @erp_action: pointer to struct zfcp_erp_action
1563  * Returns: 0 on success, error otherwise
1564  */
1565 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1566 {
1567 	struct qdio_buffer_element *sbale;
1568 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1569 	struct zfcp_fsf_req *req;
1570 	int retval = -EIO;
1571 
1572 	spin_lock_bh(&qdio->req_q_lock);
1573 	if (zfcp_fsf_req_sbal_get(qdio))
1574 		goto out;
1575 
1576 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1577 				  qdio->adapter->pool.erp_req);
1578 
1579 	if (IS_ERR(req)) {
1580 		retval = PTR_ERR(req);
1581 		goto out;
1582 	}
1583 
1584 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1585 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1586 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1587 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1588 
1589 	req->handler = zfcp_fsf_close_port_handler;
1590 	req->data = erp_action->port;
1591 	req->erp_action = erp_action;
1592 	req->qtcb->header.port_handle = erp_action->port->handle;
1593 	erp_action->fsf_req = req;
1594 
1595 	zfcp_fsf_start_erp_timer(req);
1596 	retval = zfcp_fsf_req_send(req);
1597 	if (retval) {
1598 		zfcp_fsf_req_free(req);
1599 		erp_action->fsf_req = NULL;
1600 	}
1601 out:
1602 	spin_unlock_bh(&qdio->req_q_lock);
1603 	return retval;
1604 }
1605 
1606 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1607 {
1608 	struct zfcp_fc_wka_port *wka_port = req->data;
1609 	struct fsf_qtcb_header *header = &req->qtcb->header;
1610 
1611 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1612 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1613 		goto out;
1614 	}
1615 
1616 	switch (header->fsf_status) {
1617 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1618 		dev_warn(&req->adapter->ccw_device->dev,
1619 			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1620 		/* fall through */
1621 	case FSF_ADAPTER_STATUS_AVAILABLE:
1622 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1623 		/* fall through */
1624 	case FSF_ACCESS_DENIED:
1625 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1626 		break;
1627 	case FSF_GOOD:
1628 		wka_port->handle = header->port_handle;
1629 		/* fall through */
1630 	case FSF_PORT_ALREADY_OPEN:
1631 		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1632 	}
1633 out:
1634 	wake_up(&wka_port->completion_wq);
1635 }
1636 
1637 /**
1638  * zfcp_fsf_open_wka_port - create and send open wka-port request
1639  * @wka_port: pointer to struct zfcp_fc_wka_port
1640  * Returns: 0 on success, error otherwise
1641  */
1642 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1643 {
1644 	struct qdio_buffer_element *sbale;
1645 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1646 	struct zfcp_fsf_req *req;
1647 	int retval = -EIO;
1648 
1649 	spin_lock_bh(&qdio->req_q_lock);
1650 	if (zfcp_fsf_req_sbal_get(qdio))
1651 		goto out;
1652 
1653 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1654 				  qdio->adapter->pool.erp_req);
1655 
1656 	if (unlikely(IS_ERR(req))) {
1657 		retval = PTR_ERR(req);
1658 		goto out;
1659 	}
1660 
1661 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1662 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1663 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1664 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1665 
1666 	req->handler = zfcp_fsf_open_wka_port_handler;
1667 	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1668 	req->data = wka_port;
1669 
1670 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1671 	retval = zfcp_fsf_req_send(req);
1672 	if (retval)
1673 		zfcp_fsf_req_free(req);
1674 out:
1675 	spin_unlock_bh(&qdio->req_q_lock);
1676 	return retval;
1677 }
1678 
1679 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1680 {
1681 	struct zfcp_fc_wka_port *wka_port = req->data;
1682 
1683 	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1684 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1685 		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
1686 	}
1687 
1688 	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1689 	wake_up(&wka_port->completion_wq);
1690 }
1691 
1692 /**
1693  * zfcp_fsf_close_wka_port - create and send close wka port request
1694  * @wka_port: WKA port to close
1695  * Returns: 0 on success, error otherwise
1696  */
1697 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1698 {
1699 	struct qdio_buffer_element *sbale;
1700 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1701 	struct zfcp_fsf_req *req;
1702 	int retval = -EIO;
1703 
1704 	spin_lock_bh(&qdio->req_q_lock);
1705 	if (zfcp_fsf_req_sbal_get(qdio))
1706 		goto out;
1707 
1708 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1709 				  qdio->adapter->pool.erp_req);
1710 
1711 	if (unlikely(IS_ERR(req))) {
1712 		retval = PTR_ERR(req);
1713 		goto out;
1714 	}
1715 
1716 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1717 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1718 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1719 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1720 
1721 	req->handler = zfcp_fsf_close_wka_port_handler;
1722 	req->data = wka_port;
1723 	req->qtcb->header.port_handle = wka_port->handle;
1724 
1725 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1726 	retval = zfcp_fsf_req_send(req);
1727 	if (retval)
1728 		zfcp_fsf_req_free(req);
1729 out:
1730 	spin_unlock_bh(&qdio->req_q_lock);
1731 	return retval;
1732 }
1733 
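/*
 * Handler for close physical port: the port loses its physical-open state,
 * so the open status of its units is cleared as well.
 */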
1734 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1735 {
1736 	struct zfcp_port *port = req->data;
1737 	struct fsf_qtcb_header *header = &req->qtcb->header;
1738 	struct zfcp_unit *unit;
1739 
1740 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1741 		return;
1742 
1743 	switch (header->fsf_status) {
1744 	case FSF_PORT_HANDLE_NOT_VALID:
1745 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req);
1746 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1747 		break;
1748 	case FSF_ACCESS_DENIED:
1749 		zfcp_fsf_access_denied_port(req, port);
1750 		break;
1751 	case FSF_PORT_BOXED:
1752 		/* can't use generic zfcp_erp_modify_port_status because
1753 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1754 		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1755 		read_lock(&port->unit_list_lock);
1756 		list_for_each_entry(unit, &port->unit_list, list)
1757 			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1758 					  &unit->status);
1759 		read_unlock(&port->unit_list_lock);
1760 		zfcp_erp_port_boxed(port, "fscpph2", req);
1761 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1762 		break;
1763 	case FSF_ADAPTER_STATUS_AVAILABLE:
1764 		switch (header->fsf_status_qual.word[0]) {
1765 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1766 			/* fall through */
1767 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1768 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1769 			break;
1770 		}
1771 		break;
1772 	case FSF_GOOD:
1773 		/* can't use generic zfcp_erp_modify_port_status because
1774 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1775 		 */
1776 		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1777 		read_lock(&port->unit_list_lock);
1778 		list_for_each_entry(unit, &port->unit_list, list)
1779 			atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1780 					  &unit->status);
1781 		read_unlock(&port->unit_list_lock);
1782 		break;
1783 	}
1784 }
1785 
1786 /**
1787  * zfcp_fsf_close_physical_port - close physical port
1788  * @erp_action: pointer to struct zfcp_erp_action
1789  * Returns: 0 on success, error otherwise
1790  */
1791 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1792 {
1793 	struct qdio_buffer_element *sbale;
1794 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1795 	struct zfcp_fsf_req *req;
1796 	int retval = -EIO;
1797 
1798 	spin_lock_bh(&qdio->req_q_lock);
1799 	if (zfcp_fsf_req_sbal_get(qdio))
1800 		goto out;
1801 
1802 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1803 				  qdio->adapter->pool.erp_req);
1804 
1805 	if (IS_ERR(req)) {
1806 		retval = PTR_ERR(req);
1807 		goto out;
1808 	}
1809 
1810 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1811 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1812 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1813 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1814 
1815 	req->data = erp_action->port;
1816 	req->qtcb->header.port_handle = erp_action->port->handle;
1817 	req->erp_action = erp_action;
1818 	req->handler = zfcp_fsf_close_physical_port_handler;
1819 	erp_action->fsf_req = req;
1820 
1821 	zfcp_fsf_start_erp_timer(req);
1822 	retval = zfcp_fsf_req_send(req);
1823 	if (retval) {
1824 		zfcp_fsf_req_free(req);
1825 		erp_action->fsf_req = NULL;
1826 	}
1827 out:
1828 	spin_unlock_bh(&qdio->req_q_lock);
1829 	return retval;
1830 }
1831 
1832 static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1833 {
1834 	struct zfcp_adapter *adapter = req->adapter;
1835 	struct zfcp_unit *unit = req->data;
1836 	struct fsf_qtcb_header *header = &req->qtcb->header;
1837 	struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1838 	struct fsf_queue_designator *queue_designator =
1839 				&header->fsf_status_qual.fsf_queue_designator;
1840 	int exclusive, readwrite;
1841 
1842 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1843 		return;
1844 
1845 	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1846 			  ZFCP_STATUS_COMMON_ACCESS_BOXED |
1847 			  ZFCP_STATUS_UNIT_SHARED |
1848 			  ZFCP_STATUS_UNIT_READONLY,
1849 			  &unit->status);
1850 
1851 	switch (header->fsf_status) {
1852 
1853 	case FSF_PORT_HANDLE_NOT_VALID:
1854 		zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req);
1855 		/* fall through */
1856 	case FSF_LUN_ALREADY_OPEN:
1857 		break;
1858 	case FSF_ACCESS_DENIED:
1859 		zfcp_fsf_access_denied_unit(req, unit);
1860 		atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1861 		atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1862 		break;
1863 	case FSF_PORT_BOXED:
1864 		zfcp_erp_port_boxed(unit->port, "fsouh_2", req);
1865 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1866 		break;
1867 	case FSF_LUN_SHARING_VIOLATION:
1868 		if (header->fsf_status_qual.word[0])
1869 			dev_warn(&adapter->ccw_device->dev,
1870 				 "LUN 0x%Lx on port 0x%Lx is already in "
1871 				 "use by CSS%d, MIF Image ID %x\n",
1872 				 (unsigned long long)unit->fcp_lun,
1873 				 (unsigned long long)unit->port->wwpn,
1874 				 queue_designator->cssid,
1875 				 queue_designator->hla);
1876 		else
1877 			zfcp_act_eval_err(adapter,
1878 					  header->fsf_status_qual.word[2]);
1879 		zfcp_erp_unit_access_denied(unit, "fsouh_3", req);
1880 		atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1881 		atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1882 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1883 		break;
1884 	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1885 		dev_warn(&adapter->ccw_device->dev,
1886 			 "No handle is available for LUN "
1887 			 "0x%016Lx on port 0x%016Lx\n",
1888 			 (unsigned long long)unit->fcp_lun,
1889 			 (unsigned long long)unit->port->wwpn);
1890 		zfcp_erp_unit_failed(unit, "fsouh_4", req);
1891 		/* fall through */
1892 	case FSF_INVALID_COMMAND_OPTION:
1893 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1894 		break;
1895 	case FSF_ADAPTER_STATUS_AVAILABLE:
1896 		switch (header->fsf_status_qual.word[0]) {
1897 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1898 			zfcp_fc_test_link(unit->port);
1899 			/* fall through */
1900 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1901 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1902 			break;
1903 		}
1904 		break;
1905 
1906 	case FSF_GOOD:
1907 		unit->handle = header->lun_handle;
1908 		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
1909 
1910 		if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
1911 		    (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
1912 		    !zfcp_ccw_priv_sch(adapter)) {
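			/*
			 * Without NPIV, with LUN sharing enabled and outside a
			 * privileged subchannel, the adapter reports the access
			 * mode granted for this LUN.  The two flags evaluated
			 * below give four combinations; only exclusive
			 * read-write and shared read-only are usable, the
			 * remaining two are rejected further down.
			 */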
1913 			exclusive = (bottom->lun_access_info &
1914 					FSF_UNIT_ACCESS_EXCLUSIVE);
1915 			readwrite = (bottom->lun_access_info &
1916 					FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
1917 
1918 			if (!exclusive)
1919 				atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
1920 						&unit->status);
1921 
1922 			if (!readwrite) {
1923 				atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
1924 						&unit->status);
1925 				dev_info(&adapter->ccw_device->dev,
1926 					 "SCSI device at LUN 0x%016Lx on port "
1927 					 "0x%016Lx opened read-only\n",
1928 					 (unsigned long long)unit->fcp_lun,
1929 					 (unsigned long long)unit->port->wwpn);
1930 			}
1931 
1932 			if (exclusive && !readwrite) {
1933 				dev_err(&adapter->ccw_device->dev,
1934 					"Exclusive read-only access not "
1935 					"supported (unit 0x%016Lx, "
1936 					"port 0x%016Lx)\n",
1937 					(unsigned long long)unit->fcp_lun,
1938 					(unsigned long long)unit->port->wwpn);
1939 				zfcp_erp_unit_failed(unit, "fsouh_5", req);
1940 				req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1941 				zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req);
1942 			} else if (!exclusive && readwrite) {
1943 				dev_err(&adapter->ccw_device->dev,
1944 					"Shared read-write access not "
1945 					"supported (unit 0x%016Lx, port "
1946 					"0x%016Lx)\n",
1947 					(unsigned long long)unit->fcp_lun,
1948 					(unsigned long long)unit->port->wwpn);
1949 				zfcp_erp_unit_failed(unit, "fsouh_7", req);
1950 				req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1951 				zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req);
1952 			}
1953 		}
1954 		break;
1955 	}
1956 }
1957 
1958 /**
1959  * zfcp_fsf_open_unit - open unit
1960  * @erp_action: pointer to struct zfcp_erp_action
1961  * Returns: 0 on success, error otherwise
1962  */
1963 int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1964 {
1965 	struct qdio_buffer_element *sbale;
1966 	struct zfcp_adapter *adapter = erp_action->adapter;
1967 	struct zfcp_qdio *qdio = adapter->qdio;
1968 	struct zfcp_fsf_req *req;
1969 	int retval = -EIO;
1970 
1971 	spin_lock_bh(&qdio->req_q_lock);
1972 	if (zfcp_fsf_req_sbal_get(qdio))
1973 		goto out;
1974 
1975 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1976 				  adapter->pool.erp_req);
1977 
1978 	if (IS_ERR(req)) {
1979 		retval = PTR_ERR(req);
1980 		goto out;
1981 	}
1982 
1983 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1984 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1985 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1986 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1987 
1988 	req->qtcb->header.port_handle = erp_action->port->handle;
1989 	req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
1990 	req->handler = zfcp_fsf_open_unit_handler;
1991 	req->data = erp_action->unit;
1992 	req->erp_action = erp_action;
1993 	erp_action->fsf_req = req;
1994 
1995 	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1996 		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1997 
1998 	zfcp_fsf_start_erp_timer(req);
1999 	retval = zfcp_fsf_req_send(req);
2000 	if (retval) {
2001 		zfcp_fsf_req_free(req);
2002 		erp_action->fsf_req = NULL;
2003 	}
2004 out:
2005 	spin_unlock_bh(&qdio->req_q_lock);
2006 	return retval;
2007 }
2008 
2009 static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
2010 {
2011 	struct zfcp_unit *unit = req->data;
2012 
2013 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2014 		return;
2015 
2016 	switch (req->qtcb->header.fsf_status) {
2017 	case FSF_PORT_HANDLE_NOT_VALID:
2018 		zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req);
2019 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2020 		break;
2021 	case FSF_LUN_HANDLE_NOT_VALID:
2022 		zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req);
2023 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2024 		break;
2025 	case FSF_PORT_BOXED:
2026 		zfcp_erp_port_boxed(unit->port, "fscuh_3", req);
2027 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2028 		break;
2029 	case FSF_ADAPTER_STATUS_AVAILABLE:
2030 		switch (req->qtcb->header.fsf_status_qual.word[0]) {
2031 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2032 			zfcp_fc_test_link(unit->port);
2033 			/* fall through */
2034 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2035 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2036 			break;
2037 		}
2038 		break;
2039 	case FSF_GOOD:
2040 		atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
2041 		break;
2042 	}
2043 }
2044 
2045 /**
2046  * zfcp_fsf_close_unit - close zfcp unit
2047  * @erp_action: pointer to struct zfcp_erp_action
2048  * Returns: 0 on success, error otherwise
2049  */
2050 int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
2051 {
2052 	struct qdio_buffer_element *sbale;
2053 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
2054 	struct zfcp_fsf_req *req;
2055 	int retval = -EIO;
2056 
2057 	spin_lock_bh(&qdio->req_q_lock);
2058 	if (zfcp_fsf_req_sbal_get(qdio))
2059 		goto out;
2060 
2061 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
2062 				  qdio->adapter->pool.erp_req);
2063 
2064 	if (IS_ERR(req)) {
2065 		retval = PTR_ERR(req);
2066 		goto out;
2067 	}
2068 
2069 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2070 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
2071 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2072 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2073 
2074 	req->qtcb->header.port_handle = erp_action->port->handle;
2075 	req->qtcb->header.lun_handle = erp_action->unit->handle;
2076 	req->handler = zfcp_fsf_close_unit_handler;
2077 	req->data = erp_action->unit;
2078 	req->erp_action = erp_action;
2079 	erp_action->fsf_req = req;
2080 
2081 	zfcp_fsf_start_erp_timer(req);
2082 	retval = zfcp_fsf_req_send(req);
2083 	if (retval) {
2084 		zfcp_fsf_req_free(req);
2085 		erp_action->fsf_req = NULL;
2086 	}
2087 out:
2088 	spin_unlock_bh(&qdio->req_q_lock);
2089 	return retval;
2090 }
2091 
2092 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
2093 {
2094 	lat_rec->sum += lat;
2095 	lat_rec->min = min(lat_rec->min, lat);
2096 	lat_rec->max = max(lat_rec->max, lat);
2097 }
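/*
 * Note on units: the latency values fed into zfcp_fsf_update_lat() are the
 * raw channel/fabric counters taken from the FSF protocol status qualifier;
 * they are only scaled by adapter->timer_ticks when copied into the blktrace
 * record below.  Consumers of unit->latencies (e.g. the latency sysfs
 * attributes, an assumption about code outside this file) are expected to do
 * their own scaling and to derive averages as sum / counter.
 */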
2098 
2099 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2100 {
2101 	struct fsf_qual_latency_info *lat_in;
2102 	struct latency_cont *lat = NULL;
2103 	struct zfcp_unit *unit = req->unit;
2104 	struct zfcp_blk_drv_data blktrc;
2105 	int ticks = req->adapter->timer_ticks;
2106 
2107 	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
2108 
2109 	blktrc.flags = 0;
2110 	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2111 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2112 		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
2113 	blktrc.inb_usage = req->queue_req.qdio_inb_usage;
2114 	blktrc.outb_usage = req->queue_req.qdio_outb_usage;
2115 
2116 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
2117 		blktrc.flags |= ZFCP_BLK_LAT_VALID;
2118 		blktrc.channel_lat = lat_in->channel_lat * ticks;
2119 		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
2120 
2121 		switch (req->qtcb->bottom.io.data_direction) {
2122 		case FSF_DATADIR_READ:
2123 			lat = &unit->latencies.read;
2124 			break;
2125 		case FSF_DATADIR_WRITE:
2126 			lat = &unit->latencies.write;
2127 			break;
2128 		case FSF_DATADIR_CMND:
2129 			lat = &unit->latencies.cmd;
2130 			break;
2131 		}
2132 
2133 		if (lat) {
2134 			spin_lock(&unit->latencies.lock);
2135 			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2136 			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2137 			lat->counter++;
2138 			spin_unlock(&unit->latencies.lock);
2139 		}
2140 	}
2141 
2142 	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
2143 			    sizeof(blktrc));
2144 }
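/*
 * The zfcp_blk_drv_data record attached above travels with the block layer
 * trace stream; a userspace consumer can recognize it by its magic value.
 * A sketch of such a check (the tool side, including the report() helper,
 * is an assumption; only the fields used above come from this driver):
 *
 *	struct zfcp_blk_drv_data *p = payload;
 *	if (len >= sizeof(*p) && p->magic == ZFCP_BLK_DRV_DATA_MAGIC &&
 *	    (p->flags & ZFCP_BLK_LAT_VALID))
 *		report(p->channel_lat, p->fabric_lat);
 *
 * inb_usage and outb_usage record the response respectively request queue
 * fill levels at completion and submission time of the request.
 */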
2145 
2146 static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2147 {
2148 	struct scsi_cmnd *scpnt;
2149 	struct fcp_resp_with_ext *fcp_rsp;
2150 	unsigned long flags;
2151 
2152 	read_lock_irqsave(&req->adapter->abort_lock, flags);
2153 
2154 	scpnt = req->data;
2155 	if (unlikely(!scpnt)) {
2156 		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2157 		return;
2158 	}
2159 
2160 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2161 		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2162 		goto skip_fsfstatus;
2163 	}
2164 
2165 	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2166 	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2167 
2168 	zfcp_fsf_req_trace(req, scpnt);
2169 
2170 skip_fsfstatus:
2171 	if (scpnt->result != 0)
2172 		zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
2173 	else if (scpnt->retries > 0)
2174 		zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
2175 	else
2176 		zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);
2177 
2178 	scpnt->host_scribble = NULL;
2179 	(scpnt->scsi_done) (scpnt);
2180 	/*
2181 	 * We must hold this lock until scsi_done has been called.
2182 	 * Otherwise we may call scsi_done after an abort for this
2183 	 * command has already completed.
2184 	 * Note: scsi_done must not block!
2185 	 */
2186 	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2187 }
2188 
2189 static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
2190 {
2191 	struct fcp_resp_with_ext *fcp_rsp;
2192 	struct fcp_resp_rsp_info *rsp_info;
2193 
2194 	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
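	/* the response-info fields follow the fixed fcp_resp_with_ext part */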
2195 	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2196 
2197 	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2198 	     (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2199 		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2200 }
2201 
2202 
2203 static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2204 {
2205 	struct zfcp_unit *unit;
2206 	struct fsf_qtcb_header *header = &req->qtcb->header;
2207 
2208 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
2209 		unit = req->data;
2210 	else
2211 		unit = req->unit;
2212 
2213 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2214 		goto skip_fsfstatus;
2215 
2216 	switch (header->fsf_status) {
2217 	case FSF_HANDLE_MISMATCH:
2218 	case FSF_PORT_HANDLE_NOT_VALID:
2219 		zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req);
2220 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2221 		break;
2222 	case FSF_FCPLUN_NOT_VALID:
2223 	case FSF_LUN_HANDLE_NOT_VALID:
2224 		zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req);
2225 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2226 		break;
2227 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2228 		zfcp_fsf_class_not_supp(req);
2229 		break;
2230 	case FSF_ACCESS_DENIED:
2231 		zfcp_fsf_access_denied_unit(req, unit);
2232 		break;
2233 	case FSF_DIRECTION_INDICATOR_NOT_VALID:
2234 		dev_err(&req->adapter->ccw_device->dev,
2235 			"Incorrect direction %d, unit 0x%016Lx on port "
2236 			"0x%016Lx closed\n",
2237 			req->qtcb->bottom.io.data_direction,
2238 			(unsigned long long)unit->fcp_lun,
2239 			(unsigned long long)unit->port->wwpn);
2240 		zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3",
2241 					  req);
2242 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2243 		break;
2244 	case FSF_CMND_LENGTH_NOT_VALID:
2245 		dev_err(&req->adapter->ccw_device->dev,
2246 			"Incorrect CDB length %d, unit 0x%016Lx on "
2247 			"port 0x%016Lx closed\n",
2248 			req->qtcb->bottom.io.fcp_cmnd_length,
2249 			(unsigned long long)unit->fcp_lun,
2250 			(unsigned long long)unit->port->wwpn);
2251 		zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4",
2252 					  req);
2253 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2254 		break;
2255 	case FSF_PORT_BOXED:
2256 		zfcp_erp_port_boxed(unit->port, "fssfch5", req);
2257 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2258 		break;
2259 	case FSF_LUN_BOXED:
2260 		zfcp_erp_unit_boxed(unit, "fssfch6", req);
2261 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2262 		break;
2263 	case FSF_ADAPTER_STATUS_AVAILABLE:
2264 		if (header->fsf_status_qual.word[0] ==
2265 		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2266 			zfcp_fc_test_link(unit->port);
2267 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2268 		break;
2269 	}
2270 skip_fsfstatus:
2271 	if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
2272 		zfcp_fsf_send_fcp_ctm_handler(req);
2273 	else {
2274 		zfcp_fsf_send_fcp_command_task_handler(req);
2275 		req->unit = NULL;
2276 		put_device(&unit->sysfs_device);
2277 	}
2278 }
2279 
2280 /**
2281  * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
2282  * @unit: unit where command is sent to
2283  * @scsi_cmnd: scsi command to be sent
 * Returns: 0 on success, error otherwise
2284  */
2285 int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2286 				   struct scsi_cmnd *scsi_cmnd)
2287 {
2288 	struct zfcp_fsf_req *req;
2289 	struct fcp_cmnd *fcp_cmnd;
2290 	unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
2291 	int real_bytes, retval = -EIO;
2292 	struct zfcp_adapter *adapter = unit->port->adapter;
2293 	struct zfcp_qdio *qdio = adapter->qdio;
2294 
2295 	if (unlikely(!(atomic_read(&unit->status) &
2296 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
2297 		return -EBUSY;
2298 
2299 	spin_lock(&qdio->req_q_lock);
2300 	if (atomic_read(&qdio->req_q.count) <= 0) {
2301 		atomic_inc(&qdio->req_q_full);
2302 		goto out;
2303 	}
2304 
2305 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2306 				  adapter->pool.scsi_req);
2307 
2308 	if (IS_ERR(req)) {
2309 		retval = PTR_ERR(req);
2310 		goto out;
2311 	}
2312 
2313 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2314 	get_device(&unit->sysfs_device);
2315 	req->unit = unit;
2316 	req->data = scsi_cmnd;
2317 	req->handler = zfcp_fsf_send_fcp_command_handler;
2318 	req->qtcb->header.lun_handle = unit->handle;
2319 	req->qtcb->header.port_handle = unit->port->handle;
2320 	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2321 	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2322 
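	/*
	 * Remember the request ID in host_scribble so that a later abort for
	 * this SCSI command can look up the matching FSF request again.
	 */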
2323 	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2324 
2325 	/*
2326 	 * Set, depending on the data direction:
2327 	 *      the data direction bits in the SBALE (SBAL type) and
2328 	 *      the data direction bits in the QTCB.
2329 	 */
2330 	switch (scsi_cmnd->sc_data_direction) {
2331 	case DMA_NONE:
2332 		req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2333 		break;
2334 	case DMA_FROM_DEVICE:
2335 		req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
2336 		break;
2337 	case DMA_TO_DEVICE:
2338 		req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
2339 		sbtype = SBAL_FLAGS0_TYPE_WRITE;
2340 		break;
2341 	case DMA_BIDIRECTIONAL:
2342 		goto failed_scsi_cmnd;
2343 	}
2344 
2345 	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2346 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2347 
2348 	real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
2349 					     scsi_sglist(scsi_cmnd),
2350 					     FSF_MAX_SBALS_PER_REQ);
2351 	if (unlikely(real_bytes < 0)) {
2352 		if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
2353 			dev_err(&adapter->ccw_device->dev,
2354 				"Oversize data package, unit 0x%016Lx "
2355 				"on port 0x%016Lx closed\n",
2356 				(unsigned long long)unit->fcp_lun,
2357 				(unsigned long long)unit->port->wwpn);
2358 			zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req);
2359 			retval = -EINVAL;
2360 		}
2361 		goto failed_scsi_cmnd;
2362 	}
2363 
2364 	retval = zfcp_fsf_req_send(req);
2365 	if (unlikely(retval))
2366 		goto failed_scsi_cmnd;
2367 
2368 	goto out;
2369 
2370 failed_scsi_cmnd:
2371 	put_device(&unit->sysfs_device);
2372 	zfcp_fsf_req_free(req);
2373 	scsi_cmnd->host_scribble = NULL;
2374 out:
2375 	spin_unlock(&qdio->req_q_lock);
2376 	return retval;
2377 }
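/*
 * Return-value contract of zfcp_fsf_send_fcp_command_task(), as seen from the
 * SCSI queuecommand path (the caller-side mapping is an assumption about code
 * outside this file): -EBUSY means the unit is currently blocked and the
 * command should be retried by the midlayer; any other error means the
 * request could not be issued and the command likewise has to be requeued;
 * 0 means the command is in flight and will be completed through
 * zfcp_fsf_send_fcp_command_handler().
 */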
2378 
2379 /**
2380  * zfcp_fsf_send_fcp_ctm - send SCSI task management command
2381  * @unit: pointer to struct zfcp_unit
2382  * @tm_flags: unsigned byte for task management flags
2383  * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
2384  */
2385 struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2386 {
2387 	struct qdio_buffer_element *sbale;
2388 	struct zfcp_fsf_req *req = NULL;
2389 	struct fcp_cmnd *fcp_cmnd;
2390 	struct zfcp_qdio *qdio = unit->port->adapter->qdio;
2391 
2392 	if (unlikely(!(atomic_read(&unit->status) &
2393 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
2394 		return NULL;
2395 
2396 	spin_lock_bh(&qdio->req_q_lock);
2397 	if (zfcp_fsf_req_sbal_get(qdio))
2398 		goto out;
2399 
2400 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2401 				  qdio->adapter->pool.scsi_req);
2402 
2403 	if (IS_ERR(req)) {
2404 		req = NULL;
2405 		goto out;
2406 	}
2407 
2408 	req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
2409 	req->data = unit;
2410 	req->handler = zfcp_fsf_send_fcp_command_handler;
2411 	req->qtcb->header.lun_handle = unit->handle;
2412 	req->qtcb->header.port_handle = unit->port->handle;
2413 	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2414 	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2415 	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2416 
2417 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
2418 	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
2419 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2420 
2421 	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2422 	zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags);
2423 
2424 	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2425 	if (!zfcp_fsf_req_send(req))
2426 		goto out;
2427 
2428 	zfcp_fsf_req_free(req);
2429 	req = NULL;
2430 out:
2431 	spin_unlock_bh(&qdio->req_q_lock);
2432 	return req;
2433 }
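/*
 * A condensed sketch of how the task-management request above is typically
 * consumed (the caller lives in the SCSI error-handling code; anything not
 * used as an identifier in this file is an assumption):
 *
 *	req = zfcp_fsf_send_fcp_ctm(unit, tm_flags);
 *	if (!req)
 *		return FAILED;
 *	wait_for_completion(&req->completion);
 *	retval = (req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) ?
 *			FAILED : SUCCESS;
 *	zfcp_fsf_req_free(req);
 *
 * Task-management responses are routed through
 * zfcp_fsf_send_fcp_ctm_handler(), which sets the TMFUNCFAILED flag.
 */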
2434 
2435 static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
2436 {
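	/*
	 * Intentionally empty: zfcp_fsf_control_file() below waits for the
	 * request to complete and hands the whole FSF request back to its
	 * caller, which evaluates the QTCB itself.
	 */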
2437 }
2438 
2439 /**
2440  * zfcp_fsf_control_file - control file upload/download
2441  * @adapter: pointer to struct zfcp_adapter
2442  * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
2443  * Returns: on success pointer to struct zfcp_fsf_req, ERR_PTR() on failure
2444  */
2445 struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2446 					   struct zfcp_fsf_cfdc *fsf_cfdc)
2447 {
2448 	struct qdio_buffer_element *sbale;
2449 	struct zfcp_qdio *qdio = adapter->qdio;
2450 	struct zfcp_fsf_req *req = NULL;
2451 	struct fsf_qtcb_bottom_support *bottom;
2452 	int direction, retval = -EIO, bytes;
2453 
2454 	if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2455 		return ERR_PTR(-EOPNOTSUPP);
2456 
2457 	switch (fsf_cfdc->command) {
2458 	case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2459 		direction = SBAL_FLAGS0_TYPE_WRITE;
2460 		break;
2461 	case FSF_QTCB_UPLOAD_CONTROL_FILE:
2462 		direction = SBAL_FLAGS0_TYPE_READ;
2463 		break;
2464 	default:
2465 		return ERR_PTR(-EINVAL);
2466 	}
2467 
2468 	spin_lock_bh(&qdio->req_q_lock);
2469 	if (zfcp_fsf_req_sbal_get(qdio))
2470 		goto out;
2471 
2472 	req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL);
2473 	if (IS_ERR(req)) {
2474 		retval = -EPERM;
2475 		goto out;
2476 	}
2477 
2478 	req->handler = zfcp_fsf_control_file_handler;
2479 
2480 	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
2481 	sbale[0].flags |= direction;
2482 
2483 	bottom = &req->qtcb->bottom.support;
2484 	bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2485 	bottom->option = fsf_cfdc->option;
2486 
2487 	bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req,
2488 					direction, fsf_cfdc->sg,
2489 					FSF_MAX_SBALS_PER_REQ);
2490 	if (bytes != ZFCP_CFDC_MAX_SIZE) {
2491 		zfcp_fsf_req_free(req);
2492 		goto out;
2493 	}
2494 
2495 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2496 	retval = zfcp_fsf_req_send(req);
2497 out:
2498 	spin_unlock_bh(&qdio->req_q_lock);
2499 
2500 	if (!retval) {
2501 		wait_for_completion(&req->completion);
2502 		return req;
2503 	}
2504 	return ERR_PTR(retval);
2505 }
2506 
2507 /**
2508  * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2509  * @qdio: pointer to struct zfcp_qdio
2510  * @sbal_idx: response queue index of SBAL to be processed
2511  */
2512 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2513 {
2514 	struct zfcp_adapter *adapter = qdio->adapter;
2515 	struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
2516 	struct qdio_buffer_element *sbale;
2517 	struct zfcp_fsf_req *fsf_req;
2518 	unsigned long flags, req_id;
2519 	int idx;
2520 
2521 	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2522 
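		/*
		 * When the request was built, its request ID was stored in
		 * the SBALE address field; use it to look the request up
		 * again in the adapter's request list.
		 */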
2523 		sbale = &sbal->element[idx];
2524 		req_id = (unsigned long) sbale->addr;
2525 		spin_lock_irqsave(&adapter->req_list_lock, flags);
2526 		fsf_req = zfcp_reqlist_find(adapter, req_id);
2527 
2528 		if (!fsf_req)
2529 			/*
2530 			 * Unknown request means that we potentially have memory
2531 			 * corruption and must stop the machine immediately.
2532 			 */
2533 			panic("error: unknown req_id (%lx) on adapter %s.\n",
2534 			      req_id, dev_name(&adapter->ccw_device->dev));
2535 
2536 		list_del(&fsf_req->list);
2537 		spin_unlock_irqrestore(&adapter->req_list_lock, flags);
2538 
2539 		fsf_req->queue_req.sbal_response = sbal_idx;
2540 		fsf_req->queue_req.qdio_inb_usage =
2541 			atomic_read(&qdio->resp_q.count);
2542 		zfcp_fsf_req_complete(fsf_req);
2543 
2544 		if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
2545 			break;
2546 	}
2547 }
2548