1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * zfcp device driver
4  *
5  * Implementation of FSF commands.
6  *
7  * Copyright IBM Corp. 2002, 2023
8  */
9 
10 #define KMSG_COMPONENT "zfcp"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 
13 #include <linux/blktrace_api.h>
14 #include <linux/jiffies.h>
15 #include <linux/types.h>
16 #include <linux/slab.h>
17 #include <scsi/fc/fc_els.h>
18 #include "zfcp_ext.h"
19 #include "zfcp_fc.h"
20 #include "zfcp_dbf.h"
21 #include "zfcp_qdio.h"
22 #include "zfcp_reqlist.h"
23 #include "zfcp_diag.h"
24 
25 /* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
26 #define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
27 /* timeout for: exchange config/port data outside ERP, or open/close WKA port */
28 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
29 
30 struct kmem_cache *zfcp_fsf_qtcb_cache;
31 
32 static bool ber_stop = true;
33 module_param(ber_stop, bool, 0600);
34 MODULE_PARM_DESC(ber_stop,
35 		 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of the threshold (default on)");
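
/*
 * Editorial note (not part of the original source): because the parameter is
 * registered with mode 0600, it can typically be changed at runtime via
 * sysfs, e.g. "echo 0 > /sys/module/zfcp/parameters/ber_stop", passed as a
 * module option (ber_stop=0) when loading zfcp, or given as zfcp.ber_stop=0
 * on the kernel command line for a built-in driver.
 */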
36 
37 static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
38 {
39 	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
40 	struct zfcp_adapter *adapter = fsf_req->adapter;
41 
42 	zfcp_qdio_siosl(adapter);
43 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
44 				"fsrth_1");
45 }
46 
47 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
48 				 unsigned long timeout)
49 {
50 	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
51 	fsf_req->timer.expires = jiffies + timeout;
52 	add_timer(&fsf_req->timer);
53 }
54 
55 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
56 {
57 	BUG_ON(!fsf_req->erp_action);
58 	fsf_req->timer.function = zfcp_erp_timeout_handler;
59 	fsf_req->timer.expires = jiffies + 30 * HZ;
60 	add_timer(&fsf_req->timer);
61 }
62 
63 /* association between FSF command and FSF QTCB type */
64 static u32 fsf_qtcb_type[] = {
65 	[FSF_QTCB_FCP_CMND] =             FSF_IO_COMMAND,
66 	[FSF_QTCB_ABORT_FCP_CMND] =       FSF_SUPPORT_COMMAND,
67 	[FSF_QTCB_OPEN_PORT_WITH_DID] =   FSF_SUPPORT_COMMAND,
68 	[FSF_QTCB_OPEN_LUN] =             FSF_SUPPORT_COMMAND,
69 	[FSF_QTCB_CLOSE_LUN] =            FSF_SUPPORT_COMMAND,
70 	[FSF_QTCB_CLOSE_PORT] =           FSF_SUPPORT_COMMAND,
71 	[FSF_QTCB_CLOSE_PHYSICAL_PORT] =  FSF_SUPPORT_COMMAND,
72 	[FSF_QTCB_SEND_ELS] =             FSF_SUPPORT_COMMAND,
73 	[FSF_QTCB_SEND_GENERIC] =         FSF_SUPPORT_COMMAND,
74 	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
75 	[FSF_QTCB_EXCHANGE_PORT_DATA] =   FSF_PORT_COMMAND,
76 	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
77 	[FSF_QTCB_UPLOAD_CONTROL_FILE] =  FSF_SUPPORT_COMMAND
78 };
79 
80 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
81 {
82 	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
83 		"operational because of an unsupported FC class\n");
84 	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
85 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
86 }
87 
88 /**
89  * zfcp_fsf_req_free - free memory used by fsf request
90  * @req: pointer to struct zfcp_fsf_req
91  */
92 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
93 {
94 	if (likely(req->pool)) {
95 		if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
96 			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
97 		mempool_free(req, req->pool);
98 		return;
99 	}
100 
101 	if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
102 		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
103 	kfree(req);
104 }
105 
106 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
107 {
108 	unsigned long flags;
109 	struct fsf_status_read_buffer *sr_buf = req->data;
110 	struct zfcp_adapter *adapter = req->adapter;
111 	struct zfcp_port *port;
112 	int d_id = ntoh24(sr_buf->d_id);
113 
114 	read_lock_irqsave(&adapter->port_list_lock, flags);
115 	list_for_each_entry(port, &adapter->port_list, list)
116 		if (port->d_id == d_id) {
117 			zfcp_erp_port_reopen(port, 0, "fssrpc1");
118 			break;
119 		}
120 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
121 }
122 
123 void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
124 {
125 	struct Scsi_Host *shost = adapter->scsi_host;
126 
127 	adapter->hydra_version = 0;
128 	adapter->peer_wwpn = 0;
129 	adapter->peer_wwnn = 0;
130 	adapter->peer_d_id = 0;
131 
132 	/* if there is no shost yet, we have nothing to zero-out */
133 	if (shost == NULL)
134 		return;
135 
136 	fc_host_port_id(shost) = 0;
137 	fc_host_fabric_name(shost) = 0;
138 	fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
139 	fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
140 	snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0);
141 	memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE);
142 }
143 
144 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
145 					 struct fsf_link_down_info *link_down)
146 {
147 	struct zfcp_adapter *adapter = req->adapter;
148 
149 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
150 		return;
151 
152 	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
153 
154 	zfcp_scsi_schedule_rports_block(adapter);
155 
156 	zfcp_fsf_fc_host_link_down(adapter);
157 
158 	if (!link_down)
159 		goto out;
160 
161 	switch (link_down->error_code) {
162 	case FSF_PSQ_LINK_NO_LIGHT:
163 		dev_warn(&req->adapter->ccw_device->dev,
164 			 "There is no light signal from the local "
165 			 "fibre channel cable\n");
166 		break;
167 	case FSF_PSQ_LINK_WRAP_PLUG:
168 		dev_warn(&req->adapter->ccw_device->dev,
169 			 "There is a wrap plug instead of a fibre "
170 			 "channel cable\n");
171 		break;
172 	case FSF_PSQ_LINK_NO_FCP:
173 		dev_warn(&req->adapter->ccw_device->dev,
174 			 "The adjacent fibre channel node does not "
175 			 "support FCP\n");
176 		break;
177 	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
178 		dev_warn(&req->adapter->ccw_device->dev,
179 			 "The FCP device is suspended because of a "
180 			 "firmware update\n");
181 		break;
182 	case FSF_PSQ_LINK_INVALID_WWPN:
183 		dev_warn(&req->adapter->ccw_device->dev,
184 			 "The FCP device detected a WWPN that is "
185 			 "duplicate or not valid\n");
186 		break;
187 	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
188 		dev_warn(&req->adapter->ccw_device->dev,
189 			 "The fibre channel fabric does not support NPIV\n");
190 		break;
191 	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
192 		dev_warn(&req->adapter->ccw_device->dev,
193 			 "The FCP adapter cannot support more NPIV ports\n");
194 		break;
195 	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
196 		dev_warn(&req->adapter->ccw_device->dev,
197 			 "The adjacent switch cannot support "
198 			 "more NPIV ports\n");
199 		break;
200 	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
201 		dev_warn(&req->adapter->ccw_device->dev,
202 			 "The FCP adapter could not log in to the "
203 			 "fibre channel fabric\n");
204 		break;
205 	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
206 		dev_warn(&req->adapter->ccw_device->dev,
207 			 "The WWPN assignment file on the FCP adapter "
208 			 "has been damaged\n");
209 		break;
210 	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
211 		dev_warn(&req->adapter->ccw_device->dev,
212 			 "The mode table on the FCP adapter "
213 			 "has been damaged\n");
214 		break;
215 	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
216 		dev_warn(&req->adapter->ccw_device->dev,
217 			 "All NPIV ports on the FCP adapter have "
218 			 "been assigned\n");
219 		break;
220 	default:
221 		dev_warn(&req->adapter->ccw_device->dev,
222 			 "The link between the FCP adapter and "
223 			 "the FC fabric is down\n");
224 	}
225 out:
226 	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
227 }
228 
229 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
230 {
231 	struct fsf_status_read_buffer *sr_buf = req->data;
232 	struct fsf_link_down_info *ldi =
233 		(struct fsf_link_down_info *) &sr_buf->payload;
234 
235 	switch (sr_buf->status_subtype) {
236 	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
237 	case FSF_STATUS_READ_SUB_FDISC_FAILED:
238 		zfcp_fsf_link_down_info_eval(req, ldi);
239 		break;
240 	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
241 		zfcp_fsf_link_down_info_eval(req, NULL);
242 	}
243 }
244 
245 static void
246 zfcp_fsf_status_read_version_change(struct zfcp_adapter *adapter,
247 				    struct fsf_status_read_buffer *sr_buf)
248 {
249 	if (sr_buf->status_subtype == FSF_STATUS_READ_SUB_LIC_CHANGE) {
250 		u32 version = sr_buf->payload.version_change.current_version;
251 
252 		WRITE_ONCE(adapter->fsf_lic_version, version);
253 		snprintf(fc_host_firmware_version(adapter->scsi_host),
254 			 FC_VERSION_STRING_SIZE, "%#08x", version);
255 	}
256 }
257 
258 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
259 {
260 	struct zfcp_adapter *adapter = req->adapter;
261 	struct fsf_status_read_buffer *sr_buf = req->data;
262 
263 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
264 		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
265 		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
266 		zfcp_fsf_req_free(req);
267 		return;
268 	}
269 
270 	zfcp_dbf_hba_fsf_uss("fssrh_4", req);
271 
272 	switch (sr_buf->status_type) {
273 	case FSF_STATUS_READ_PORT_CLOSED:
274 		zfcp_fsf_status_read_port_closed(req);
275 		break;
276 	case FSF_STATUS_READ_INCOMING_ELS:
277 		zfcp_fc_incoming_els(req);
278 		break;
279 	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
280 		break;
281 	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
282 		zfcp_dbf_hba_bit_err("fssrh_3", req);
283 		if (ber_stop) {
284 			dev_warn(&adapter->ccw_device->dev,
285 				 "All paths over this FCP device are disused because of excessive bit errors\n");
286 			zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
287 		} else {
288 			dev_warn(&adapter->ccw_device->dev,
289 				 "The error threshold for checksum statistics has been exceeded\n");
290 		}
291 		break;
292 	case FSF_STATUS_READ_LINK_DOWN:
293 		zfcp_fsf_status_read_link_down(req);
294 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
295 		break;
296 	case FSF_STATUS_READ_LINK_UP:
297 		dev_info(&adapter->ccw_device->dev,
298 			 "The local link has been restored\n");
299 		/* All ports should be marked as ready to run again */
300 		zfcp_erp_set_adapter_status(adapter,
301 					    ZFCP_STATUS_COMMON_RUNNING);
302 		zfcp_erp_adapter_reopen(adapter,
303 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
304 					ZFCP_STATUS_COMMON_ERP_FAILED,
305 					"fssrh_2");
306 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
307 
308 		break;
309 	case FSF_STATUS_READ_NOTIFICATION_LOST:
310 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
311 			zfcp_fc_conditional_port_scan(adapter);
312 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_VERSION_CHANGE)
313 			queue_work(adapter->work_queue,
314 				   &adapter->version_change_lost_work);
315 		break;
316 	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
317 		adapter->adapter_features = sr_buf->payload.word[0];
318 		break;
319 	case FSF_STATUS_READ_VERSION_CHANGE:
320 		zfcp_fsf_status_read_version_change(adapter, sr_buf);
321 		break;
322 	}
323 
324 	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
325 	zfcp_fsf_req_free(req);
326 
327 	atomic_inc(&adapter->stat_miss);
328 	queue_work(adapter->work_queue, &adapter->stat_work);
329 }
330 
331 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
332 {
333 	switch (req->qtcb->header.fsf_status_qual.word[0]) {
334 	case FSF_SQ_FCP_RSP_AVAILABLE:
335 	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
336 	case FSF_SQ_NO_RETRY_POSSIBLE:
337 	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
338 		return;
339 	case FSF_SQ_COMMAND_ABORTED:
340 		break;
341 	case FSF_SQ_NO_RECOM:
342 		dev_err(&req->adapter->ccw_device->dev,
343 			"The FCP adapter reported a problem "
344 			"that cannot be recovered\n");
345 		zfcp_qdio_siosl(req->adapter);
346 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
347 		break;
348 	}
349 	/* all non-return cases set FSFREQ_ERROR */
350 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
351 }
352 
353 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
354 {
355 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
356 		return;
357 
358 	switch (req->qtcb->header.fsf_status) {
359 	case FSF_UNKNOWN_COMMAND:
360 		dev_err(&req->adapter->ccw_device->dev,
361 			"The FCP adapter does not recognize the command 0x%x\n",
362 			req->qtcb->header.fsf_command);
363 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
364 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
365 		break;
366 	case FSF_ADAPTER_STATUS_AVAILABLE:
367 		zfcp_fsf_fsfstatus_qual_eval(req);
368 		break;
369 	}
370 }
371 
372 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
373 {
374 	struct zfcp_adapter *adapter = req->adapter;
375 	struct fsf_qtcb *qtcb = req->qtcb;
376 	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
377 
378 	zfcp_dbf_hba_fsf_response(req);
379 
380 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
381 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
382 		return;
383 	}
384 
385 	switch (qtcb->prefix.prot_status) {
386 	case FSF_PROT_GOOD:
387 	case FSF_PROT_FSF_STATUS_PRESENTED:
388 		return;
389 	case FSF_PROT_QTCB_VERSION_ERROR:
390 		dev_err(&adapter->ccw_device->dev,
391 			"QTCB version 0x%x not supported by FCP adapter "
392 			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
393 			psq->word[0], psq->word[1]);
394 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
395 		break;
396 	case FSF_PROT_ERROR_STATE:
397 	case FSF_PROT_SEQ_NUMB_ERROR:
398 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
399 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
400 		break;
401 	case FSF_PROT_UNSUPP_QTCB_TYPE:
402 		dev_err(&adapter->ccw_device->dev,
403 			"The QTCB type is not supported by the FCP adapter\n");
404 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
405 		break;
406 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
407 		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
408 				&adapter->status);
409 		break;
410 	case FSF_PROT_DUPLICATE_REQUEST_ID:
411 		dev_err(&adapter->ccw_device->dev,
412 			"0x%Lx is an ambiguous request identifier\n",
413 			(unsigned long long)qtcb->bottom.support.req_handle);
414 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
415 		break;
416 	case FSF_PROT_LINK_DOWN:
417 		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
418 		/* go through reopen to flush pending requests */
419 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
420 		break;
421 	case FSF_PROT_REEST_QUEUE:
422 		/* All ports should be marked as ready to run again */
423 		zfcp_erp_set_adapter_status(adapter,
424 					    ZFCP_STATUS_COMMON_RUNNING);
425 		zfcp_erp_adapter_reopen(adapter,
426 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
427 					ZFCP_STATUS_COMMON_ERP_FAILED,
428 					"fspse_8");
429 		break;
430 	default:
431 		dev_err(&adapter->ccw_device->dev,
432 			"0x%x is not a valid transfer protocol status\n",
433 			qtcb->prefix.prot_status);
434 		zfcp_qdio_siosl(adapter);
435 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
436 	}
437 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
438 }
439 
440 /**
441  * zfcp_fsf_req_complete - process completion of an FSF request
442  * @req: The FSF request that has been completed.
443  *
444  * When a request has been completed either from the FCP adapter,
445  * or it has been dismissed due to a queue shutdown, this function
446  * is called to process the completion status and trigger further
447  * events related to the FSF request.
448  * Caller must ensure that the request has been removed from
449  * adapter->req_list, to protect against concurrent modification
450  * by zfcp_erp_strategy_check_fsfreq().
451  */
452 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
453 {
454 	struct zfcp_erp_action *erp_action;
455 
456 	if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
457 		zfcp_fsf_status_read_handler(req);
458 		return;
459 	}
460 
461 	del_timer_sync(&req->timer);
462 	zfcp_fsf_protstatus_eval(req);
463 	zfcp_fsf_fsfstatus_eval(req);
464 	req->handler(req);
465 
466 	erp_action = req->erp_action;
467 	if (erp_action)
468 		zfcp_erp_notify(erp_action, 0);
469 
470 	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
471 		zfcp_fsf_req_free(req);
472 	else
473 		complete(&req->completion);
474 }
475 
476 /**
477  * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
478  * @adapter: pointer to struct zfcp_adapter
479  *
480  * Never ever call this without shutting down the adapter first.
481  * Otherwise the adapter would continue using and corrupting s390 storage.
482  * A BUG_ON() call is included to ensure this is done.
483  * ERP is supposed to be the only user of this function.
484  */
485 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
486 {
487 	struct zfcp_fsf_req *req, *tmp;
488 	LIST_HEAD(remove_queue);
489 
490 	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
491 	zfcp_reqlist_move(adapter->req_list, &remove_queue);
492 
493 	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
494 		list_del(&req->list);
495 		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
496 		zfcp_fsf_req_complete(req);
497 	}
498 }
499 
500 #define ZFCP_FSF_PORTSPEED_1GBIT	(1 <<  0)
501 #define ZFCP_FSF_PORTSPEED_2GBIT	(1 <<  1)
502 #define ZFCP_FSF_PORTSPEED_4GBIT	(1 <<  2)
503 #define ZFCP_FSF_PORTSPEED_10GBIT	(1 <<  3)
504 #define ZFCP_FSF_PORTSPEED_8GBIT	(1 <<  4)
505 #define ZFCP_FSF_PORTSPEED_16GBIT	(1 <<  5)
506 #define ZFCP_FSF_PORTSPEED_32GBIT	(1 <<  6)
507 #define ZFCP_FSF_PORTSPEED_64GBIT	(1 <<  7)
508 #define ZFCP_FSF_PORTSPEED_128GBIT	(1 <<  8)
509 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
510 
511 u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
512 {
513 	u32 fdmi_speed = 0;
514 	if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
515 		fdmi_speed |= FC_PORTSPEED_1GBIT;
516 	if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
517 		fdmi_speed |= FC_PORTSPEED_2GBIT;
518 	if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
519 		fdmi_speed |= FC_PORTSPEED_4GBIT;
520 	if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
521 		fdmi_speed |= FC_PORTSPEED_10GBIT;
522 	if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
523 		fdmi_speed |= FC_PORTSPEED_8GBIT;
524 	if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
525 		fdmi_speed |= FC_PORTSPEED_16GBIT;
526 	if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT)
527 		fdmi_speed |= FC_PORTSPEED_32GBIT;
528 	if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT)
529 		fdmi_speed |= FC_PORTSPEED_64GBIT;
530 	if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT)
531 		fdmi_speed |= FC_PORTSPEED_128GBIT;
532 	if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
533 		fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
534 	return fdmi_speed;
535 }
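
/*
 * Example (editorial, illustrative only): an adapter reporting
 * ZFCP_FSF_PORTSPEED_8GBIT | ZFCP_FSF_PORTSPEED_16GBIT is translated to
 * FC_PORTSPEED_8GBIT | FC_PORTSPEED_16GBIT for the FC transport class;
 * any FSF speed bit not listed above is simply dropped from the result.
 */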
536 
537 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
538 {
539 	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
540 	struct zfcp_adapter *adapter = req->adapter;
541 	struct fc_els_flogi *plogi;
542 
543 	/* adjust pointers for missing command code */
544 	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
545 					- sizeof(u32));
546 
547 	if (req->data)
548 		memcpy(req->data, bottom, sizeof(*bottom));
549 
550 	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
551 	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
552 					 (u16)FSF_STATUS_READS_RECOM);
553 
554 	/* no error return above here, otherwise must fix call chains */
555 	/* do not evaluate invalid fields */
556 	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
557 		return 0;
558 
559 	adapter->hydra_version = bottom->adapter_type;
560 
561 	switch (bottom->fc_topology) {
562 	case FSF_TOPO_P2P:
563 		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
564 		adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
565 		adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
566 		break;
567 	case FSF_TOPO_FABRIC:
568 		break;
569 	case FSF_TOPO_AL:
570 	default:
571 		dev_err(&adapter->ccw_device->dev,
572 			"Unknown or unsupported arbitrated loop "
573 			"fibre channel topology detected\n");
574 		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
575 		return -EIO;
576 	}
577 
578 	return 0;
579 }
580 
581 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
582 {
583 	struct zfcp_adapter *adapter = req->adapter;
584 	struct zfcp_diag_header *const diag_hdr =
585 		&adapter->diagnostics->config_data.header;
586 	struct fsf_qtcb *qtcb = req->qtcb;
587 	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
588 
589 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
590 		return;
591 
592 	adapter->fsf_lic_version = bottom->lic_version;
593 	adapter->adapter_features = bottom->adapter_features;
594 	adapter->connection_features = bottom->connection_features;
595 	adapter->peer_wwpn = 0;
596 	adapter->peer_wwnn = 0;
597 	adapter->peer_d_id = 0;
598 
599 	switch (qtcb->header.fsf_status) {
600 	case FSF_GOOD:
601 		/*
602 		 * usually we wait with an update till the cache is too old,
603 		 * but because we have the data available, update it anyway
604 		 */
605 		zfcp_diag_update_xdata(diag_hdr, bottom, false);
606 
607 		zfcp_scsi_shost_update_config_data(adapter, bottom, false);
608 		if (zfcp_fsf_exchange_config_evaluate(req))
609 			return;
610 
611 		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
612 			dev_err(&adapter->ccw_device->dev,
613 				"FCP adapter maximum QTCB size (%d bytes) "
614 				"is too small\n",
615 				bottom->max_qtcb_size);
616 			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
617 			return;
618 		}
619 		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
620 				&adapter->status);
621 		break;
622 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
623 		zfcp_diag_update_xdata(diag_hdr, bottom, true);
624 		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
625 
626 		/* avoids adapter shutdown to be able to recognize
627 		 * events such as LINK UP */
628 		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
629 				&adapter->status);
630 		zfcp_fsf_link_down_info_eval(req,
631 			&qtcb->header.fsf_status_qual.link_down_info);
632 
633 		zfcp_scsi_shost_update_config_data(adapter, bottom, true);
634 		if (zfcp_fsf_exchange_config_evaluate(req))
635 			return;
636 		break;
637 	default:
638 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
639 		return;
640 	}
641 
642 	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)
643 		adapter->hardware_version = bottom->hardware_version;
644 
645 	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
646 		dev_err(&adapter->ccw_device->dev,
647 			"The FCP adapter only supports newer "
648 			"control block versions\n");
649 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
650 		return;
651 	}
652 	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
653 		dev_err(&adapter->ccw_device->dev,
654 			"The FCP adapter only supports older "
655 			"control block versions\n");
656 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
657 	}
658 }
659 
660 /*
661  * Mapping of FC Endpoint Security flag masks to mnemonics
662  *
663  * NOTE: Update macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH when making any
664  *       changes.
665  */
666 static const struct {
667 	u32	mask;
668 	char	*name;
669 } zfcp_fsf_fc_security_mnemonics[] = {
670 	{ FSF_FC_SECURITY_AUTH,		"Authentication" },
671 	{ FSF_FC_SECURITY_ENC_FCSP2 |
672 	  FSF_FC_SECURITY_ENC_ERAS,	"Encryption" },
673 };
674 
675 /* maximum strlen(zfcp_fsf_fc_security_mnemonics[...].name) + 1 */
676 #define ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH 15
677 
678 /**
679  * zfcp_fsf_scnprint_fc_security() - translate FC Endpoint Security flags into
680  *                                   mnemonics and place in a buffer
681  * @buf        : the buffer to place the translated FC Endpoint Security flag(s)
682  *               into
683  * @size       : the size of the buffer, including the trailing null space
684  * @fc_security: one or more FC Endpoint Security flags, or zero
685  * @fmt        : specifies whether a list or a single item is to be put into the
686  *               buffer
687  *
688  * The Fibre Channel (FC) Endpoint Security flags are translated into mnemonics.
689  * If the FC Endpoint Security flags are zero "none" is placed into the buffer.
690  *
691  * With ZFCP_FSF_PRINT_FMT_LIST the mnemonics are placed as a list separated by
692  * a comma followed by a space into the buffer. If one or more FC Endpoint
693  * Security flags cannot be translated into a mnemonic, as they are undefined
694  * in zfcp_fsf_fc_security_mnemonics, their bitwise ORed value in hexadecimal
695  * representation is placed into the buffer.
696  *
697  * With ZFCP_FSF_PRINT_FMT_SINGLEITEM only one single mnemonic is placed into
698  * the buffer. If the FC Endpoint Security flag cannot be translated, as it is
699  * undefined in zfcp_fsf_fc_security_mnemonics, its value in hexadecimal
700  * representation is placed into the buffer. If more than one FC Endpoint
701  * Security flag was specified, their value in hexadecimal representation is
702  * placed into the buffer. The macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH
703  * can be used to define a buffer that is large enough to hold one mnemonic.
704  *
705  * Return: The number of characters written into buf not including the trailing
706  *         '\0'. If size is == 0 the function returns 0.
707  */
708 ssize_t zfcp_fsf_scnprint_fc_security(char *buf, size_t size, u32 fc_security,
709 				      enum zfcp_fsf_print_fmt fmt)
710 {
711 	const char *prefix = "";
712 	ssize_t len = 0;
713 	int i;
714 
715 	if (fc_security == 0)
716 		return scnprintf(buf, size, "none");
717 	if (fmt == ZFCP_FSF_PRINT_FMT_SINGLEITEM && hweight32(fc_security) != 1)
718 		return scnprintf(buf, size, "0x%08x", fc_security);
719 
720 	for (i = 0; i < ARRAY_SIZE(zfcp_fsf_fc_security_mnemonics); i++) {
721 		if (!(fc_security & zfcp_fsf_fc_security_mnemonics[i].mask))
722 			continue;
723 
724 		len += scnprintf(buf + len, size - len, "%s%s", prefix,
725 				 zfcp_fsf_fc_security_mnemonics[i].name);
726 		prefix = ", ";
727 		fc_security &= ~zfcp_fsf_fc_security_mnemonics[i].mask;
728 	}
729 
730 	if (fc_security != 0)
731 		len += scnprintf(buf + len, size - len, "%s0x%08x",
732 				 prefix, fc_security);
733 
734 	return len;
735 }
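
/*
 * Usage sketch (editorial, illustrative only; it mirrors
 * zfcp_fsf_log_port_fc_security() further below): a caller provides a buffer
 * sized for one mnemonic and requests the single-item format, e.g.
 *
 *	char buf[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
 *
 *	zfcp_fsf_scnprint_fc_security(buf, sizeof(buf), FSF_FC_SECURITY_AUTH,
 *				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
 *
 * which places "Authentication" into buf.
 */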
736 
737 static void zfcp_fsf_dbf_adapter_fc_security(struct zfcp_adapter *adapter,
738 					     struct zfcp_fsf_req *req)
739 {
740 	if (adapter->fc_security_algorithms ==
741 	    adapter->fc_security_algorithms_old) {
742 		/* no change, no trace */
743 		return;
744 	}
745 
746 	zfcp_dbf_hba_fsf_fces("fsfcesa", req, ZFCP_DBF_INVALID_WWPN,
747 			      adapter->fc_security_algorithms_old,
748 			      adapter->fc_security_algorithms);
749 
750 	adapter->fc_security_algorithms_old = adapter->fc_security_algorithms;
751 }
752 
753 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
754 {
755 	struct zfcp_adapter *adapter = req->adapter;
756 	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
757 
758 	if (req->data)
759 		memcpy(req->data, bottom, sizeof(*bottom));
760 
761 	if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
762 		adapter->fc_security_algorithms =
763 			bottom->fc_security_algorithms;
764 	else
765 		adapter->fc_security_algorithms = 0;
766 	zfcp_fsf_dbf_adapter_fc_security(adapter, req);
767 }
768 
769 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
770 {
771 	struct zfcp_diag_header *const diag_hdr =
772 		&req->adapter->diagnostics->port_data.header;
773 	struct fsf_qtcb *qtcb = req->qtcb;
774 	struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port;
775 
776 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
777 		return;
778 
779 	switch (qtcb->header.fsf_status) {
780 	case FSF_GOOD:
781 		/*
782 		 * usually we wait with an update till the cache is too old,
783 		 * but because we have the data available, update it anyway
784 		 */
785 		zfcp_diag_update_xdata(diag_hdr, bottom, false);
786 
787 		zfcp_scsi_shost_update_port_data(req->adapter, bottom);
788 		zfcp_fsf_exchange_port_evaluate(req);
789 		break;
790 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
791 		zfcp_diag_update_xdata(diag_hdr, bottom, true);
792 		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
793 
794 		zfcp_fsf_link_down_info_eval(req,
795 			&qtcb->header.fsf_status_qual.link_down_info);
796 
797 		zfcp_scsi_shost_update_port_data(req->adapter, bottom);
798 		zfcp_fsf_exchange_port_evaluate(req);
799 		break;
800 	}
801 }
802 
803 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
804 {
805 	struct zfcp_fsf_req *req;
806 
807 	if (likely(pool))
808 		req = mempool_alloc(pool, GFP_ATOMIC);
809 	else
810 		req = kmalloc(sizeof(*req), GFP_ATOMIC);
811 
812 	if (unlikely(!req))
813 		return NULL;
814 
815 	memset(req, 0, sizeof(*req));
816 	req->pool = pool;
817 	return req;
818 }
819 
820 static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool)
821 {
822 	struct fsf_qtcb *qtcb;
823 
824 	if (likely(pool))
825 		qtcb = mempool_alloc(pool, GFP_ATOMIC);
826 	else
827 		qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
828 
829 	if (unlikely(!qtcb))
830 		return NULL;
831 
832 	memset(qtcb, 0, sizeof(*qtcb));
833 	return qtcb;
834 }
835 
836 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
837 						u32 fsf_cmd, u8 sbtype,
838 						mempool_t *pool)
839 {
840 	struct zfcp_adapter *adapter = qdio->adapter;
841 	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
842 
843 	if (unlikely(!req))
844 		return ERR_PTR(-ENOMEM);
845 
846 	if (adapter->req_no == 0)
847 		adapter->req_no++;
848 
849 	timer_setup(&req->timer, NULL, 0);
850 	init_completion(&req->completion);
851 
852 	req->adapter = adapter;
853 	req->req_id = adapter->req_no;
854 
855 	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
856 		if (likely(pool))
857 			req->qtcb = zfcp_fsf_qtcb_alloc(
858 				adapter->pool.qtcb_pool);
859 		else
860 			req->qtcb = zfcp_fsf_qtcb_alloc(NULL);
861 
862 		if (unlikely(!req->qtcb)) {
863 			zfcp_fsf_req_free(req);
864 			return ERR_PTR(-ENOMEM);
865 		}
866 
867 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
868 		req->qtcb->prefix.req_id = req->req_id;
869 		req->qtcb->prefix.ulp_info = 26;
870 		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
871 		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
872 		req->qtcb->header.req_handle = req->req_id;
873 		req->qtcb->header.fsf_command = fsf_cmd;
874 	}
875 
876 	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
877 			   req->qtcb, sizeof(struct fsf_qtcb));
878 
879 	return req;
880 }
881 
882 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
883 {
884 	const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
885 	struct zfcp_adapter *adapter = req->adapter;
886 	struct zfcp_qdio *qdio = adapter->qdio;
887 	u64 req_id = req->req_id;
888 
889 	zfcp_reqlist_add(adapter->req_list, req);
890 
891 	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
892 	req->issued = get_tod_clock();
893 	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
894 		del_timer_sync(&req->timer);
895 
896 		/* lookup request again, list might have changed */
897 		if (zfcp_reqlist_find_rm(adapter->req_list, req_id) == NULL)
898 			zfcp_dbf_hba_fsf_reqid("fsrsrmf", 1, adapter, req_id);
899 
900 		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
901 		return -EIO;
902 	}
903 
904 	/*
905 	 * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
906 	 *	 ONLY TOUCH SYNC req AGAIN ON req->completion.
907 	 *
908 	 * The request might complete and be freed concurrently at any point
909 	 * now. This is not protected by the QDIO-lock (req_q_lock). So any
910 	 * uncontrolled access after this might result in an use-after-free bug.
911 	 * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
912 	 * when it is completed via req->completion, is it safe to use req
913 	 * again.
914 	 */
915 
916 	/* Don't increase for unsolicited status */
917 	if (!is_srb)
918 		adapter->fsf_req_seq_no++;
919 	adapter->req_no++;
920 
921 	return 0;
922 }
923 
924 /**
925  * zfcp_fsf_status_read - send status read request
926  * @qdio: pointer to struct zfcp_qdio
927  * Returns: 0 on success, ERROR otherwise
928  */
929 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
930 {
931 	struct zfcp_adapter *adapter = qdio->adapter;
932 	struct zfcp_fsf_req *req;
933 	struct fsf_status_read_buffer *sr_buf;
934 	struct page *page;
935 	int retval = -EIO;
936 
937 	spin_lock_irq(&qdio->req_q_lock);
938 	if (zfcp_qdio_sbal_get(qdio))
939 		goto out;
940 
941 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
942 				  SBAL_SFLAGS0_TYPE_STATUS,
943 				  adapter->pool.status_read_req);
944 	if (IS_ERR(req)) {
945 		retval = PTR_ERR(req);
946 		goto out;
947 	}
948 
949 	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
950 	if (!page) {
951 		retval = -ENOMEM;
952 		goto failed_buf;
953 	}
954 	sr_buf = page_address(page);
955 	memset(sr_buf, 0, sizeof(*sr_buf));
956 	req->data = sr_buf;
957 
958 	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
959 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
960 
961 	retval = zfcp_fsf_req_send(req);
962 	if (retval)
963 		goto failed_req_send;
964 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
965 
966 	goto out;
967 
968 failed_req_send:
969 	req->data = NULL;
970 	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
971 failed_buf:
972 	zfcp_dbf_hba_fsf_uss("fssr__1", req);
973 	zfcp_fsf_req_free(req);
974 out:
975 	spin_unlock_irq(&qdio->req_q_lock);
976 	return retval;
977 }
978 
979 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
980 {
981 	struct scsi_device *sdev = req->data;
982 	struct zfcp_scsi_dev *zfcp_sdev;
983 	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
984 
985 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
986 		return;
987 
988 	zfcp_sdev = sdev_to_zfcp(sdev);
989 
990 	switch (req->qtcb->header.fsf_status) {
991 	case FSF_PORT_HANDLE_NOT_VALID:
992 		if (fsq->word[0] == fsq->word[1]) {
993 			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
994 						"fsafch1");
995 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
996 		}
997 		break;
998 	case FSF_LUN_HANDLE_NOT_VALID:
999 		if (fsq->word[0] == fsq->word[1]) {
1000 			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
1001 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1002 		}
1003 		break;
1004 	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
1005 		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
1006 		break;
1007 	case FSF_PORT_BOXED:
1008 		zfcp_erp_set_port_status(zfcp_sdev->port,
1009 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1010 		zfcp_erp_port_reopen(zfcp_sdev->port,
1011 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
1012 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1013 		break;
1014 	case FSF_LUN_BOXED:
1015 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1016 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
1017 				    "fsafch4");
1018 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1019 		break;
1020 	case FSF_ADAPTER_STATUS_AVAILABLE:
1021 		switch (fsq->word[0]) {
1022 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1023 			zfcp_fc_test_link(zfcp_sdev->port);
1024 			fallthrough;
1025 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1026 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1027 			break;
1028 		}
1029 		break;
1030 	case FSF_GOOD:
1031 		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
1032 		break;
1033 	}
1034 }
1035 
1036 /**
1037  * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
1038  * @scmnd: The SCSI command to abort
1039  * Returns: pointer to struct zfcp_fsf_req
1040  */
1041 
1042 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
1043 {
1044 	struct zfcp_fsf_req *req = NULL;
1045 	struct scsi_device *sdev = scmnd->device;
1046 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1047 	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
1048 	u64 old_req_id = (u64) scmnd->host_scribble;
1049 
1050 	spin_lock_irq(&qdio->req_q_lock);
1051 	if (zfcp_qdio_sbal_get(qdio))
1052 		goto out;
1053 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
1054 				  SBAL_SFLAGS0_TYPE_READ,
1055 				  qdio->adapter->pool.scsi_abort);
1056 	if (IS_ERR(req)) {
1057 		req = NULL;
1058 		goto out;
1059 	}
1060 
1061 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
1062 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
1063 		goto out_error_free;
1064 
1065 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1066 
1067 	req->data = sdev;
1068 	req->handler = zfcp_fsf_abort_fcp_command_handler;
1069 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
1070 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
1071 	req->qtcb->bottom.support.req_handle = old_req_id;
1072 
1073 	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
1074 	if (!zfcp_fsf_req_send(req)) {
1075 		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
1076 		goto out;
1077 	}
1078 
1079 out_error_free:
1080 	zfcp_fsf_req_free(req);
1081 	req = NULL;
1082 out:
1083 	spin_unlock_irq(&qdio->req_q_lock);
1084 	return req;
1085 }
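
/*
 * Note (editorial): zfcp_fsf_abort_fcp_cmnd() is typically driven from the
 * SCSI error-handling abort callback; the returned request, if any, is waited
 * for via req->completion and then evaluated and freed by the caller.
 */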
1086 
1087 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1088 {
1089 	struct zfcp_adapter *adapter = req->adapter;
1090 	struct zfcp_fsf_ct_els *ct = req->data;
1091 	struct fsf_qtcb_header *header = &req->qtcb->header;
1092 
1093 	ct->status = -EINVAL;
1094 
1095 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1096 		goto skip_fsfstatus;
1097 
1098 	switch (header->fsf_status) {
1099 	case FSF_GOOD:
1100 		ct->status = 0;
1101 		zfcp_dbf_san_res("fsscth2", req);
1102 		break;
1103 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1104 		zfcp_fsf_class_not_supp(req);
1105 		break;
1106 	case FSF_ADAPTER_STATUS_AVAILABLE:
1107 		switch (header->fsf_status_qual.word[0]) {
1108 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1109 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1110 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1111 			break;
1112 		}
1113 		break;
1114 	case FSF_PORT_BOXED:
1115 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1116 		break;
1117 	case FSF_PORT_HANDLE_NOT_VALID:
1118 		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
1119 		fallthrough;
1120 	case FSF_GENERIC_COMMAND_REJECTED:
1121 	case FSF_PAYLOAD_SIZE_MISMATCH:
1122 	case FSF_REQUEST_SIZE_TOO_LARGE:
1123 	case FSF_RESPONSE_SIZE_TOO_LARGE:
1124 	case FSF_SBAL_MISMATCH:
1125 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1126 		break;
1127 	}
1128 
1129 skip_fsfstatus:
1130 	if (ct->handler)
1131 		ct->handler(ct->handler_data);
1132 }
1133 
1134 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
1135 					    struct zfcp_qdio_req *q_req,
1136 					    struct scatterlist *sg_req,
1137 					    struct scatterlist *sg_resp)
1138 {
1139 	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
1140 	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
1141 	zfcp_qdio_set_sbale_last(qdio, q_req);
1142 }
1143 
1144 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1145 				       struct scatterlist *sg_req,
1146 				       struct scatterlist *sg_resp)
1147 {
1148 	struct zfcp_adapter *adapter = req->adapter;
1149 	struct zfcp_qdio *qdio = adapter->qdio;
1150 	struct fsf_qtcb *qtcb = req->qtcb;
1151 	u32 feat = adapter->adapter_features;
1152 
1153 	if (zfcp_adapter_multi_buffer_active(adapter)) {
1154 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1155 			return -EIO;
1156 		qtcb->bottom.support.req_buf_length =
1157 			zfcp_qdio_real_bytes(sg_req);
1158 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1159 			return -EIO;
1160 		qtcb->bottom.support.resp_buf_length =
1161 			zfcp_qdio_real_bytes(sg_resp);
1162 
1163 		zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
1164 		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1165 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
1166 		return 0;
1167 	}
1168 
1169 	/* use single, unchained SBAL if it can hold the request */
1170 	if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
1171 		zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
1172 						sg_req, sg_resp);
1173 		return 0;
1174 	}
1175 
1176 	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
1177 		return -EOPNOTSUPP;
1178 
1179 	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1180 		return -EIO;
1181 
1182 	qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
1183 
1184 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1185 	zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
1186 
1187 	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1188 		return -EIO;
1189 
1190 	qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
1191 
1192 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1193 
1194 	return 0;
1195 }
1196 
1197 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1198 				 struct scatterlist *sg_req,
1199 				 struct scatterlist *sg_resp,
1200 				 unsigned int timeout)
1201 {
1202 	int ret;
1203 
1204 	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
1205 	if (ret)
1206 		return ret;
1207 
1208 	/* common settings for ct/gs and els requests */
1209 	if (timeout > 255)
1210 		timeout = 255; /* max value accepted by hardware */
1211 	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1212 	req->qtcb->bottom.support.timeout = timeout;
1213 	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1214 
1215 	return 0;
1216 }
1217 
1218 /**
1219  * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1220  * @wka_port: pointer to zfcp WKA port to send CT/GS to
1221  * @ct: pointer to struct zfcp_send_ct with data for request
1222  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1223  * @timeout: timeout that hardware should use, and a later software timeout
1224  */
1225 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1226 		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1227 		     unsigned int timeout)
1228 {
1229 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1230 	struct zfcp_fsf_req *req;
1231 	int ret = -EIO;
1232 
1233 	spin_lock_irq(&qdio->req_q_lock);
1234 	if (zfcp_qdio_sbal_get(qdio))
1235 		goto out;
1236 
1237 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1238 				  SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1239 
1240 	if (IS_ERR(req)) {
1241 		ret = PTR_ERR(req);
1242 		goto out;
1243 	}
1244 
1245 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1246 	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1247 	if (ret)
1248 		goto failed_send;
1249 
1250 	req->handler = zfcp_fsf_send_ct_handler;
1251 	req->qtcb->header.port_handle = wka_port->handle;
1252 	ct->d_id = wka_port->d_id;
1253 	req->data = ct;
1254 
1255 	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1256 
1257 	ret = zfcp_fsf_req_send(req);
1258 	if (ret)
1259 		goto failed_send;
1260 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1261 
1262 	goto out;
1263 
1264 failed_send:
1265 	zfcp_fsf_req_free(req);
1266 out:
1267 	spin_unlock_irq(&qdio->req_q_lock);
1268 	return ret;
1269 }
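
/*
 * Note (editorial): this CT/GS path typically carries nameserver queries
 * (for example GID_PN lookups) sent to the directory-service well-known
 * address; ct->handler is invoked from zfcp_fsf_send_ct_handler() above once
 * the request completes.
 */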
1270 
1271 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1272 {
1273 	struct zfcp_fsf_ct_els *send_els = req->data;
1274 	struct fsf_qtcb_header *header = &req->qtcb->header;
1275 
1276 	send_els->status = -EINVAL;
1277 
1278 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1279 		goto skip_fsfstatus;
1280 
1281 	switch (header->fsf_status) {
1282 	case FSF_GOOD:
1283 		send_els->status = 0;
1284 		zfcp_dbf_san_res("fsselh1", req);
1285 		break;
1286 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1287 		zfcp_fsf_class_not_supp(req);
1288 		break;
1289 	case FSF_ADAPTER_STATUS_AVAILABLE:
1290 		switch (header->fsf_status_qual.word[0]) {
1291 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1292 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1293 		case FSF_SQ_RETRY_IF_POSSIBLE:
1294 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1295 			break;
1296 		}
1297 		break;
1298 	case FSF_ELS_COMMAND_REJECTED:
1299 	case FSF_PAYLOAD_SIZE_MISMATCH:
1300 	case FSF_REQUEST_SIZE_TOO_LARGE:
1301 	case FSF_RESPONSE_SIZE_TOO_LARGE:
1302 		break;
1303 	case FSF_SBAL_MISMATCH:
1304 		/* should never occur, avoided in zfcp_fsf_send_els */
1305 		fallthrough;
1306 	default:
1307 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1308 		break;
1309 	}
1310 skip_fsfstatus:
1311 	if (send_els->handler)
1312 		send_els->handler(send_els->handler_data);
1313 }
1314 
1315 /**
1316  * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1317  * @adapter: pointer to zfcp adapter
1318  * @d_id: N_Port_ID to send ELS to
1319  * @els: pointer to struct zfcp_send_els with data for the command
1320  * @timeout: timeout that hardware should use, and a later software timeout
1321  */
1322 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1323 		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
1324 {
1325 	struct zfcp_fsf_req *req;
1326 	struct zfcp_qdio *qdio = adapter->qdio;
1327 	int ret = -EIO;
1328 
1329 	spin_lock_irq(&qdio->req_q_lock);
1330 	if (zfcp_qdio_sbal_get(qdio))
1331 		goto out;
1332 
1333 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1334 				  SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1335 
1336 	if (IS_ERR(req)) {
1337 		ret = PTR_ERR(req);
1338 		goto out;
1339 	}
1340 
1341 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1342 
1343 	if (!zfcp_adapter_multi_buffer_active(adapter))
1344 		zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1345 
1346 	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1347 
1348 	if (ret)
1349 		goto failed_send;
1350 
1351 	hton24(req->qtcb->bottom.support.d_id, d_id);
1352 	req->handler = zfcp_fsf_send_els_handler;
1353 	els->d_id = d_id;
1354 	req->data = els;
1355 
1356 	zfcp_dbf_san_req("fssels1", req, d_id);
1357 
1358 	ret = zfcp_fsf_req_send(req);
1359 	if (ret)
1360 		goto failed_send;
1361 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1362 
1363 	goto out;
1364 
1365 failed_send:
1366 	zfcp_fsf_req_free(req);
1367 out:
1368 	spin_unlock_irq(&qdio->req_q_lock);
1369 	return ret;
1370 }
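
/*
 * Note (editorial): this path carries driver-generated ELS frames such as the
 * ADISC used for testing links to remote ports; els->handler is invoked from
 * zfcp_fsf_send_els_handler() once the response (or an error) is processed.
 */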
1371 
1372 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1373 {
1374 	struct zfcp_fsf_req *req;
1375 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1376 	int retval = -EIO;
1377 
1378 	spin_lock_irq(&qdio->req_q_lock);
1379 	if (zfcp_qdio_sbal_get(qdio))
1380 		goto out;
1381 
1382 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1383 				  SBAL_SFLAGS0_TYPE_READ,
1384 				  qdio->adapter->pool.erp_req);
1385 
1386 	if (IS_ERR(req)) {
1387 		retval = PTR_ERR(req);
1388 		goto out;
1389 	}
1390 
1391 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1392 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1393 
1394 	req->qtcb->bottom.config.feature_selection =
1395 			FSF_FEATURE_NOTIFICATION_LOST |
1396 			FSF_FEATURE_UPDATE_ALERT |
1397 			FSF_FEATURE_REQUEST_SFP_DATA |
1398 			FSF_FEATURE_FC_SECURITY;
1399 	req->erp_action = erp_action;
1400 	req->handler = zfcp_fsf_exchange_config_data_handler;
1401 	erp_action->fsf_req_id = req->req_id;
1402 
1403 	zfcp_fsf_start_erp_timer(req);
1404 	retval = zfcp_fsf_req_send(req);
1405 	if (retval) {
1406 		zfcp_fsf_req_free(req);
1407 		erp_action->fsf_req_id = 0;
1408 	}
1409 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1410 out:
1411 	spin_unlock_irq(&qdio->req_q_lock);
1412 	return retval;
1413 }
1414 
1415 
1416 /**
1417  * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel.
1418  * @qdio: pointer to the QDIO-Queue to use for sending the command.
1419  * @data: pointer to the QTCB-Bottom for storing the result of the command,
1420  *	  might be %NULL.
1421  *
1422  * Returns:
1423  * * 0		- Exchange Config Data was successful, @data is complete
1424  * * -EIO	- Exchange Config Data was not successful, @data is invalid
1425  * * -EAGAIN	- @data contains incomplete data
1426  * * -ENOMEM	- Some memory allocation failed along the way
1427  */
1428 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1429 				       struct fsf_qtcb_bottom_config *data)
1430 {
1431 	struct zfcp_fsf_req *req = NULL;
1432 	int retval = -EIO;
1433 
1434 	spin_lock_irq(&qdio->req_q_lock);
1435 	if (zfcp_qdio_sbal_get(qdio))
1436 		goto out_unlock;
1437 
1438 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1439 				  SBAL_SFLAGS0_TYPE_READ, NULL);
1440 
1441 	if (IS_ERR(req)) {
1442 		retval = PTR_ERR(req);
1443 		goto out_unlock;
1444 	}
1445 
1446 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1447 	req->handler = zfcp_fsf_exchange_config_data_handler;
1448 
1449 	req->qtcb->bottom.config.feature_selection =
1450 			FSF_FEATURE_NOTIFICATION_LOST |
1451 			FSF_FEATURE_UPDATE_ALERT |
1452 			FSF_FEATURE_REQUEST_SFP_DATA |
1453 			FSF_FEATURE_FC_SECURITY;
1454 
1455 	if (data)
1456 		req->data = data;
1457 
1458 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1459 	retval = zfcp_fsf_req_send(req);
1460 	spin_unlock_irq(&qdio->req_q_lock);
1461 
1462 	if (!retval) {
1463 		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
1464 		wait_for_completion(&req->completion);
1465 
1466 		if (req->status &
1467 		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
1468 			retval = -EIO;
1469 		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
1470 			retval = -EAGAIN;
1471 	}
1472 
1473 	zfcp_fsf_req_free(req);
1474 	return retval;
1475 
1476 out_unlock:
1477 	spin_unlock_irq(&qdio->req_q_lock);
1478 	return retval;
1479 }
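
/*
 * Usage sketch (editorial, illustrative only): a caller outside ERP can
 * allocate a struct fsf_qtcb_bottom_config, pass it as @data, and treat
 * -EAGAIN as "data present but incomplete", e.g.
 *
 *	struct fsf_qtcb_bottom_config *cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
 *	int ret;
 *
 *	if (cfg) {
 *		ret = zfcp_fsf_exchange_config_data_sync(qdio, cfg);
 *		if (ret == 0 || ret == -EAGAIN)
 *			... evaluate cfg, remembering -EAGAIN means incomplete ...
 *		kfree(cfg);
 *	}
 */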
1480 
1481 /**
1482  * zfcp_fsf_exchange_port_data - request information about local port
1483  * @erp_action: ERP action for the adapter for which port data is requested
1484  * Returns: 0 on success, error otherwise
1485  */
1486 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1487 {
1488 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1489 	struct zfcp_fsf_req *req;
1490 	int retval = -EIO;
1491 
1492 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1493 		return -EOPNOTSUPP;
1494 
1495 	spin_lock_irq(&qdio->req_q_lock);
1496 	if (zfcp_qdio_sbal_get(qdio))
1497 		goto out;
1498 
1499 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1500 				  SBAL_SFLAGS0_TYPE_READ,
1501 				  qdio->adapter->pool.erp_req);
1502 
1503 	if (IS_ERR(req)) {
1504 		retval = PTR_ERR(req);
1505 		goto out;
1506 	}
1507 
1508 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1509 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1510 
1511 	req->handler = zfcp_fsf_exchange_port_data_handler;
1512 	req->erp_action = erp_action;
1513 	erp_action->fsf_req_id = req->req_id;
1514 
1515 	zfcp_fsf_start_erp_timer(req);
1516 	retval = zfcp_fsf_req_send(req);
1517 	if (retval) {
1518 		zfcp_fsf_req_free(req);
1519 		erp_action->fsf_req_id = 0;
1520 	}
1521 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1522 out:
1523 	spin_unlock_irq(&qdio->req_q_lock);
1524 	return retval;
1525 }
1526 
1527 /**
1528  * zfcp_fsf_exchange_port_data_sync() - Request information about local port.
1529  * @qdio: pointer to the QDIO-Queue to use for sending the command.
1530  * @data: pointer to the QTCB-Bottom for storing the result of the command,
1531  *	  might be %NULL.
1532  *
1533  * Returns:
1534  * * 0		- Exchange Port Data was successful, @data is complete
1535  * * -EIO	- Exchange Port Data was not successful, @data is invalid
1536  * * -EAGAIN	- @data contains incomplete data
1537  * * -ENOMEM	- Some memory allocation failed along the way
1538  * * -EOPNOTSUPP	- This operation is not supported
1539  */
1540 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1541 				     struct fsf_qtcb_bottom_port *data)
1542 {
1543 	struct zfcp_fsf_req *req = NULL;
1544 	int retval = -EIO;
1545 
1546 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1547 		return -EOPNOTSUPP;
1548 
1549 	spin_lock_irq(&qdio->req_q_lock);
1550 	if (zfcp_qdio_sbal_get(qdio))
1551 		goto out_unlock;
1552 
1553 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1554 				  SBAL_SFLAGS0_TYPE_READ, NULL);
1555 
1556 	if (IS_ERR(req)) {
1557 		retval = PTR_ERR(req);
1558 		goto out_unlock;
1559 	}
1560 
1561 	if (data)
1562 		req->data = data;
1563 
1564 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1565 
1566 	req->handler = zfcp_fsf_exchange_port_data_handler;
1567 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1568 	retval = zfcp_fsf_req_send(req);
1569 	spin_unlock_irq(&qdio->req_q_lock);
1570 
1571 	if (!retval) {
1572 		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
1573 		wait_for_completion(&req->completion);
1574 
1575 		if (req->status &
1576 		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
1577 			retval = -EIO;
1578 		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
1579 			retval = -EAGAIN;
1580 	}
1581 
1582 	zfcp_fsf_req_free(req);
1583 	return retval;
1584 
1585 out_unlock:
1586 	spin_unlock_irq(&qdio->req_q_lock);
1587 	return retval;
1588 }
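
/*
 * Note (editorial): the synchronous variant is typically used outside ERP,
 * for instance when sysfs attributes need current port data; callers must
 * expect -EOPNOTSUPP on adapters that do not advertise the HBA-API management
 * feature in adapter_features.
 */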
1589 
1590 static void zfcp_fsf_log_port_fc_security(struct zfcp_port *port,
1591 					  struct zfcp_fsf_req *req)
1592 {
1593 	char mnemonic_old[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
1594 	char mnemonic_new[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
1595 
1596 	if (port->connection_info == port->connection_info_old) {
1597 		/* no change, no log nor trace */
1598 		return;
1599 	}
1600 
1601 	zfcp_dbf_hba_fsf_fces("fsfcesp", req, port->wwpn,
1602 			      port->connection_info_old,
1603 			      port->connection_info);
1604 
1605 	zfcp_fsf_scnprint_fc_security(mnemonic_old, sizeof(mnemonic_old),
1606 				      port->connection_info_old,
1607 				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
1608 	zfcp_fsf_scnprint_fc_security(mnemonic_new, sizeof(mnemonic_new),
1609 				      port->connection_info,
1610 				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
1611 
1612 	if (strncmp(mnemonic_old, mnemonic_new,
1613 		    ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH) == 0) {
1614 		/* no change in string representation, no log */
1615 		goto out;
1616 	}
1617 
1618 	if (port->connection_info_old == 0) {
1619 		/* activation */
1620 		dev_info(&port->adapter->ccw_device->dev,
1621 			 "FC Endpoint Security of connection to remote port 0x%16llx enabled: %s\n",
1622 			 port->wwpn, mnemonic_new);
1623 	} else if (port->connection_info == 0) {
1624 		/* deactivation */
1625 		dev_warn(&port->adapter->ccw_device->dev,
1626 			 "FC Endpoint Security of connection to remote port 0x%16llx disabled: was %s\n",
1627 			 port->wwpn, mnemonic_old);
1628 	} else {
1629 		/* change */
1630 		dev_warn(&port->adapter->ccw_device->dev,
1631 			 "FC Endpoint Security of connection to remote port 0x%16llx changed: from %s to %s\n",
1632 			 port->wwpn, mnemonic_old, mnemonic_new);
1633 	}
1634 
1635 out:
1636 	port->connection_info_old = port->connection_info;
1637 }
1638 
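/*
 * Translate FSF status qualifier word 0 of an FSF_SECURITY_ERROR response
 * into a rate-limited kernel warning naming the affected remote port WWPN.
 * Unknown qualifier values are reported with their raw code.
 */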
1639 static void zfcp_fsf_log_security_error(const struct device *dev, u32 fsf_sqw0,
1640 					u64 wwpn)
1641 {
1642 	switch (fsf_sqw0) {
1643 
1644 	/*
1645 	 * Open Port command error codes
1646 	 */
1647 
1648 	case FSF_SQ_SECURITY_REQUIRED:
1649 		dev_warn_ratelimited(dev,
1650 				     "FC Endpoint Security error: FC security is required but not supported or configured on remote port 0x%016llx\n",
1651 				     wwpn);
1652 		break;
1653 	case FSF_SQ_SECURITY_TIMEOUT:
1654 		dev_warn_ratelimited(dev,
1655 				     "FC Endpoint Security error: a timeout prevented opening remote port 0x%016llx\n",
1656 				     wwpn);
1657 		break;
1658 	case FSF_SQ_SECURITY_KM_UNAVAILABLE:
1659 		dev_warn_ratelimited(dev,
1660 				     "FC Endpoint Security error: opening remote port 0x%016llx failed because local and external key manager cannot communicate\n",
1661 				     wwpn);
1662 		break;
1663 	case FSF_SQ_SECURITY_RKM_UNAVAILABLE:
1664 		dev_warn_ratelimited(dev,
1665 				     "FC Endpoint Security error: opening remote port 0x%016llx failed because it cannot communicate with the external key manager\n",
1666 				     wwpn);
1667 		break;
1668 	case FSF_SQ_SECURITY_AUTH_FAILURE:
1669 		dev_warn_ratelimited(dev,
1670 				     "FC Endpoint Security error: the device could not verify the identity of remote port 0x%016llx\n",
1671 				     wwpn);
1672 		break;
1673 
1674 	/*
1675 	 * Send FCP command error codes
1676 	 */
1677 
1678 	case FSF_SQ_SECURITY_ENC_FAILURE:
1679 		dev_warn_ratelimited(dev,
1680 				     "FC Endpoint Security error: FC connection to remote port 0x%016llx closed because encryption broke down\n",
1681 				     wwpn);
1682 		break;
1683 
1684 	/*
1685 	 * Unknown error codes
1686 	 */
1687 
1688 	default:
1689 		dev_warn_ratelimited(dev,
1690 				     "FC Endpoint Security error: the device issued an unknown error code 0x%08x related to the FC connection to remote port 0x%016llx\n",
1691 				     fsf_sqw0, wwpn);
1692 	}
1693 }
1694 
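/*
 * Evaluate the FSF response to an "open port" request: on FSF_GOOD store the
 * port handle, record the FC Endpoint Security connection info (if supported
 * by the adapter), update the port status bits, and evaluate the PLOGI
 * payload returned by the channel; on errors mark the request as failed.
 * The port reference taken in zfcp_fsf_open_port() is dropped here.
 */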
1695 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1696 {
1697 	struct zfcp_adapter *adapter = req->adapter;
1698 	struct zfcp_port *port = req->data;
1699 	struct fsf_qtcb_header *header = &req->qtcb->header;
1700 	struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1701 	struct fc_els_flogi *plogi;
1702 
1703 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1704 		goto out;
1705 
1706 	switch (header->fsf_status) {
1707 	case FSF_PORT_ALREADY_OPEN:
1708 		break;
1709 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1710 		dev_warn(&adapter->ccw_device->dev,
1711 			 "Not enough FCP adapter resources to open "
1712 			 "remote port 0x%016Lx\n",
1713 			 (unsigned long long)port->wwpn);
1714 		zfcp_erp_set_port_status(port,
1715 					 ZFCP_STATUS_COMMON_ERP_FAILED);
1716 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1717 		break;
1718 	case FSF_SECURITY_ERROR:
1719 		zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
1720 					    header->fsf_status_qual.word[0],
1721 					    port->wwpn);
1722 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1723 		break;
1724 	case FSF_ADAPTER_STATUS_AVAILABLE:
1725 		switch (header->fsf_status_qual.word[0]) {
1726 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1727 			/* no zfcp_fc_test_link() with failed open port */
1728 			fallthrough;
1729 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1730 		case FSF_SQ_NO_RETRY_POSSIBLE:
1731 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1732 			break;
1733 		}
1734 		break;
1735 	case FSF_GOOD:
1736 		port->handle = header->port_handle;
1737 		if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
1738 			port->connection_info = bottom->connection_info;
1739 		else
1740 			port->connection_info = 0;
1741 		zfcp_fsf_log_port_fc_security(port, req);
1742 		atomic_or(ZFCP_STATUS_COMMON_OPEN |
1743 				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1744 		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
1745 		                  &port->status);
1746 		/* check whether D_ID has changed during open */
1747 		/*
1748 		 * FIXME: This check is not airtight, as the FCP channel does
1749 		 * not monitor closures of target port connections caused on
1750 		 * the remote side. Thus, it might miss out on invalidating
1751 		 * locally cached WWPNs (and other N_Port parameters) of gone
1752 		 * target ports. So, our attempt to make things safe could be
1753 		 * undermined by 'open port' response data tagged with obsolete
1754 		 * WWPNs. This is another reason to monitor potential
1755 		 * connection closures ourselves (by interpreting incoming
1756 		 * ELSes and unsolicited status). One should also be able to
1757 		 * cross-check by means of another GID_PN straight after a
1758 		 * port has been opened. Alternatively, an ADISC or PDISC ELS
1759 		 * should suffice as well.
1760 		 */
1761 		plogi = (struct fc_els_flogi *) bottom->els;
1762 		if (bottom->els1_length >= FSF_PLOGI_MIN_LEN)
1763 			zfcp_fc_plogi_evaluate(port, plogi);
1764 		break;
1765 	case FSF_UNKNOWN_OP_SUBTYPE:
1766 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1767 		break;
1768 	}
1769 
1770 out:
1771 	put_device(&port->dev);
1772 }
1773 
1774 /**
1775  * zfcp_fsf_open_port - create and send open port request
1776  * @erp_action: pointer to struct zfcp_erp_action
1777  * Returns: 0 on success, error otherwise
1778  */
1779 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1780 {
1781 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1782 	struct zfcp_port *port = erp_action->port;
1783 	struct zfcp_fsf_req *req;
1784 	int retval = -EIO;
1785 
1786 	spin_lock_irq(&qdio->req_q_lock);
1787 	if (zfcp_qdio_sbal_get(qdio))
1788 		goto out;
1789 
1790 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1791 				  SBAL_SFLAGS0_TYPE_READ,
1792 				  qdio->adapter->pool.erp_req);
1793 
1794 	if (IS_ERR(req)) {
1795 		retval = PTR_ERR(req);
1796 		goto out;
1797 	}
1798 
1799 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1800 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1801 
1802 	req->handler = zfcp_fsf_open_port_handler;
1803 	hton24(req->qtcb->bottom.support.d_id, port->d_id);
1804 	req->data = port;
1805 	req->erp_action = erp_action;
1806 	erp_action->fsf_req_id = req->req_id;
1807 	get_device(&port->dev);
1808 
1809 	zfcp_fsf_start_erp_timer(req);
1810 	retval = zfcp_fsf_req_send(req);
1811 	if (retval) {
1812 		zfcp_fsf_req_free(req);
1813 		erp_action->fsf_req_id = 0;
1814 		put_device(&port->dev);
1815 	}
1816 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1817 out:
1818 	spin_unlock_irq(&qdio->req_q_lock);
1819 	return retval;
1820 }
1821 
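/*
 * Evaluate the FSF response to a "close port" request: an invalid port handle
 * triggers an adapter reopen; FSF_GOOD clears the port's open status.
 */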
1822 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1823 {
1824 	struct zfcp_port *port = req->data;
1825 
1826 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1827 		return;
1828 
1829 	switch (req->qtcb->header.fsf_status) {
1830 	case FSF_PORT_HANDLE_NOT_VALID:
1831 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1832 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1833 		break;
1834 	case FSF_ADAPTER_STATUS_AVAILABLE:
1835 		break;
1836 	case FSF_GOOD:
1837 		zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1838 		break;
1839 	}
1840 }
1841 
1842 /**
1843  * zfcp_fsf_close_port - create and send close port request
1844  * @erp_action: pointer to struct zfcp_erp_action
1845  * Returns: 0 on success, error otherwise
1846  */
1847 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1848 {
1849 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1850 	struct zfcp_fsf_req *req;
1851 	int retval = -EIO;
1852 
1853 	spin_lock_irq(&qdio->req_q_lock);
1854 	if (zfcp_qdio_sbal_get(qdio))
1855 		goto out;
1856 
1857 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1858 				  SBAL_SFLAGS0_TYPE_READ,
1859 				  qdio->adapter->pool.erp_req);
1860 
1861 	if (IS_ERR(req)) {
1862 		retval = PTR_ERR(req);
1863 		goto out;
1864 	}
1865 
1866 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1867 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1868 
1869 	req->handler = zfcp_fsf_close_port_handler;
1870 	req->data = erp_action->port;
1871 	req->erp_action = erp_action;
1872 	req->qtcb->header.port_handle = erp_action->port->handle;
1873 	erp_action->fsf_req_id = req->req_id;
1874 
1875 	zfcp_fsf_start_erp_timer(req);
1876 	retval = zfcp_fsf_req_send(req);
1877 	if (retval) {
1878 		zfcp_fsf_req_free(req);
1879 		erp_action->fsf_req_id = 0;
1880 	}
1881 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1882 out:
1883 	spin_unlock_irq(&qdio->req_q_lock);
1884 	return retval;
1885 }
1886 
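/*
 * Evaluate the FSF response to an "open WKA port" request: mark the
 * well-known address port online or offline accordingly and wake up the
 * waiter blocked on wka_port->opened.
 */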
1887 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1888 {
1889 	struct zfcp_fc_wka_port *wka_port = req->data;
1890 	struct fsf_qtcb_header *header = &req->qtcb->header;
1891 
1892 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1893 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1894 		goto out;
1895 	}
1896 
1897 	switch (header->fsf_status) {
1898 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1899 		dev_warn(&req->adapter->ccw_device->dev,
1900 			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1901 		fallthrough;
1902 	case FSF_ADAPTER_STATUS_AVAILABLE:
1903 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1904 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1905 		break;
1906 	case FSF_GOOD:
1907 		wka_port->handle = header->port_handle;
1908 		fallthrough;
1909 	case FSF_PORT_ALREADY_OPEN:
1910 		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1911 	}
1912 out:
1913 	wake_up(&wka_port->opened);
1914 }
1915 
1916 /**
1917  * zfcp_fsf_open_wka_port - create and send open wka-port request
1918  * @wka_port: pointer to struct zfcp_fc_wka_port
1919  * Returns: 0 on success, error otherwise
1920  */
1921 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1922 {
1923 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1924 	struct zfcp_fsf_req *req;
1925 	u64 req_id = 0;
1926 	int retval = -EIO;
1927 
1928 	spin_lock_irq(&qdio->req_q_lock);
1929 	if (zfcp_qdio_sbal_get(qdio))
1930 		goto out;
1931 
1932 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1933 				  SBAL_SFLAGS0_TYPE_READ,
1934 				  qdio->adapter->pool.erp_req);
1935 
1936 	if (IS_ERR(req)) {
1937 		retval = PTR_ERR(req);
1938 		goto out;
1939 	}
1940 
1941 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1942 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1943 
1944 	req->handler = zfcp_fsf_open_wka_port_handler;
1945 	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1946 	req->data = wka_port;
1947 
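	/*
	 * Keep a copy of the request ID: once the request has been sent it
	 * must not be touched anymore, but the ID is still needed for the
	 * recovery trace record written after unlocking.
	 */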
1948 	req_id = req->req_id;
1949 
1950 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1951 	retval = zfcp_fsf_req_send(req);
1952 	if (retval)
1953 		zfcp_fsf_req_free(req);
1954 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1955 out:
1956 	spin_unlock_irq(&qdio->req_q_lock);
1957 	if (!retval)
1958 		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
1959 	return retval;
1960 }
1961 
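/*
 * Evaluate the FSF response to a "close WKA port" request: an invalid port
 * handle triggers an adapter reopen; in any case the WKA port is marked
 * offline and the waiter on wka_port->closed is woken up.
 */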
1962 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1963 {
1964 	struct zfcp_fc_wka_port *wka_port = req->data;
1965 
1966 	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1967 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1968 		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1969 	}
1970 
1971 	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1972 	wake_up(&wka_port->closed);
1973 }
1974 
1975 /**
1976  * zfcp_fsf_close_wka_port - create and send close wka port request
1977  * @wka_port: WKA port to close
1978  * Returns: 0 on success, error otherwise
1979  */
1980 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1981 {
1982 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1983 	struct zfcp_fsf_req *req;
1984 	u64 req_id = 0;
1985 	int retval = -EIO;
1986 
1987 	spin_lock_irq(&qdio->req_q_lock);
1988 	if (zfcp_qdio_sbal_get(qdio))
1989 		goto out;
1990 
1991 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1992 				  SBAL_SFLAGS0_TYPE_READ,
1993 				  qdio->adapter->pool.erp_req);
1994 
1995 	if (IS_ERR(req)) {
1996 		retval = PTR_ERR(req);
1997 		goto out;
1998 	}
1999 
2000 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2001 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2002 
2003 	req->handler = zfcp_fsf_close_wka_port_handler;
2004 	req->data = wka_port;
2005 	req->qtcb->header.port_handle = wka_port->handle;
2006 
2007 	req_id = req->req_id;
2008 
2009 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2010 	retval = zfcp_fsf_req_send(req);
2011 	if (retval)
2012 		zfcp_fsf_req_free(req);
2013 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2014 out:
2015 	spin_unlock_irq(&qdio->req_q_lock);
2016 	if (!retval)
2017 		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
2018 	return retval;
2019 }
2020 
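/*
 * Evaluate the FSF response to a "close physical port" request: clear the
 * physical-open status of the port and the open status of all LUNs behind
 * it. ZFCP_STATUS_COMMON_OPEN of the port itself is deliberately left
 * untouched.
 */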
2021 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
2022 {
2023 	struct zfcp_port *port = req->data;
2024 	struct fsf_qtcb_header *header = &req->qtcb->header;
2025 	struct scsi_device *sdev;
2026 
2027 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2028 		return;
2029 
2030 	switch (header->fsf_status) {
2031 	case FSF_PORT_HANDLE_NOT_VALID:
2032 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
2033 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2034 		break;
2035 	case FSF_PORT_BOXED:
2036 		/* can't use generic zfcp_erp_modify_port_status because
2037 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
2038 		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
2039 		shost_for_each_device(sdev, port->adapter->scsi_host)
2040 			if (sdev_to_zfcp(sdev)->port == port)
2041 				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
2042 						  &sdev_to_zfcp(sdev)->status);
2043 		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2044 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
2045 				     "fscpph2");
2046 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2047 		break;
2048 	case FSF_ADAPTER_STATUS_AVAILABLE:
2049 		switch (header->fsf_status_qual.word[0]) {
2050 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2051 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2052 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2053 			break;
2054 		}
2055 		break;
2056 	case FSF_GOOD:
2057 		/* can't use generic zfcp_erp_modify_port_status because
2058 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
2059 		 */
2060 		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
2061 		shost_for_each_device(sdev, port->adapter->scsi_host)
2062 			if (sdev_to_zfcp(sdev)->port == port)
2063 				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
2064 						  &sdev_to_zfcp(sdev)->status);
2065 		break;
2066 	}
2067 }
2068 
2069 /**
2070  * zfcp_fsf_close_physical_port - close physical port
2071  * @erp_action: pointer to struct zfcp_erp_action
2072  * Returns: 0 on success, error otherwise
2073  */
2074 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2075 {
2076 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
2077 	struct zfcp_fsf_req *req;
2078 	int retval = -EIO;
2079 
2080 	spin_lock_irq(&qdio->req_q_lock);
2081 	if (zfcp_qdio_sbal_get(qdio))
2082 		goto out;
2083 
2084 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
2085 				  SBAL_SFLAGS0_TYPE_READ,
2086 				  qdio->adapter->pool.erp_req);
2087 
2088 	if (IS_ERR(req)) {
2089 		retval = PTR_ERR(req);
2090 		goto out;
2091 	}
2092 
2093 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2094 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2095 
2096 	req->data = erp_action->port;
2097 	req->qtcb->header.port_handle = erp_action->port->handle;
2098 	req->erp_action = erp_action;
2099 	req->handler = zfcp_fsf_close_physical_port_handler;
2100 	erp_action->fsf_req_id = req->req_id;
2101 
2102 	zfcp_fsf_start_erp_timer(req);
2103 	retval = zfcp_fsf_req_send(req);
2104 	if (retval) {
2105 		zfcp_fsf_req_free(req);
2106 		erp_action->fsf_req_id = 0;
2107 	}
2108 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2109 out:
2110 	spin_unlock_irq(&qdio->req_q_lock);
2111 	return retval;
2112 }
2113 
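/*
 * Evaluate the FSF response to an "open LUN" request: on FSF_GOOD store the
 * LUN handle and mark the LUN open; sharing violations, exhausted handles,
 * and boxed ports are mapped to the corresponding ERP status bits and
 * request errors.
 */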
2114 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
2115 {
2116 	struct zfcp_adapter *adapter = req->adapter;
2117 	struct scsi_device *sdev = req->data;
2118 	struct zfcp_scsi_dev *zfcp_sdev;
2119 	struct fsf_qtcb_header *header = &req->qtcb->header;
2120 	union fsf_status_qual *qual = &header->fsf_status_qual;
2121 
2122 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2123 		return;
2124 
2125 	zfcp_sdev = sdev_to_zfcp(sdev);
2126 
2127 	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
2128 			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
2129 			  &zfcp_sdev->status);
2130 
2131 	switch (header->fsf_status) {
2132 
2133 	case FSF_PORT_HANDLE_NOT_VALID:
2134 		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
2135 		fallthrough;
2136 	case FSF_LUN_ALREADY_OPEN:
2137 		break;
2138 	case FSF_PORT_BOXED:
2139 		zfcp_erp_set_port_status(zfcp_sdev->port,
2140 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2141 		zfcp_erp_port_reopen(zfcp_sdev->port,
2142 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
2143 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2144 		break;
2145 	case FSF_LUN_SHARING_VIOLATION:
2146 		if (qual->word[0])
2147 			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
2148 				 "LUN 0x%016Lx on port 0x%016Lx is already in "
2149 				 "use by CSS%d, MIF Image ID %x\n",
2150 				 zfcp_scsi_dev_lun(sdev),
2151 				 (unsigned long long)zfcp_sdev->port->wwpn,
2152 				 qual->fsf_queue_designator.cssid,
2153 				 qual->fsf_queue_designator.hla);
2154 		zfcp_erp_set_lun_status(sdev,
2155 					ZFCP_STATUS_COMMON_ERP_FAILED |
2156 					ZFCP_STATUS_COMMON_ACCESS_DENIED);
2157 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2158 		break;
2159 	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
2160 		dev_warn(&adapter->ccw_device->dev,
2161 			 "No handle is available for LUN "
2162 			 "0x%016Lx on port 0x%016Lx\n",
2163 			 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2164 			 (unsigned long long)zfcp_sdev->port->wwpn);
2165 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
2166 		fallthrough;
2167 	case FSF_INVALID_COMMAND_OPTION:
2168 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2169 		break;
2170 	case FSF_ADAPTER_STATUS_AVAILABLE:
2171 		switch (header->fsf_status_qual.word[0]) {
2172 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2173 			zfcp_fc_test_link(zfcp_sdev->port);
2174 			fallthrough;
2175 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2176 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2177 			break;
2178 		}
2179 		break;
2180 
2181 	case FSF_GOOD:
2182 		zfcp_sdev->lun_handle = header->lun_handle;
2183 		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
2184 		break;
2185 	}
2186 }
2187 
2188 /**
2189  * zfcp_fsf_open_lun - open LUN
2190  * @erp_action: pointer to struct zfcp_erp_action
2191  * Returns: 0 on success, error otherwise
2192  */
2193 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
2194 {
2195 	struct zfcp_adapter *adapter = erp_action->adapter;
2196 	struct zfcp_qdio *qdio = adapter->qdio;
2197 	struct zfcp_fsf_req *req;
2198 	int retval = -EIO;
2199 
2200 	spin_lock_irq(&qdio->req_q_lock);
2201 	if (zfcp_qdio_sbal_get(qdio))
2202 		goto out;
2203 
2204 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
2205 				  SBAL_SFLAGS0_TYPE_READ,
2206 				  adapter->pool.erp_req);
2207 
2208 	if (IS_ERR(req)) {
2209 		retval = PTR_ERR(req);
2210 		goto out;
2211 	}
2212 
2213 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2214 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2215 
2216 	req->qtcb->header.port_handle = erp_action->port->handle;
2217 	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
2218 	req->handler = zfcp_fsf_open_lun_handler;
2219 	req->data = erp_action->sdev;
2220 	req->erp_action = erp_action;
2221 	erp_action->fsf_req_id = req->req_id;
2222 
2223 	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
2224 		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
2225 
2226 	zfcp_fsf_start_erp_timer(req);
2227 	retval = zfcp_fsf_req_send(req);
2228 	if (retval) {
2229 		zfcp_fsf_req_free(req);
2230 		erp_action->fsf_req_id = 0;
2231 	}
2232 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2233 out:
2234 	spin_unlock_irq(&qdio->req_q_lock);
2235 	return retval;
2236 }
2237 
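/*
 * Evaluate the FSF response to a "close LUN" request: invalid handles trigger
 * adapter or port recovery; FSF_GOOD clears the LUN's open status.
 */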
2238 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
2239 {
2240 	struct scsi_device *sdev = req->data;
2241 	struct zfcp_scsi_dev *zfcp_sdev;
2242 
2243 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2244 		return;
2245 
2246 	zfcp_sdev = sdev_to_zfcp(sdev);
2247 
2248 	switch (req->qtcb->header.fsf_status) {
2249 	case FSF_PORT_HANDLE_NOT_VALID:
2250 		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
2251 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2252 		break;
2253 	case FSF_LUN_HANDLE_NOT_VALID:
2254 		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
2255 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2256 		break;
2257 	case FSF_PORT_BOXED:
2258 		zfcp_erp_set_port_status(zfcp_sdev->port,
2259 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2260 		zfcp_erp_port_reopen(zfcp_sdev->port,
2261 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
2262 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2263 		break;
2264 	case FSF_ADAPTER_STATUS_AVAILABLE:
2265 		switch (req->qtcb->header.fsf_status_qual.word[0]) {
2266 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2267 			zfcp_fc_test_link(zfcp_sdev->port);
2268 			fallthrough;
2269 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2270 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2271 			break;
2272 		}
2273 		break;
2274 	case FSF_GOOD:
2275 		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
2276 		break;
2277 	}
2278 }
2279 
2280 /**
2281  * zfcp_fsf_close_lun - close LUN
2282  * @erp_action: pointer to erp_action triggering the "close LUN"
2283  * Returns: 0 on success, error otherwise
2284  */
2285 int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
2286 {
2287 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
2288 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
2289 	struct zfcp_fsf_req *req;
2290 	int retval = -EIO;
2291 
2292 	spin_lock_irq(&qdio->req_q_lock);
2293 	if (zfcp_qdio_sbal_get(qdio))
2294 		goto out;
2295 
2296 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
2297 				  SBAL_SFLAGS0_TYPE_READ,
2298 				  qdio->adapter->pool.erp_req);
2299 
2300 	if (IS_ERR(req)) {
2301 		retval = PTR_ERR(req);
2302 		goto out;
2303 	}
2304 
2305 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2306 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2307 
2308 	req->qtcb->header.port_handle = erp_action->port->handle;
2309 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2310 	req->handler = zfcp_fsf_close_lun_handler;
2311 	req->data = erp_action->sdev;
2312 	req->erp_action = erp_action;
2313 	erp_action->fsf_req_id = req->req_id;
2314 
2315 	zfcp_fsf_start_erp_timer(req);
2316 	retval = zfcp_fsf_req_send(req);
2317 	if (retval) {
2318 		zfcp_fsf_req_free(req);
2319 		erp_action->fsf_req_id = 0;
2320 	}
2321 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2322 out:
2323 	spin_unlock_irq(&qdio->req_q_lock);
2324 	return retval;
2325 }
2326 
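/* Accumulate one latency sample into the sum/min/max of a latency record. */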
2327 static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat)
2328 {
2329 	lat_rec->sum += lat;
2330 	lat_rec->min = min(lat_rec->min, lat);
2331 	lat_rec->max = max(lat_rec->max, lat);
2332 }
2333 
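/*
 * Collect blktrace driver data for a completed FCP command: flag errors,
 * record outbound SBAL usage and, if the adapter provides measurement data,
 * convert the channel and fabric latencies into timer ticks and fold them
 * into the per-device read/write/command latency statistics.
 */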
2334 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2335 {
2336 	struct fsf_qual_latency_info *lat_in;
2337 	struct zfcp_latency_cont *lat = NULL;
2338 	struct zfcp_scsi_dev *zfcp_sdev;
2339 	struct zfcp_blk_drv_data blktrc;
2340 	int ticks = req->adapter->timer_ticks;
2341 
2342 	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
2343 
2344 	blktrc.flags = 0;
2345 	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2346 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2347 		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
2348 	blktrc.inb_usage = 0;
2349 	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
2350 
2351 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
2352 	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2353 		zfcp_sdev = sdev_to_zfcp(scsi->device);
2354 		blktrc.flags |= ZFCP_BLK_LAT_VALID;
2355 		blktrc.channel_lat = lat_in->channel_lat * ticks;
2356 		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
2357 
2358 		switch (req->qtcb->bottom.io.data_direction) {
2359 		case FSF_DATADIR_DIF_READ_STRIP:
2360 		case FSF_DATADIR_DIF_READ_CONVERT:
2361 		case FSF_DATADIR_READ:
2362 			lat = &zfcp_sdev->latencies.read;
2363 			break;
2364 		case FSF_DATADIR_DIF_WRITE_INSERT:
2365 		case FSF_DATADIR_DIF_WRITE_CONVERT:
2366 		case FSF_DATADIR_WRITE:
2367 			lat = &zfcp_sdev->latencies.write;
2368 			break;
2369 		case FSF_DATADIR_CMND:
2370 			lat = &zfcp_sdev->latencies.cmd;
2371 			break;
2372 		}
2373 
2374 		if (lat) {
2375 			spin_lock(&zfcp_sdev->latencies.lock);
2376 			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2377 			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2378 			lat->counter++;
2379 			spin_unlock(&zfcp_sdev->latencies.lock);
2380 		}
2381 	}
2382 
2383 	blk_add_driver_data(scsi_cmd_to_rq(scsi), &blktrc, sizeof(blktrc));
2384 }
2385 
2386 /**
2387  * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
2388  * @req: Pointer to FSF request.
2389  * @sdev: Pointer to SCSI device as request context.
2390  */
2391 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
2392 					struct scsi_device *sdev)
2393 {
2394 	struct zfcp_scsi_dev *zfcp_sdev;
2395 	struct fsf_qtcb_header *header = &req->qtcb->header;
2396 
2397 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2398 		return;
2399 
2400 	zfcp_sdev = sdev_to_zfcp(sdev);
2401 
2402 	switch (header->fsf_status) {
2403 	case FSF_HANDLE_MISMATCH:
2404 	case FSF_PORT_HANDLE_NOT_VALID:
2405 		zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
2406 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2407 		break;
2408 	case FSF_FCPLUN_NOT_VALID:
2409 	case FSF_LUN_HANDLE_NOT_VALID:
2410 		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
2411 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2412 		break;
2413 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2414 		zfcp_fsf_class_not_supp(req);
2415 		break;
2416 	case FSF_DIRECTION_INDICATOR_NOT_VALID:
2417 		dev_err(&req->adapter->ccw_device->dev,
2418 			"Incorrect direction %d, LUN 0x%016Lx on port "
2419 			"0x%016Lx closed\n",
2420 			req->qtcb->bottom.io.data_direction,
2421 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
2422 			(unsigned long long)zfcp_sdev->port->wwpn);
2423 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
2424 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2425 		break;
2426 	case FSF_CMND_LENGTH_NOT_VALID:
2427 		dev_err(&req->adapter->ccw_device->dev,
2428 			"Incorrect FCP_CMND length %d, FCP device closed\n",
2429 			req->qtcb->bottom.io.fcp_cmnd_length);
2430 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
2431 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2432 		break;
2433 	case FSF_PORT_BOXED:
2434 		zfcp_erp_set_port_status(zfcp_sdev->port,
2435 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2436 		zfcp_erp_port_reopen(zfcp_sdev->port,
2437 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
2438 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2439 		break;
2440 	case FSF_LUN_BOXED:
2441 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2442 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2443 				    "fssfch6");
2444 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2445 		break;
2446 	case FSF_ADAPTER_STATUS_AVAILABLE:
2447 		if (header->fsf_status_qual.word[0] ==
2448 		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2449 			zfcp_fc_test_link(zfcp_sdev->port);
2450 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2451 		break;
2452 	case FSF_SECURITY_ERROR:
2453 		zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
2454 					    header->fsf_status_qual.word[0],
2455 					    zfcp_sdev->port->wwpn);
2456 		zfcp_erp_port_forced_reopen(zfcp_sdev->port, 0, "fssfch7");
2457 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2458 		break;
2459 	}
2460 }
2461 
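/*
 * Complete a SCSI command whose FCP command request has finished. Runs under
 * the adapter's abort_lock so that scsi_done() cannot race with a completed
 * abort for the same command. Protection-data errors are mapped to host
 * bytes or DIF sense data before the FCP response is evaluated, traced, and
 * handed back to the SCSI midlayer.
 */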
2462 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2463 {
2464 	struct scsi_cmnd *scpnt;
2465 	struct fcp_resp_with_ext *fcp_rsp;
2466 	unsigned long flags;
2467 
2468 	read_lock_irqsave(&req->adapter->abort_lock, flags);
2469 
2470 	scpnt = req->data;
2471 	if (unlikely(!scpnt)) {
2472 		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2473 		return;
2474 	}
2475 
2476 	zfcp_fsf_fcp_handler_common(req, scpnt->device);
2477 
2478 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2479 		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2480 		goto skip_fsfstatus;
2481 	}
2482 
2483 	switch (req->qtcb->header.fsf_status) {
2484 	case FSF_INCONSISTENT_PROT_DATA:
2485 	case FSF_INVALID_PROT_PARM:
2486 		set_host_byte(scpnt, DID_ERROR);
2487 		goto skip_fsfstatus;
2488 	case FSF_BLOCK_GUARD_CHECK_FAILURE:
2489 		zfcp_scsi_dif_sense_error(scpnt, 0x1);
2490 		goto skip_fsfstatus;
2491 	case FSF_APP_TAG_CHECK_FAILURE:
2492 		zfcp_scsi_dif_sense_error(scpnt, 0x2);
2493 		goto skip_fsfstatus;
2494 	case FSF_REF_TAG_CHECK_FAILURE:
2495 		zfcp_scsi_dif_sense_error(scpnt, 0x3);
2496 		goto skip_fsfstatus;
2497 	}
2498 	BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
2499 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
2500 	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2501 
2502 skip_fsfstatus:
2503 	zfcp_fsf_req_trace(req, scpnt);
2504 	zfcp_dbf_scsi_result(scpnt, req);
2505 
2506 	scpnt->host_scribble = NULL;
2507 	scsi_done(scpnt);
2508 	/*
2509 	 * We must hold this lock until scsi_done has been called.
2510 	 * Otherwise we might call scsi_done after the abort for this
2511 	 * command has already completed.
2512 	 * Note: scsi_done must not block!
2513 	 */
2514 	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2515 }
2516 
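/*
 * Derive the FSF data direction from the SCSI protection operation and DMA
 * data direction of a command. Returns -EINVAL for combinations the FCP
 * channel cannot handle (e.g. bidirectional transfers).
 */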
2517 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2518 {
2519 	switch (scsi_get_prot_op(scsi_cmnd)) {
2520 	case SCSI_PROT_NORMAL:
2521 		switch (scsi_cmnd->sc_data_direction) {
2522 		case DMA_NONE:
2523 			*data_dir = FSF_DATADIR_CMND;
2524 			break;
2525 		case DMA_FROM_DEVICE:
2526 			*data_dir = FSF_DATADIR_READ;
2527 			break;
2528 		case DMA_TO_DEVICE:
2529 			*data_dir = FSF_DATADIR_WRITE;
2530 			break;
2531 		case DMA_BIDIRECTIONAL:
2532 			return -EINVAL;
2533 		}
2534 		break;
2535 
2536 	case SCSI_PROT_READ_STRIP:
2537 		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
2538 		break;
2539 	case SCSI_PROT_WRITE_INSERT:
2540 		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
2541 		break;
2542 	case SCSI_PROT_READ_PASS:
2543 		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
2544 		break;
2545 	case SCSI_PROT_WRITE_PASS:
2546 		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
2547 		break;
2548 	default:
2549 		return -EINVAL;
2550 	}
2551 
2552 	return 0;
2553 }
2554 
2555 /**
2556  * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2557  * @scsi_cmnd: scsi command to be sent
2558  */
2559 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2560 {
2561 	struct zfcp_fsf_req *req;
2562 	struct fcp_cmnd *fcp_cmnd;
2563 	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2564 	int retval = -EIO;
2565 	struct scsi_device *sdev = scsi_cmnd->device;
2566 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2567 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2568 	struct zfcp_qdio *qdio = adapter->qdio;
2569 	struct fsf_qtcb_bottom_io *io;
2570 	unsigned long flags;
2571 
2572 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2573 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
2574 		return -EBUSY;
2575 
2576 	spin_lock_irqsave(&qdio->req_q_lock, flags);
2577 	if (atomic_read(&qdio->req_q_free) <= 0) {
2578 		atomic_inc(&qdio->req_q_full);
2579 		goto out;
2580 	}
2581 
2582 	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2583 		sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2584 
2585 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2586 				  sbtype, adapter->pool.scsi_req);
2587 
2588 	if (IS_ERR(req)) {
2589 		retval = PTR_ERR(req);
2590 		goto out;
2591 	}
2592 
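	/*
	 * Remember the FSF request ID in host_scribble so that the SCSI
	 * abort path can look up the request belonging to this command.
	 */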
2593 	BUILD_BUG_ON(sizeof(scsi_cmnd->host_scribble) < sizeof(req->req_id));
2594 	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2595 
2596 	io = &req->qtcb->bottom.io;
2597 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2598 	req->data = scsi_cmnd;
2599 	req->handler = zfcp_fsf_fcp_cmnd_handler;
2600 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2601 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2602 	io->service_class = FSF_CLASS_3;
2603 	io->fcp_cmnd_length = FCP_CMND_LEN;
2604 
2605 	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
2606 		io->data_block_length = scsi_prot_interval(scsi_cmnd);
2607 		io->ref_tag_value = scsi_prot_ref_tag(scsi_cmnd);
2608 	}
2609 
2610 	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
2611 		goto failed_scsi_cmnd;
2612 
2613 	BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
2614 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
2615 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2616 
2617 	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
2618 	    scsi_prot_sg_count(scsi_cmnd)) {
2619 		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2620 				       scsi_prot_sg_count(scsi_cmnd));
2621 		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2622 						 scsi_prot_sglist(scsi_cmnd));
2623 		if (retval)
2624 			goto failed_scsi_cmnd;
2625 		io->prot_data_length = zfcp_qdio_real_bytes(
2626 						scsi_prot_sglist(scsi_cmnd));
2627 	}
2628 
2629 	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2630 					 scsi_sglist(scsi_cmnd));
2631 	if (unlikely(retval))
2632 		goto failed_scsi_cmnd;
2633 
2634 	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2635 	if (zfcp_adapter_multi_buffer_active(adapter))
2636 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
2637 
2638 	retval = zfcp_fsf_req_send(req);
2639 	if (unlikely(retval))
2640 		goto failed_scsi_cmnd;
2641 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2642 
2643 	goto out;
2644 
2645 failed_scsi_cmnd:
2646 	zfcp_fsf_req_free(req);
2647 	scsi_cmnd->host_scribble = NULL;
2648 out:
2649 	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
2650 	return retval;
2651 }
2652 
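/*
 * Evaluate the FSF/FCP response to a task management function: unless the
 * response code is FCP_TMF_CMPL and no request error occurred, flag the TMF
 * as failed for the caller waiting on the request.
 */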
2653 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2654 {
2655 	struct scsi_device *sdev = req->data;
2656 	struct fcp_resp_with_ext *fcp_rsp;
2657 	struct fcp_resp_rsp_info *rsp_info;
2658 
2659 	zfcp_fsf_fcp_handler_common(req, sdev);
2660 
2661 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
2662 	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2663 
2664 	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2665 	     (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2666 		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2667 }
2668 
2669 /**
2670  * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF).
2671  * @sdev: Pointer to SCSI device to send the task management command to.
2672  * @tm_flags: Unsigned byte for task management flags.
2673  *
2674  * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise.
2675  */
2676 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
2677 					    u8 tm_flags)
2678 {
2679 	struct zfcp_fsf_req *req = NULL;
2680 	struct fcp_cmnd *fcp_cmnd;
2681 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2682 	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2683 
2684 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2685 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
2686 		return NULL;
2687 
2688 	spin_lock_irq(&qdio->req_q_lock);
2689 	if (zfcp_qdio_sbal_get(qdio))
2690 		goto out;
2691 
2692 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2693 				  SBAL_SFLAGS0_TYPE_WRITE,
2694 				  qdio->adapter->pool.scsi_req);
2695 
2696 	if (IS_ERR(req)) {
2697 		req = NULL;
2698 		goto out;
2699 	}
2700 
2701 	req->data = sdev;
2702 
2703 	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2704 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2705 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2706 	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2707 	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2708 	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2709 
2710 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2711 
2712 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
2713 	zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
2714 
2715 	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
2716 	if (!zfcp_fsf_req_send(req)) {
2717 		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
2718 		goto out;
2719 	}
2720 
2721 	zfcp_fsf_req_free(req);
2722 	req = NULL;
2723 out:
2724 	spin_unlock_irq(&qdio->req_q_lock);
2725 	return req;
2726 }
2727 
2728 /**
2729  * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2730  * @qdio: pointer to struct zfcp_qdio
2731  * @sbal_idx: response queue index of SBAL to be processed
2732  */
2733 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2734 {
2735 	struct zfcp_adapter *adapter = qdio->adapter;
2736 	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
2737 	struct qdio_buffer_element *sbale;
2738 	struct zfcp_fsf_req *fsf_req;
2739 	u64 req_id;
2740 	int idx;
2741 
2742 	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2743 
2744 		sbale = &sbal->element[idx];
2745 		req_id = sbale->addr;
2746 		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2747 
2748 		if (!fsf_req) {
2749 			/*
2750 			 * An unknown request ID means potential memory
2751 			 * corruption, so we must stop the machine immediately.
2752 			 */
2753 			zfcp_qdio_siosl(adapter);
2754 			panic("error: unknown req_id (%llx) on adapter %s.\n",
2755 			      req_id, dev_name(&adapter->ccw_device->dev));
2756 		}
2757 
2758 		zfcp_fsf_req_complete(fsf_req);
2759 
2760 		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2761 			break;
2762 	}
2763 }
2764