// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Debug traces for zfcp.
 *
 * Copyright IBM Corp. 2002, 2017
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"

static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
		 "number of pages for each debug feature area (default 4)");

static u32 dbflevel = 3;

module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
		 "log level for each debug feature area "
		 "(default 3, range 0..6)");

static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}
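
/*
 * zfcp_dbf_plen() returns how much of a payload trace record is in use:
 * the record header plus "offset" bytes of its data[] area.  data[] is the
 * trailing, ZFCP_DBF_PAY_MAX_REC byte member of struct zfcp_dbf_pay (see
 * zfcp_dbf.h), so debug_event() callers never copy unused tail bytes.
 */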

static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
		       u64 req_id)
{
	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
	u16 offset = 0, rec_length;

	spin_lock(&dbf->pay_lock);
	memset(pl, 0, sizeof(*pl));
	pl->fsf_req_id = req_id;
	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

	while (offset < length) {
		rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
				 (u16) (length - offset));
		memcpy(pl->data, data + offset, rec_length);
		debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

		offset += rec_length;
		pl->counter++;
	}

	spin_unlock(&dbf->pay_lock);
}
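
/*
 * Note on zfcp_dbf_pl_write(): payloads larger than ZFCP_DBF_PAY_MAX_REC
 * are split into consecutive records in the "pay" trace area.  All chunks
 * carry the same fsf_req_id and area tag, and pl->counter numbers them,
 * so a dump can be correlated and reassembled in order later.
 */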

/**
 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
 * @tag: tag indicating which kind of FSF response has been received
 * @level: trace level to be used for the event
 * @req: request for which a response was received
 */
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_RES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;
	rec->fsf_seq_no = req->seq_no;
	rec->u.res.req_issued = req->issued;
	rec->u.res.prot_status = q_pref->prot_status;
	rec->u.res.fsf_status = q_head->fsf_status;
	rec->u.res.port_handle = q_head->port_handle;
	rec->u.res.lun_handle = q_head->lun_handle;

	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
	       FSF_PROT_STATUS_QUAL_SIZE);
	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
	       FSF_STATUS_QUALIFIER_SIZE);

	if (req->fsf_command != FSF_QTCB_FCP_CMND) {
		rec->pl_len = q_head->log_length;
		zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
				  rec->pl_len, "fsf_res", req->req_id);
	}

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
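
/*
 * For FSF_QTCB_FCP_CMND responses no QTCB log data is written to the pay
 * area above, presumably to keep the hot SCSI I/O path lean; the FCP
 * response itself is traced separately via zfcp_dbf_scsi() ("fcp_riu").
 */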

/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request providing the unsolicited status
 */
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_status_read_buffer *srb = req->data;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 2;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_USS;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;

	if (!srb)
		goto log;

	rec->u.uss.status_type = srb->status_type;
	rec->u.uss.status_subtype = srb->status_subtype;
	rec->u.uss.d_id = ntoh24(srb->d_id);
	rec->u.uss.lun = srb->fcp_lun;
	memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
	       sizeof(rec->u.uss.queue_designator));

	/* status read buffer payload length */
	rec->pl_len = (!srb->length) ? 0 : srb->length -
			offsetof(struct fsf_status_read_buffer, payload);

	if (rec->pl_len)
		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
				  "fsf_uss", req->req_id);
log:
	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
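
/*
 * Even without a status read buffer (srb == NULL) a minimal record with
 * only the request identification is written above, so the unsolicited
 * status event itself never goes untraced.
 */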

/**
 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
 * @tag: tag indicating which kind of bit error unsolicited status was received
 * @req: request which caused the bit_error condition
 */
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	struct fsf_status_read_buffer *sr_buf = req->data;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BIT;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;
	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
	       sizeof(struct fsf_bit_error_payload));

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
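
/*
 * The fsf_bit_error_payload reported in the status read buffer is copied
 * into the HBA record itself (rec->u.be); it is small enough that no
 * separate payload record is needed.
 */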

/**
 * zfcp_dbf_hba_def_err - trace event for deferred error messages
 * @adapter: pointer to struct zfcp_adapter
 * @req_id: request id which caused the deferred error message
 * @scount: number of sbals incl. the signaling sbal
 * @pl: array of all involved sbals
 */
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
			  void **pl)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	unsigned long flags;
	static int const level = 1;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->pay, level)))
		return;

	if (!pl)
		return;

	spin_lock_irqsave(&dbf->pay_lock, flags);
	memset(payload, 0, sizeof(*payload));

	memcpy(payload->area, "def_err", 7);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	length = min((u16)sizeof(struct qdio_buffer),
		     (u16)ZFCP_DBF_PAY_MAX_REC);

	while (payload->counter < scount && (char *)pl[payload->counter]) {
		memcpy(payload->data, (char *)pl[payload->counter], length);
		debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
		payload->counter++;
	}

	spin_unlock(&dbf->pay_lock, flags);
}
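
/*
 * zfcp_dbf_hba_def_err() dumps each SBAL of the deferred error as its own
 * payload record, capped at min(sizeof(struct qdio_buffer),
 * ZFCP_DBF_PAY_MAX_REC); payload->counter identifies the SBAL index so the
 * records can be matched against the scount SBALs passed in.
 */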

/**
 * zfcp_dbf_hba_basic - trace event for basic adapter events
 * @tag: identifier for event
 * @adapter: pointer to struct zfcp_adapter
 */
void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BASIC;

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
				struct zfcp_adapter *adapter,
				struct zfcp_port *port,
				struct scsi_device *sdev)
{
	rec->adapter_status = atomic_read(&adapter->status);
	if (port) {
		rec->port_status = atomic_read(&port->status);
		rec->wwpn = port->wwpn;
		rec->d_id = port->d_id;
	}
	if (sdev) {
		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
		rec->lun = zfcp_scsi_dev_lun(sdev);
	} else
		rec->lun = ZFCP_DBF_INVALID_LUN;
}

/**
 * zfcp_dbf_rec_trig - trace event related to triggered recovery
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock has to be held.
 */
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
		       struct zfcp_port *port, struct scsi_device *sdev,
		       u8 want, u8 need)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	static int const level = 1;
	struct list_head *entry;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->rec, level)))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_TRIG;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, adapter, port, sdev);

	list_for_each(entry, &adapter->erp_ready_head)
		rec->u.trig.ready++;

	list_for_each(entry, &adapter->erp_running_head)
		rec->u.trig.running++;

	rec->u.trig.want = want;
	rec->u.trig.need = need;

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
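
/*
 * The ready/running counters above are simple list lengths of the
 * adapter's erp_ready_head and erp_running_head queues at trigger time;
 * this is why the caller must hold adapter->erp_lock as documented.
 */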

/**
 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
 * @level: trace level to be used for event
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
	struct zfcp_dbf *dbf = erp->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	if (!debug_level_enabled(dbf->rec, level))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

	rec->u.run.fsf_req_id = erp->fsf_req_id;
	rec->u.run.rec_status = erp->status;
	rec->u.run.rec_step = erp->step;
	rec->u.run.rec_action = erp->action;

	if (erp->sdev)
		rec->u.run.rec_count =
			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
	else if (erp->port)
		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
	else
		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
	zfcp_dbf_rec_run_lvl(1, tag, erp);
}

/**
 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
 * @tag: identifier for event
 * @wka_port: well known address port
 * @req_id: request ID to correlate with potential HBA trace record
 */
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
			  u64 req_id)
{
	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->rec, level)))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->port_status = wka_port->status;
	rec->d_id = wka_port->d_id;
	rec->lun = ZFCP_DBF_INVALID_LUN;

	rec->u.run.fsf_req_id = req_id;
	rec->u.run.rec_status = ~0;
	rec->u.run.rec_step = ~0;
	rec->u.run.rec_action = ~0;
	rec->u.run.rec_count = ~0;

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
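
/*
 * Well-known-address (WKA) ports are not recovered through a regular
 * erp_action, so the rec_status/rec_step/rec_action/rec_count fields are
 * filled with ~0 as "not applicable" markers to distinguish these records
 * from ordinary recovery traces.
 */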

#define ZFCP_DBF_SAN_LEVEL 1

static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
		  u64 req_id, u32 d_id, u16 cap_len)
{
	struct zfcp_dbf_san *rec = &dbf->san_buf;
	u16 rec_len;
	unsigned long flags;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	u16 pay_sum = 0;

	spin_lock_irqsave(&dbf->san_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = id;
	rec->fsf_req_id = req_id;
	rec->d_id = d_id;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->pl_len = len; /* full length even if we cap pay below */
	if (!sg)
		goto out;
	rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
	memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
	if (len <= rec_len)
		goto out; /* skip pay record if full content in rec->payload */

	/* if (len > rec_len):
	 * dump data up to cap_len ignoring small duplicate in rec->payload
	 */
	spin_lock(&dbf->pay_lock);
	memset(payload, 0, sizeof(*payload));
	memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
		u16 pay_len, offset = 0;

		while (offset < sg->length && pay_sum < cap_len) {
			pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
				      (u16)(sg->length - offset));
			/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
			debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
				    zfcp_dbf_plen(pay_len));
			payload->counter++;
			offset += pay_len;
			pay_sum += pay_len;
		}
	}
	spin_unlock(&dbf->pay_lock);

out:
	debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->san_lock, flags);
}
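
/*
 * Summary of zfcp_dbf_san(): the SAN record itself embeds up to
 * ZFCP_DBF_SAN_MAX_PAYLOAD bytes of the first scatterlist entry, while
 * rec->pl_len always holds the full frame length.  Longer frames are
 * additionally dumped to the pay area in ZFCP_DBF_PAY_MAX_REC chunks,
 * but only up to cap_len bytes.
 */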

/**
 * zfcp_dbf_san_req - trace event for issued SAN request
 * @tag: identifier for event
 * @fsf: request containing issued CT data
 * @d_id: destination ID
 */
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
		     length, fsf->req_id, d_id, length);
}

static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
					      struct zfcp_fsf_req *fsf,
					      u16 len)
{
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
	struct scatterlist *resp_entry = ct_els->resp;
	struct fc_ct_hdr *resph;
	struct fc_gpn_ft_resp *acc;
	int max_entries, x, last = 0;

	if (!(memcmp(tag, "fsscth2", 7) == 0
	      && ct_els->d_id == FC_FID_DIR_SERV
	      && reqh->ct_rev == FC_CT_REV
	      && reqh->ct_in_id[0] == 0
	      && reqh->ct_in_id[1] == 0
	      && reqh->ct_in_id[2] == 0
	      && reqh->ct_fs_type == FC_FST_DIR
	      && reqh->ct_fs_subtype == FC_NS_SUBTYPE
	      && reqh->ct_options == 0
	      && reqh->_ct_resvd1 == 0
	      && reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT)
	      /* reqh->ct_mr_size can vary so do not match but read below */
	      && reqh->_ct_resvd2 == 0
	      && reqh->ct_reason == 0
	      && reqh->ct_explan == 0
	      && reqh->ct_vendor == 0
	      && reqn->fn_resvd == 0
	      && reqn->fn_domain_id_scope == 0
	      && reqn->fn_area_id_scope == 0
	      && reqn->fn_fc4_type == FC_TYPE_FCP))
		return len; /* not GPN_FT response so do not cap */

	acc = sg_virt(resp_entry);

	/* cap all but accept CT responses to at least the CT header */
	resph = (struct fc_ct_hdr *)acc;
	if ((ct_els->status) ||
	    (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
		return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);

	max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 /
		       sizeof(struct fc_gpn_ft_resp))
		+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
		     * to account for header as 1st pseudo "entry" */;

	/* the basic CT_IU preamble is the same size as one entry in the GPN_FT
	 * response, allowing us to skip special handling for it - just skip it
	 */
	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++resp_entry);

		last = acc->fp_flags & FC_NS_FID_LAST;
	}
	len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
	return len; /* cap after last entry */
}
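
/*
 * Rationale for the cap: a GPN_FT accept response may span many pages of
 * mostly empty entries.  Walking the entries until one carries
 * FC_NS_FID_LAST limits the pay dump to the ports the name server
 * actually reported, instead of tracing the whole response buffer.
 */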

/**
 * zfcp_dbf_san_res - trace event for received SAN response
 * @tag: identifier for event
 * @fsf: request for which a response was received
 */
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
		     length, fsf->req_id, ct_els->d_id,
		     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}

/**
 * zfcp_dbf_san_in_els - trace event for incoming ELS
 * @tag: identifier for event
 * @fsf: request carrying the unsolicited status buffer with the incoming ELS
 */
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct fsf_status_read_buffer *srb =
		(struct fsf_status_read_buffer *) fsf->data;
	u16 length;
	struct scatterlist sg;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)(srb->length -
			offsetof(struct fsf_status_read_buffer, payload));
	sg_init_one(&sg, srb->payload.data, length);
	zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
		     fsf->req_id, ntoh24(srb->d_id), length);
}

/**
 * zfcp_dbf_scsi - trace event for scsi commands
 * @tag: identifier for event
 * @level: trace level to be used for the event
 * @sc: pointer to struct scsi_cmnd
 * @fsf: pointer to struct zfcp_fsf_req
 */
void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
		   struct zfcp_fsf_req *fsf)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) sc->device->host->hostdata[0];
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *fcp_rsp_info;
	unsigned long flags;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	rec->scsi_result = sc->result;
	rec->scsi_retries = sc->retries;
	rec->scsi_allowed = sc->allowed;
	rec->scsi_id = sc->device->id;
	rec->scsi_lun = (u32)sc->device->lun;
	rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
	rec->host_scribble = (unsigned long)sc->host_scribble;

	memcpy(rec->scsi_opcode, sc->cmnd,
	       min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));

	if (fsf) {
		rec->fsf_req_id = fsf->req_id;
		rec->pl_len = FCP_RESP_WITH_EXT;
		fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu);
		/* mandatory parts of FCP_RSP IU in this SCSI record */
		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
		}
		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
		}
		/* complete FCP_RSP IU in associated PAYload record
		 * but only if there are optional parts
		 */
		if (fcp_rsp->resp.fr_flags != 0)
			zfcp_dbf_pl_write(
				dbf, fcp_rsp,
				/* at least one full PAY record
				 * but not beyond hardware response field
				 */
				min_t(u16, max_t(u16, rec->pl_len,
						 ZFCP_DBF_PAY_MAX_REC),
				      FSF_FCP_RSP_SIZE),
				"fcp_riu", fsf->req_id);
	}

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
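
/*
 * The mandatory part of the FCP_RSP IU is embedded in the SCSI record
 * itself.  If the response carries optional parts (any fr_flags bit set,
 * e.g. FCP_RSP_LEN_VAL or FCP_SNS_LEN_VAL), the complete IU is written to
 * the pay area under the "fcp_riu" tag as well, sized to at least one
 * full payload record but never beyond FSF_FCP_RSP_SIZE.
 */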

static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
	struct debug_info *d;

	d = debug_register(name, size, 1, rec_size);
	if (!d)
		return NULL;

	debug_register_view(d, &debug_hex_ascii_view);
	debug_set_level(d, dbflevel);

	return d;
}
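
/*
 * zfcp_dbf_reg() creates one s390 debug feature area of "size" pages with
 * records of rec_size bytes, attaches the hex/ASCII view and applies the
 * module-wide dbflevel.  The resulting traces are typically readable under
 * /sys/kernel/debug/s390dbf/<name>/hex_ascii.
 */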

static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
	if (!dbf)
		return;

	debug_unregister(dbf->scsi);
	debug_unregister(dbf->san);
	debug_unregister(dbf->hba);
	debug_unregister(dbf->pay);
	debug_unregister(dbf->rec);
	kfree(dbf);
}

/**
 * zfcp_dbf_adapter_register - registers debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be registered
 *
 * Return: -ENOMEM on error, 0 otherwise
 */
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
	char name[DEBUG_MAX_NAME_LEN];
	struct zfcp_dbf *dbf;

	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
	if (!dbf)
		return -ENOMEM;

	spin_lock_init(&dbf->pay_lock);
	spin_lock_init(&dbf->hba_lock);
	spin_lock_init(&dbf->san_lock);
	spin_lock_init(&dbf->scsi_lock);
	spin_lock_init(&dbf->rec_lock);

	/* debug feature area which records recovery activity */
	sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
	dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
	if (!dbf->rec)
		goto err_out;

	/* debug feature area which records HBA (FSF and QDIO) conditions */
	sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
	dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
	if (!dbf->hba)
		goto err_out;

	/* debug feature area which records payload info */
	sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
	dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
	if (!dbf->pay)
		goto err_out;

	/* debug feature area which records SAN command failures and recovery */
	sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
	dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
	if (!dbf->san)
		goto err_out;

	/* debug feature area which records SCSI command failures and recovery */
	sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
	dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
	if (!dbf->scsi)
		goto err_out;

	adapter->dbf = dbf;

	return 0;
err_out:
	zfcp_dbf_unregister(dbf);
	return -ENOMEM;
}
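
/*
 * Each adapter thus gets five trace areas, named after its CCW bus ID:
 * zfcp_<busid>_rec, _hba, _pay, _san and _scsi.  The pay area is sized at
 * twice dbfsize pages because it collects the bulkier payload dumps
 * referenced from the other areas.
 */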

/**
 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be unregistered
 */
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;

	adapter->dbf = NULL;
	zfcp_dbf_unregister(dbf);
}