/*
 * zfcp device driver
 *
 * Debug traces for zfcp.
 *
 * Copyright IBM Corp. 2002, 2016
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"

static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
		 "number of pages for each debug feature area (default 4)");

static u32 dbflevel = 3;

module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
		 "log level for each debug feature area "
		 "(default 3, range 0..6)");

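/* Length of a payload trace record trimmed to its header plus @offset used
 * data bytes, instead of the full ZFCP_DBF_PAY_MAX_REC data array.
 */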
static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}

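/* Write a payload of @length bytes to the "pay" trace area, split into
 * records of at most ZFCP_DBF_PAY_MAX_REC bytes; every record carries the
 * same @area tag and @req_id so it can be correlated with the main record.
 */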
static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
		       u64 req_id)
{
	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
	u16 offset = 0, rec_length;

	spin_lock(&dbf->pay_lock);
	memset(pl, 0, sizeof(*pl));
	pl->fsf_req_id = req_id;
	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

	while (offset < length) {
		rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
				 (u16) (length - offset));
		memcpy(pl->data, data + offset, rec_length);
		debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

		offset += rec_length;
		pl->counter++;
	}

	spin_unlock(&dbf->pay_lock);
}

/**
 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
 * @tag: tag indicating which kind of FSF response has been received
 * @level: trace level to be used for event
 * @req: request for which a response was received
 */
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_RES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;
	rec->fsf_seq_no = req->seq_no;
	rec->u.res.req_issued = req->issued;
	rec->u.res.prot_status = q_pref->prot_status;
	rec->u.res.fsf_status = q_head->fsf_status;
	rec->u.res.port_handle = q_head->port_handle;
	rec->u.res.lun_handle = q_head->lun_handle;

	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
	       FSF_PROT_STATUS_QUAL_SIZE);
	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
	       FSF_STATUS_QUALIFIER_SIZE);

	if (req->fsf_command != FSF_QTCB_FCP_CMND) {
		rec->pl_len = q_head->log_length;
		zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
				  rec->pl_len, "fsf_res", req->req_id);
	}

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request providing the unsolicited status
 */
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_status_read_buffer *srb = req->data;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_USS;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;

	if (!srb)
		goto log;

	rec->u.uss.status_type = srb->status_type;
	rec->u.uss.status_subtype = srb->status_subtype;
	rec->u.uss.d_id = ntoh24(srb->d_id);
	rec->u.uss.lun = srb->fcp_lun;
	memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
	       sizeof(rec->u.uss.queue_designator));

	/* status read buffer payload length */
	rec->pl_len = (!srb->length) ? 0 : srb->length -
			offsetof(struct fsf_status_read_buffer, payload);

	if (rec->pl_len)
		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
				  "fsf_uss", req->req_id);
log:
	debug_event(dbf->hba, 2, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request which caused the bit_error condition
 */
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	struct fsf_status_read_buffer *sr_buf = req->data;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BIT;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;
	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
	       sizeof(struct fsf_bit_error_payload));

	debug_event(dbf->hba, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_def_err - trace event for deferred error messages
 * @adapter: pointer to struct zfcp_adapter
 * @req_id: request id which caused the deferred error message
 * @scount: number of sbals incl. the signaling sbal
 * @pl: array of all involved sbals
 */
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
			  void **pl)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	unsigned long flags;
	u16 length;

	if (!pl)
		return;

	spin_lock_irqsave(&dbf->pay_lock, flags);
	memset(payload, 0, sizeof(*payload));

	memcpy(payload->area, "def_err", 7);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	length = min((u16)sizeof(struct qdio_buffer),
		     (u16)ZFCP_DBF_PAY_MAX_REC);

	while (payload->counter < scount && (char *)pl[payload->counter]) {
		memcpy(payload->data, (char *)pl[payload->counter], length);
		debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
		payload->counter++;
	}

	spin_unlock_irqrestore(&dbf->pay_lock, flags);
}

/**
 * zfcp_dbf_hba_basic - trace event for basic adapter events
 * @tag: identifier for event
 * @adapter: pointer to struct zfcp_adapter
 */
void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BASIC;

	debug_event(dbf->hba, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

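/* Fill in the status information common to all recovery trace records:
 * adapter status and, when available, port and LUN status.
 */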
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
				struct zfcp_adapter *adapter,
				struct zfcp_port *port,
				struct scsi_device *sdev)
{
	rec->adapter_status = atomic_read(&adapter->status);
	if (port) {
		rec->port_status = atomic_read(&port->status);
		rec->wwpn = port->wwpn;
		rec->d_id = port->d_id;
	}
	if (sdev) {
		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
		rec->lun = zfcp_scsi_dev_lun(sdev);
	} else
		rec->lun = ZFCP_DBF_INVALID_LUN;
}

/**
 * zfcp_dbf_rec_trig - trace event related to triggered recovery
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock has to be held.
 */
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
		       struct zfcp_port *port, struct scsi_device *sdev,
		       u8 want, u8 need)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	struct list_head *entry;
	unsigned long flags;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_TRIG;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, adapter, port, sdev);

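	/* snapshot how many ERP actions are currently queued and running */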
	list_for_each(entry, &adapter->erp_ready_head)
		rec->u.trig.ready++;

	list_for_each(entry, &adapter->erp_running_head)
		rec->u.trig.running++;

	rec->u.trig.want = want;
	rec->u.trig.need = need;

	debug_event(dbf->rec, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
 * @level: trace level to be used for event
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
	struct zfcp_dbf *dbf = erp->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

	rec->u.run.fsf_req_id = erp->fsf_req_id;
	rec->u.run.rec_status = erp->status;
	rec->u.run.rec_step = erp->step;
	rec->u.run.rec_action = erp->action;

	if (erp->sdev)
		rec->u.run.rec_count =
			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
	else if (erp->port)
		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
	else
		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
	zfcp_dbf_rec_run_lvl(1, tag, erp);
}

/**
 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
 * @tag: identifier for event
 * @wka_port: well known address port
 * @req_id: request ID to correlate with potential HBA trace record
 */
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
			  u64 req_id)
{
	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->port_status = wka_port->status;
	rec->d_id = wka_port->d_id;
	rec->lun = ZFCP_DBF_INVALID_LUN;

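	/* no ERP action is involved for WKA ports; mark the recovery-run
	 * fields as not applicable
	 */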
	rec->u.run.fsf_req_id = req_id;
	rec->u.run.rec_status = ~0;
	rec->u.run.rec_step = ~0;
	rec->u.run.rec_action = ~0;
	rec->u.run.rec_count = ~0;

	debug_event(dbf->rec, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

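/* Common helper for all SAN trace events: writes one "san" trace record with
 * up to ZFCP_DBF_SAN_MAX_PAYLOAD bytes of the first scatterlist entry inline
 * and, if the full payload is longer, additionally dumps up to @cap_len bytes
 * of the scatterlist to the "pay" trace area.
 */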
static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
		  u64 req_id, u32 d_id, u16 cap_len)
{
	struct zfcp_dbf_san *rec = &dbf->san_buf;
	u16 rec_len;
	unsigned long flags;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	u16 pay_sum = 0;

	spin_lock_irqsave(&dbf->san_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = id;
	rec->fsf_req_id = req_id;
	rec->d_id = d_id;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->pl_len = len; /* full length even if we cap pay below */
	if (!sg)
		goto out;
	rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
	memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
	if (len <= rec_len)
		goto out; /* skip pay record if full content in rec->payload */

	/* if (len > rec_len):
	 * dump data up to cap_len ignoring small duplicate in rec->payload
	 */
	spin_lock(&dbf->pay_lock);
	memset(payload, 0, sizeof(*payload));
	memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
		u16 pay_len, offset = 0;

		while (offset < sg->length && pay_sum < cap_len) {
			pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
				      (u16)(sg->length - offset));
			/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
			debug_event(dbf->pay, 1, payload,
				    zfcp_dbf_plen(pay_len));
			payload->counter++;
			offset += pay_len;
			pay_sum += pay_len;
		}
	}
	spin_unlock(&dbf->pay_lock);

out:
	debug_event(dbf->san, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->san_lock, flags);
}

/**
 * zfcp_dbf_san_req - trace event for issued SAN request
 * @tag: identifier for event
 * @fsf: request containing issued CT data
 * @d_id: destination ID
 */
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
		     length, fsf->req_id, d_id, length);
}

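/* If the traced response belongs to a GPN_FT request sent to the directory
 * server, cap the dumped payload after the entry flagged FC_NS_FID_LAST so
 * that unused buffer space is not traced; for any other response return
 * @len unchanged.
 */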
static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
					      struct zfcp_fsf_req *fsf,
					      u16 len)
{
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
	struct scatterlist *resp_entry = ct_els->resp;
	struct fc_gpn_ft_resp *acc;
	int max_entries, x, last = 0;

	if (!(memcmp(tag, "fsscth2", 7) == 0
	      && ct_els->d_id == FC_FID_DIR_SERV
	      && reqh->ct_rev == FC_CT_REV
	      && reqh->ct_in_id[0] == 0
	      && reqh->ct_in_id[1] == 0
	      && reqh->ct_in_id[2] == 0
	      && reqh->ct_fs_type == FC_FST_DIR
	      && reqh->ct_fs_subtype == FC_NS_SUBTYPE
	      && reqh->ct_options == 0
	      && reqh->_ct_resvd1 == 0
	      && reqh->ct_cmd == FC_NS_GPN_FT
	      /* reqh->ct_mr_size can vary so do not match but read below */
	      && reqh->_ct_resvd2 == 0
	      && reqh->ct_reason == 0
	      && reqh->ct_explan == 0
	      && reqh->ct_vendor == 0
	      && reqn->fn_resvd == 0
	      && reqn->fn_domain_id_scope == 0
	      && reqn->fn_area_id_scope == 0
	      && reqn->fn_fc4_type == FC_TYPE_FCP))
		return len; /* not GPN_FT response so do not cap */

	acc = sg_virt(resp_entry);
	max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
		+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
		     * to account for header as 1st pseudo "entry" */;

	/* the basic CT_IU preamble is the same size as one entry in the GPN_FT
	 * response, allowing us to skip special handling for it - just skip it
	 */
	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++resp_entry);

		last = acc->fp_flags & FC_NS_FID_LAST;
	}
	len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
	return len; /* cap after last entry */
}

/**
 * zfcp_dbf_san_res - trace event for received SAN response
 * @tag: identifier for event
 * @fsf: request containing the received CT data
 */
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
		     length, fsf->req_id, ct_els->d_id,
		     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}

/**
 * zfcp_dbf_san_in_els - trace event for incoming ELS
 * @tag: identifier for event
 * @fsf: request containing the unsolicited status buffer with the incoming ELS
 */
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct fsf_status_read_buffer *srb =
		(struct fsf_status_read_buffer *) fsf->data;
	u16 length;
	struct scatterlist sg;

	length = (u16)(srb->length -
			offsetof(struct fsf_status_read_buffer, payload));
	sg_init_one(&sg, srb->payload.data, length);
	zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
		     fsf->req_id, ntoh24(srb->d_id), length);
}

/**
 * zfcp_dbf_scsi - trace event for scsi commands
 * @tag: identifier for event
 * @level: trace level to be used for event
 * @sc: pointer to struct scsi_cmnd
 * @fsf: pointer to struct zfcp_fsf_req
 */
void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
		   struct zfcp_fsf_req *fsf)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) sc->device->host->hostdata[0];
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *fcp_rsp_info;
	unsigned long flags;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	rec->scsi_result = sc->result;
	rec->scsi_retries = sc->retries;
	rec->scsi_allowed = sc->allowed;
	rec->scsi_id = sc->device->id;
	/* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
	rec->scsi_lun = (u32)sc->device->lun;
	rec->host_scribble = (unsigned long)sc->host_scribble;

	memcpy(rec->scsi_opcode, sc->cmnd,
	       min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));

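	/* when an FSF request is associated, also capture the FCP response IU
	 * from the QTCB and, if flagged as valid, dump the SCSI sense data to
	 * the pay trace area
	 */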
	if (fsf) {
		rec->fsf_req_id = fsf->req_id;
		fcp_rsp = (struct fcp_resp_with_ext *)
				&(fsf->qtcb->bottom.io.fcp_rsp);
		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
		}
		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
			rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
					  (u16)ZFCP_DBF_PAY_MAX_REC);
			zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
					  "fcp_sns", fsf->req_id);
		}
	}

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}

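/* Register one s390 debug feature of @size pages with records of @rec_size
 * bytes, attach the hex/ASCII view and apply the level set via the dbflevel
 * module parameter.
 */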
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
	struct debug_info *d;

	d = debug_register(name, size, 1, rec_size);
	if (!d)
		return NULL;

	debug_register_view(d, &debug_hex_ascii_view);
	debug_set_level(d, dbflevel);

	return d;
}

static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
	if (!dbf)
		return;

	debug_unregister(dbf->scsi);
	debug_unregister(dbf->san);
	debug_unregister(dbf->hba);
	debug_unregister(dbf->pay);
	debug_unregister(dbf->rec);
	kfree(dbf);
}

/**
 * zfcp_dbf_adapter_register - registers debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be registered
 * return: -ENOMEM on error, 0 otherwise
 */
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
	char name[DEBUG_MAX_NAME_LEN];
	struct zfcp_dbf *dbf;

	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
	if (!dbf)
		return -ENOMEM;

	spin_lock_init(&dbf->pay_lock);
	spin_lock_init(&dbf->hba_lock);
	spin_lock_init(&dbf->san_lock);
	spin_lock_init(&dbf->scsi_lock);
	spin_lock_init(&dbf->rec_lock);

	/* debug feature area which records recovery activity */
	sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
	dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
	if (!dbf->rec)
		goto err_out;

	/* debug feature area which records HBA (FSF and QDIO) conditions */
	sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
	dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
	if (!dbf->hba)
		goto err_out;

	/* debug feature area which records payload info */
	sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
	dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
	if (!dbf->pay)
		goto err_out;

	/* debug feature area which records SAN command failures and recovery */
	sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
	dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
	if (!dbf->san)
		goto err_out;

	/* debug feature area which records SCSI command failures and recovery */
	sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
	dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
	if (!dbf->scsi)
		goto err_out;

	adapter->dbf = dbf;

	return 0;
err_out:
	zfcp_dbf_unregister(dbf);
	return -ENOMEM;
}

/**
 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be unregistered
 */
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;

	adapter->dbf = NULL;
	zfcp_dbf_unregister(dbf);
}