/*
 * zfcp device driver
 *
 * Debug traces for zfcp.
 *
 * Copyright IBM Corp. 2002, 2010
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"

static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
		 "number of pages for each debug feature area (default 4)");

static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}

static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
		       u64 req_id)
{
	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
	u16 offset = 0, rec_length;

	spin_lock(&dbf->pay_lock);
	memset(pl, 0, sizeof(*pl));
	pl->fsf_req_id = req_id;
	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

	while (offset < length) {
		rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
				 (u16) (length - offset));
		memcpy(pl->data, data + offset, rec_length);
		debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

		offset += rec_length;
		pl->counter++;
	}

	spin_unlock(&dbf->pay_lock);
}
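
/*
 * Note: payloads larger than ZFCP_DBF_PAY_MAX_REC are split above into
 * several records of the "pay" trace area. The chunks share the request
 * id and area tag and are ordered by pl->counter, so a trace reader can
 * reassemble the original payload.
 */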

/**
 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
 * @tag: tag indicating which kind of FSF response has been received
 * @req: request for which a response was received
 */
void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_RES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;
	rec->fsf_seq_no = req->seq_no;
	rec->u.res.req_issued = req->issued;
	rec->u.res.prot_status = q_pref->prot_status;
	rec->u.res.fsf_status = q_head->fsf_status;

	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
	       FSF_PROT_STATUS_QUAL_SIZE);
	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
	       FSF_STATUS_QUALIFIER_SIZE);

	if (req->fsf_command != FSF_QTCB_FCP_CMND) {
		rec->pl_len = q_head->log_length;
		zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
				  rec->pl_len, "fsf_res", req->req_id);
	}

	debug_event(dbf->hba, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request providing the unsolicited status
 */
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_status_read_buffer *srb = req->data;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_USS;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;

	if (!srb)
		goto log;

	rec->u.uss.status_type = srb->status_type;
	rec->u.uss.status_subtype = srb->status_subtype;
	rec->u.uss.d_id = ntoh24(srb->d_id);
	rec->u.uss.lun = srb->fcp_lun;
	memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
	       sizeof(rec->u.uss.queue_designator));

	/* status read buffer payload length */
	rec->pl_len = (!srb->length) ? 0 : srb->length -
			offsetof(struct fsf_status_read_buffer, payload);

	if (rec->pl_len)
		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
				  "fsf_uss", req->req_id);
log:
	debug_event(dbf->hba, 2, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request which caused the bit_error condition
 */
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	struct fsf_status_read_buffer *sr_buf = req->data;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BIT;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = req->fsf_command;
	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
	       sizeof(struct fsf_bit_error_payload));

	debug_event(dbf->hba, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_def_err - trace event for deferred error messages
 * @adapter: pointer to struct zfcp_adapter
 * @req_id: request id which caused the deferred error message
 * @scount: number of sbals incl. the signaling sbal
 * @pl: array of all involved sbals
 */
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
			  void **pl)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	unsigned long flags;
	u16 length;

	if (!pl)
		return;

	spin_lock_irqsave(&dbf->pay_lock, flags);
	memset(payload, 0, sizeof(*payload));

	memcpy(payload->area, "def_err", 7);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	length = min((u16)sizeof(struct qdio_buffer),
		     (u16)ZFCP_DBF_PAY_MAX_REC);

	/* check the index against scount before dereferencing pl[counter] */
	while (payload->counter < scount && (char *)pl[payload->counter]) {
		memcpy(payload->data, (char *)pl[payload->counter], length);
		debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
		payload->counter++;
	}

	spin_unlock_irqrestore(&dbf->pay_lock, flags);
}

static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
				struct zfcp_adapter *adapter,
				struct zfcp_port *port,
				struct scsi_device *sdev)
{
	rec->adapter_status = atomic_read(&adapter->status);
	if (port) {
		rec->port_status = atomic_read(&port->status);
		rec->wwpn = port->wwpn;
		rec->d_id = port->d_id;
	}
	if (sdev) {
		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
		rec->lun = zfcp_scsi_dev_lun(sdev);
	}
}

/**
 * zfcp_dbf_rec_trig - trace event related to triggered recovery
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock has to be held.
 */
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
		       struct zfcp_port *port, struct scsi_device *sdev,
		       u8 want, u8 need)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	struct list_head *entry;
	unsigned long flags;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_TRIG;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, adapter, port, sdev);

	list_for_each(entry, &adapter->erp_ready_head)
		rec->u.trig.ready++;

	list_for_each(entry, &adapter->erp_running_head)
		rec->u.trig.running++;

	rec->u.trig.want = want;
	rec->u.trig.need = need;

	debug_event(dbf->rec, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
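
/*
 * Note: triggered-recovery and running-recovery events share the "rec"
 * trace area; records are told apart by rec->id (ZFCP_DBF_REC_TRIG vs.
 * ZFCP_DBF_REC_RUN) and by which union member is filled in.
 */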

/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
	struct zfcp_dbf *dbf = erp->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

	rec->u.run.fsf_req_id = erp->fsf_req_id;
	rec->u.run.rec_status = erp->status;
	rec->u.run.rec_step = erp->step;
	rec->u.run.rec_action = erp->action;

	if (erp->sdev)
		rec->u.run.rec_count =
			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
	else if (erp->port)
		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
	else
		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

	debug_event(dbf->rec, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
		  u64 req_id, u32 d_id)
{
	struct zfcp_dbf_san *rec = &dbf->san_buf;
	u16 rec_len;
	unsigned long flags;

	spin_lock_irqsave(&dbf->san_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = id;
	rec->fsf_req_id = req_id;
	rec->d_id = d_id;
	rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
	memcpy(rec->payload, data, rec_len);
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);

	debug_event(dbf->san, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->san_lock, flags);
}
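
/*
 * Note: unlike the HBA and SCSI traces, the SAN trace helper above does
 * not forward long payloads to the "pay" trace area; it truncates the
 * payload at ZFCP_DBF_SAN_MAX_PAYLOAD bytes.
 */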

/**
 * zfcp_dbf_san_req - trace event for issued SAN request
 * @tag: identifier for event
 * @fsf: request containing issued CT data
 * @d_id: destination ID
 */
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
	zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
		     fsf->req_id, d_id);
}

/**
 * zfcp_dbf_san_res - trace event for received SAN response
 * @tag: identifier for event
 * @fsf: request containing the CT response data
 */
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
	zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
		     fsf->req_id, 0);
}

/**
 * zfcp_dbf_san_in_els - trace event for incoming ELS
 * @tag: identifier for event
 * @fsf: request containing the incoming ELS data
 */
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct fsf_status_read_buffer *srb =
		(struct fsf_status_read_buffer *) fsf->data;
	u16 length;

	length = (u16)(srb->length -
			offsetof(struct fsf_status_read_buffer, payload));
	zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
		     fsf->req_id, ntoh24(srb->d_id));
}

/**
 * zfcp_dbf_scsi - trace event for scsi commands
 * @tag: identifier for event
 * @sc: pointer to struct scsi_cmnd
 * @fsf: pointer to struct zfcp_fsf_req
 */
void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) sc->device->host->hostdata[0];
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *fcp_rsp_info;
	unsigned long flags;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	rec->scsi_result = sc->result;
	rec->scsi_retries = sc->retries;
	rec->scsi_allowed = sc->allowed;
	rec->scsi_id = sc->device->id;
	rec->scsi_lun = sc->device->lun;
	rec->host_scribble = (unsigned long)sc->host_scribble;

	memcpy(rec->scsi_opcode, sc->cmnd,
	       min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));

	if (fsf) {
		rec->fsf_req_id = fsf->req_id;
		fcp_rsp = (struct fcp_resp_with_ext *)
				&(fsf->qtcb->bottom.io.fcp_rsp);
		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
		}
		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
			rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
					  (u16)ZFCP_DBF_PAY_MAX_REC);
			zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
					  "fcp_sns", fsf->req_id);
		}
	}

	debug_event(dbf->scsi, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}

static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
	struct debug_info *d;

	d = debug_register(name, size, 1, rec_size);
	if (!d)
		return NULL;

	debug_register_view(d, &debug_hex_ascii_view);
	debug_set_level(d, 3);

	return d;
}

static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
	if (!dbf)
		return;

	debug_unregister(dbf->scsi);
	debug_unregister(dbf->san);
	debug_unregister(dbf->hba);
	debug_unregister(dbf->pay);
	debug_unregister(dbf->rec);
	kfree(dbf);
}

/**
 * zfcp_dbf_adapter_register - registers debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be registered
 * return: -ENOMEM on error, 0 otherwise
 */
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
	char name[DEBUG_MAX_NAME_LEN];
	struct zfcp_dbf *dbf;

	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
	if (!dbf)
		return -ENOMEM;

	spin_lock_init(&dbf->pay_lock);
	spin_lock_init(&dbf->hba_lock);
	spin_lock_init(&dbf->san_lock);
	spin_lock_init(&dbf->scsi_lock);
	spin_lock_init(&dbf->rec_lock);

	/* debug feature area which records recovery activity */
	sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
	dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
	if (!dbf->rec)
		goto err_out;

	/* debug feature area which records HBA (FSF and QDIO) conditions */
	sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
	dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
	if (!dbf->hba)
		goto err_out;

	/* debug feature area which records payload info */
	sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
	dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
	if (!dbf->pay)
		goto err_out;

	/* debug feature area which records SAN command failures and recovery */
	sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
	dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
	if (!dbf->san)
		goto err_out;

	/* debug feature area which records SCSI command failures and recovery */
	sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
	dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
	if (!dbf->scsi)
		goto err_out;

	adapter->dbf = dbf;

	return 0;
err_out:
	zfcp_dbf_unregister(dbf);
	return -ENOMEM;
}

/**
 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be unregistered
 */
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;

	adapter->dbf = NULL;
	zfcp_dbf_unregister(dbf);
}