/*
 * Copyright 2012 Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/time.h>
#include "fnic_io.h"
#include "fnic.h"

unsigned int trace_max_pages;
static int fnic_max_trace_entries;

static unsigned long fnic_trace_buf_p;
static DEFINE_SPINLOCK(fnic_trace_lock);

static fnic_trace_dbg_t fnic_trace_entries;
int fnic_tracing_enabled = 1;

/* static char *fnic_fc_ctlr_trace_buf_p; */

static int fc_trace_max_entries;
static unsigned long fnic_fc_ctlr_trace_buf_p;
static fnic_trace_dbg_t fc_trace_entries;
int fnic_fc_tracing_enabled = 1;
int fnic_fc_trace_cleared = 1;
static DEFINE_SPINLOCK(fnic_fc_trace_lock);

/*
 * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
 *
 * Description:
 * This routine gets the next available trace buffer entry location @wr_idx
 * from the allocated trace buffer pages and hands that memory location to
 * the caller to store the trace information.
 *
 * Return Value:
 * This routine returns a pointer to the next available trace entry
 * @fnic_buf_head for the caller to fill with trace information.
 */
fnic_trace_data_t *fnic_trace_get_buf(void)
{
	unsigned long fnic_buf_head;
	unsigned long flags;

	spin_lock_irqsave(&fnic_trace_lock, flags);

	/*
	 * Get next available memory location for writing trace information
	 * at @wr_idx and increment @wr_idx
	 */
	fnic_buf_head =
		fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
	fnic_trace_entries.wr_idx++;

	/*
	 * Verify if trace buffer is full then change wr_idx to
	 * start from zero
	 */
	if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
		fnic_trace_entries.wr_idx = 0;

	/*
	 * Verify if write index @wr_idx and read index @rd_idx are same then
	 * increment @rd_idx to move to next entry in trace buffer
	 */
	if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
		fnic_trace_entries.rd_idx++;
		if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
			fnic_trace_entries.rd_idx = 0;
	}
	spin_unlock_irqrestore(&fnic_trace_lock, flags);
	return (fnic_trace_data_t *)fnic_buf_head;
}

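/*
 * Illustrative usage sketch (assumed, not part of this file): a caller
 * takes the next entry and fills in the fields that fnic_get_trace_data()
 * decodes below; in the driver this is normally done through a tracing
 * macro. "host_no", "io_tag" and "d0" are placeholder values, fnaddr
 * holds a code address later resolved with sprint_symbol(), and 32-bit
 * builds store into the .low members instead of .val.
 *
 *	fnic_trace_data_t *tbp = fnic_trace_get_buf();
 *
 *	tbp->timestamp.val = jiffies;
 *	tbp->fnaddr.val = (u64)(unsigned long)fnic_trace_get_buf;
 *	tbp->host_no = host_no;
 *	tbp->tag = io_tag;
 *	tbp->data[0] = d0;
 */
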
/*
 * fnic_get_trace_data - Copy trace buffer to a memory file
 * @fnic_dbgfs_t: pointer to debugfs trace buffer
 *
 * Description:
 * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
 * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in
 * the log and process the log until the end of the buffer. Then it will gather
 * from the beginning of the log and process until the current entry @wr_idx.
 *
 * Return Value:
 * This routine returns the number of bytes that were dumped into fnic_dbgfs_t
 */
int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
{
	int rd_idx;
	int wr_idx;
	int len = 0;
	unsigned long flags;
	char str[KSYM_SYMBOL_LEN];
	struct timespec val;
	fnic_trace_data_t *tbp;

	spin_lock_irqsave(&fnic_trace_lock, flags);
	rd_idx = fnic_trace_entries.rd_idx;
	wr_idx = fnic_trace_entries.wr_idx;
	if (wr_idx < rd_idx) {
		while (1) {
			/* Start from read index @rd_idx */
			tbp = (fnic_trace_data_t *)
				fnic_trace_entries.page_offset[rd_idx];
			if (!tbp) {
				spin_unlock_irqrestore(&fnic_trace_lock, flags);
				return 0;
			}
			/* Convert function pointer to function name */
			if (sizeof(unsigned long) < 8) {
				sprint_symbol(str, tbp->fnaddr.low);
				jiffies_to_timespec(tbp->timestamp.low, &val);
			} else {
				sprint_symbol(str, tbp->fnaddr.val);
				jiffies_to_timespec(tbp->timestamp.val, &val);
			}
			/*
			 * Dump trace buffer entry to memory file
			 * and increment read index @rd_idx
			 */
			len += snprintf(fnic_dbgfs_prt->buffer + len,
				(trace_max_pages * PAGE_SIZE * 3) - len,
				"%16lu.%16lu %-50s %8x %8x %16llx %16llx "
				"%16llx %16llx %16llx\n", val.tv_sec,
				val.tv_nsec, str, tbp->host_no, tbp->tag,
				tbp->data[0], tbp->data[1], tbp->data[2],
				tbp->data[3], tbp->data[4]);
			rd_idx++;
			/*
			 * If rd_idx has reached the maximum number of trace
			 * entries then wrap rd_idx around to zero
			 */
			if (rd_idx > (fnic_max_trace_entries-1))
				rd_idx = 0;
			/*
			 * Continue dumping trace buffer entries into the
			 * memory file until rd_idx reaches the write index
			 */
			if (rd_idx == wr_idx)
				break;
		}
	} else if (wr_idx > rd_idx) {
		while (1) {
			/* Start from read index @rd_idx */
			tbp = (fnic_trace_data_t *)
				fnic_trace_entries.page_offset[rd_idx];
			if (!tbp) {
				spin_unlock_irqrestore(&fnic_trace_lock, flags);
				return 0;
			}
			/* Convert function pointer to function name */
			if (sizeof(unsigned long) < 8) {
				sprint_symbol(str, tbp->fnaddr.low);
				jiffies_to_timespec(tbp->timestamp.low, &val);
			} else {
				sprint_symbol(str, tbp->fnaddr.val);
				jiffies_to_timespec(tbp->timestamp.val, &val);
			}
			/*
			 * Dump trace buffer entry to memory file
			 * and increment read index @rd_idx
			 */
			len += snprintf(fnic_dbgfs_prt->buffer + len,
				(trace_max_pages * PAGE_SIZE * 3) - len,
				"%16lu.%16lu %-50s %8x %8x %16llx %16llx "
				"%16llx %16llx %16llx\n", val.tv_sec,
				val.tv_nsec, str, tbp->host_no, tbp->tag,
				tbp->data[0], tbp->data[1], tbp->data[2],
				tbp->data[3], tbp->data[4]);
			rd_idx++;
			/*
			 * Continue dumping trace buffer entries into the
			 * memory file until rd_idx reaches the write index
			 */
			if (rd_idx == wr_idx)
				break;
		}
	}
	spin_unlock_irqrestore(&fnic_trace_lock, flags);
	return len;
}

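/*
 * Illustrative read-path sketch (assumed; the debugfs plumbing itself
 * lives in fnic_debugfs.c): the destination buffer is sized at three
 * output bytes per byte of trace memory, which matches the snprintf()
 * bound used above.
 *
 *	fnic_dbgfs_prt->buffer = vmalloc(3 * (trace_max_pages * PAGE_SIZE));
 *	len = fnic_get_trace_data(fnic_dbgfs_prt);
 */
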
/*
 * fnic_get_stats_data - Copy fnic stats buffer to a memory file
 * @fnic_dbgfs_t: pointer to debugfs fnic stats buffer
 *
 * Description:
 * This routine gathers the fnic stats debugfs data from the fnic_stats struct
 * and dumps it to stats_debug_info.
 *
 * Return Value:
 * This routine returns the number of bytes that were dumped into
 * stats_debug_info
 */
int fnic_get_stats_data(struct stats_debug_info *debug,
			struct fnic_stats *stats)
{
	int len = 0;
	int buf_size = debug->buf_size;
	struct timespec val1, val2;

	len = snprintf(debug->debug_buffer + len, buf_size - len,
		"------------------------------------------\n"
		"\t\tIO Statistics\n"
		"------------------------------------------\n");
	len += snprintf(debug->debug_buffer + len, buf_size - len,
		"Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
		"Number of IOs: %lld\nNumber of IO Completions: %lld\n"
		"Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
		"Number of Memory alloc Failures: %lld\n"
		"Number of IOREQ Null: %lld\n"
		"Number of SCSI cmd pointer Null: %lld\n",
		(u64)atomic64_read(&stats->io_stats.active_ios),
		(u64)atomic64_read(&stats->io_stats.max_active_ios),
		(u64)atomic64_read(&stats->io_stats.num_ios),
		(u64)atomic64_read(&stats->io_stats.io_completions),
		(u64)atomic64_read(&stats->io_stats.io_failures),
		(u64)atomic64_read(&stats->io_stats.io_not_found),
		(u64)atomic64_read(&stats->io_stats.alloc_failures),
		(u64)atomic64_read(&stats->io_stats.ioreq_null),
		(u64)atomic64_read(&stats->io_stats.sc_null));

	len += snprintf(debug->debug_buffer + len, buf_size - len,
		"\n------------------------------------------\n"
		"\t\tAbort Statistics\n"
		"------------------------------------------\n");
	len += snprintf(debug->debug_buffer + len, buf_size - len,
		"Number of Aborts: %lld\n"
		"Number of Abort Failures: %lld\n"
		"Number of Abort Driver Timeouts: %lld\n"
		"Number of Abort FW Timeouts: %lld\n"
		"Number of Abort IO NOT Found: %lld\n",
		(u64)atomic64_read(&stats->abts_stats.aborts),
		(u64)atomic64_read(&stats->abts_stats.abort_failures),
		(u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
		(u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts),
		(u64)atomic64_read(&stats->abts_stats.abort_io_not_found));

	len += snprintf(debug->debug_buffer + len, buf_size - len,
		"\n------------------------------------------\n"
		"\t\tTerminate Statistics\n"
		"------------------------------------------\n");
	len += snprintf(debug->debug_buffer + len, buf_size - len,
		"Number of Terminates: %lld\n"
		"Maximum Terminates: %lld\n"
		"Number of Terminate Driver Timeouts: %lld\n"
		"Number of Terminate FW Timeouts: %lld\n"
		"Number of Terminate IO NOT Found: %lld\n"
		"Number of Terminate Failures: %lld\n",
		(u64)atomic64_read(&stats->term_stats.terminates),
		(u64)atomic64_read(&stats->term_stats.max_terminates),
		(u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts),
		(u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts),
		(u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
		(u64)atomic64_read(&stats->term_stats.terminate_failures));

	len += snprintf(debug->debug_buffer + len, buf_size - len,
		"\n------------------------------------------\n"
		"\t\tReset Statistics\n"
		"------------------------------------------\n");

	len += snprintf(debug->debug_buffer + len, buf_size - len,
		"Number of Device Resets: %lld\n"
		"Number of Device Reset Failures: %lld\n"
		"Number of Device Reset Aborts: %lld\n"
		"Number of Device Reset Timeouts: %lld\n"
		"Number of Device Reset Terminates: %lld\n"
		"Number of FW Resets: %lld\n"
%lld\n" 288 "Number of FW Reset Completions: %lld\n" 289 "Number of FW Reset Failures: %lld\n" 290 "Number of Fnic Reset: %lld\n" 291 "Number of Fnic Reset Completions: %lld\n" 292 "Number of Fnic Reset Failures: %lld\n", 293 (u64)atomic64_read(&stats->reset_stats.device_resets), 294 (u64)atomic64_read(&stats->reset_stats.device_reset_failures), 295 (u64)atomic64_read(&stats->reset_stats.device_reset_aborts), 296 (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts), 297 (u64)atomic64_read( 298 &stats->reset_stats.device_reset_terminates), 299 (u64)atomic64_read(&stats->reset_stats.fw_resets), 300 (u64)atomic64_read(&stats->reset_stats.fw_reset_completions), 301 (u64)atomic64_read(&stats->reset_stats.fw_reset_failures), 302 (u64)atomic64_read(&stats->reset_stats.fnic_resets), 303 (u64)atomic64_read( 304 &stats->reset_stats.fnic_reset_completions), 305 (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures)); 306 307 len += snprintf(debug->debug_buffer + len, buf_size - len, 308 "\n------------------------------------------\n" 309 "\t\tFirmware Statistics\n" 310 "------------------------------------------\n"); 311 312 len += snprintf(debug->debug_buffer + len, buf_size - len, 313 "Number of Active FW Requests %lld\n" 314 "Maximum FW Requests: %lld\n" 315 "Number of FW out of resources: %lld\n" 316 "Number of FW IO errors: %lld\n", 317 (u64)atomic64_read(&stats->fw_stats.active_fw_reqs), 318 (u64)atomic64_read(&stats->fw_stats.max_fw_reqs), 319 (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources), 320 (u64)atomic64_read(&stats->fw_stats.io_fw_errs)); 321 322 len += snprintf(debug->debug_buffer + len, buf_size - len, 323 "\n------------------------------------------\n" 324 "\t\tVlan Discovery Statistics\n" 325 "------------------------------------------\n"); 326 327 len += snprintf(debug->debug_buffer + len, buf_size - len, 328 "Number of Vlan Discovery Requests Sent %lld\n" 329 "Vlan Response Received with no FCF VLAN ID: %lld\n" 330 "No solicitations recvd after vlan set, expiry count: %lld\n" 331 "Flogi rejects count: %lld\n", 332 (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs), 333 (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID), 334 (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count), 335 (u64)atomic64_read(&stats->vlan_stats.flogi_rejects)); 336 337 len += snprintf(debug->debug_buffer + len, buf_size - len, 338 "\n------------------------------------------\n" 339 "\t\tOther Important Statistics\n" 340 "------------------------------------------\n"); 341 342 jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1); 343 jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2); 344 345 len += snprintf(debug->debug_buffer + len, buf_size - len, 346 "Last ISR time: %llu (%8lu.%8lu)\n" 347 "Last ACK time: %llu (%8lu.%8lu)\n" 348 "Number of ISRs: %lld\n" 349 "Maximum CQ Entries: %lld\n" 350 "Number of ACK index out of range: %lld\n" 351 "Number of data count mismatch: %lld\n" 352 "Number of FCPIO Timeouts: %lld\n" 353 "Number of FCPIO Aborted: %lld\n" 354 "Number of SGL Invalid: %lld\n" 355 "Number of Copy WQ Alloc Failures for ABTs: %lld\n" 356 "Number of Copy WQ Alloc Failures for Device Reset: %lld\n" 357 "Number of Copy WQ Alloc Failures for IOs: %lld\n" 358 "Number of no icmnd itmf Completions: %lld\n" 359 "Number of QUEUE Fulls: %lld\n" 360 "Number of rport not ready: %lld\n" 361 "Number of receive frame errors: %lld\n", 362 (u64)stats->misc_stats.last_isr_time, 363 val1.tv_sec, val1.tv_nsec, 364 (u64)stats->misc_stats.last_ack_time, 365 
		val2.tv_sec, val2.tv_nsec,
		(u64)atomic64_read(&stats->misc_stats.isr_count),
		(u64)atomic64_read(&stats->misc_stats.max_cq_entries),
		(u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
		(u64)atomic64_read(&stats->misc_stats.data_count_mismatch),
		(u64)atomic64_read(&stats->misc_stats.fcpio_timeout),
		(u64)atomic64_read(&stats->misc_stats.fcpio_aborted),
		(u64)atomic64_read(&stats->misc_stats.sgl_invalid),
		(u64)atomic64_read(
			&stats->misc_stats.abts_cpwq_alloc_failures),
		(u64)atomic64_read(
			&stats->misc_stats.devrst_cpwq_alloc_failures),
		(u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures),
		(u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
		(u64)atomic64_read(&stats->misc_stats.queue_fulls),
		(u64)atomic64_read(&stats->misc_stats.rport_not_ready),
		(u64)atomic64_read(&stats->misc_stats.frame_errors));

	return len;
}

/*
 * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
 *
 * Description:
 * Initialize trace buffer data structure by allocating required memory and
 * setting page_offset information for every trace entry by adding trace entry
 * length to previous page_offset value.
 */
int fnic_trace_buf_init(void)
{
	unsigned long fnic_buf_head;
	int i;
	int err = 0;

	trace_max_pages = fnic_trace_max_pages;
	fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
					FNIC_ENTRY_SIZE_BYTES;

	fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
	if (!fnic_trace_buf_p) {
		printk(KERN_ERR PFX "Failed to allocate memory "
				"for fnic_trace_buf_p\n");
		err = -ENOMEM;
		goto err_fnic_trace_buf_init;
	}
	memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));

	fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries *
						sizeof(unsigned long));
	if (!fnic_trace_entries.page_offset) {
		printk(KERN_ERR PFX "Failed to allocate memory for"
				" page_offset\n");
		if (fnic_trace_buf_p) {
			vfree((void *)fnic_trace_buf_p);
			fnic_trace_buf_p = 0;
		}
		err = -ENOMEM;
		goto err_fnic_trace_buf_init;
	}
	memset((void *)fnic_trace_entries.page_offset, 0,
		(fnic_max_trace_entries * sizeof(unsigned long)));
	fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
	fnic_buf_head = fnic_trace_buf_p;

	/*
	 * Set page_offset field of fnic_trace_entries struct by
	 * calculating memory location for every trace entry using
	 * length of each trace entry
	 */
	for (i = 0; i < fnic_max_trace_entries; i++) {
		fnic_trace_entries.page_offset[i] = fnic_buf_head;
		fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
	}
	err = fnic_trace_debugfs_init();
	if (err < 0) {
		pr_err("fnic: Failed to initialize debugfs for tracing\n");
		goto err_fnic_trace_debugfs_init;
	}
	pr_info("fnic: Successfully Initialized Trace Buffer\n");
	return err;

err_fnic_trace_debugfs_init:
	fnic_trace_free();
err_fnic_trace_buf_init:
	return err;
}

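/*
 * Illustrative call sequence (assumed; the real call site is the driver's
 * module init path): the buffer is set up once at load time and released
 * with fnic_trace_free() on unload or on a later initialization failure.
 *
 *	if (fnic_trace_buf_init() < 0)
 *		pr_err("fnic: Trace buffer initialization failed\n");
 */
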
/*
 * fnic_trace_free - Free memory of fnic trace data structures.
 */
void fnic_trace_free(void)
{
	fnic_tracing_enabled = 0;
	fnic_trace_debugfs_terminate();
	if (fnic_trace_entries.page_offset) {
		vfree((void *)fnic_trace_entries.page_offset);
		fnic_trace_entries.page_offset = NULL;
	}
	if (fnic_trace_buf_p) {
		vfree((void *)fnic_trace_buf_p);
		fnic_trace_buf_p = 0;
	}
	printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
}

/*
 * fnic_fc_trace_init -
 * Initialize trace buffer to log fnic control frames
 * Description:
 * Initialize trace buffer data structure by allocating
 * required memory for trace data as well as for the indexes.
 * The frame size is 256 bytes and memory is allocated for
 * 1024 entries of 256 bytes each.
 * page_offset (the index) is set to the address of each trace entry;
 * every page_offset is initialized by adding the frame size
 * to the previous page_offset entry.
 */

int fnic_fc_trace_init(void)
{
	unsigned long fc_trace_buf_head;
	int err = 0;
	int i;

	fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
				FC_TRC_SIZE_BYTES;
	fnic_fc_ctlr_trace_buf_p = (unsigned long)vmalloc(
					fnic_fc_trace_max_pages * PAGE_SIZE);
	if (!fnic_fc_ctlr_trace_buf_p) {
		pr_err("fnic: Failed to allocate memory for "
			"FC Control Trace Buf\n");
		err = -ENOMEM;
		goto err_fnic_fc_ctlr_trace_buf_init;
	}

	memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
			fnic_fc_trace_max_pages * PAGE_SIZE);

	/* Allocate memory for page offset */
	fc_trace_entries.page_offset = vmalloc(fc_trace_max_entries *
						sizeof(unsigned long));
	if (!fc_trace_entries.page_offset) {
		pr_err("fnic: Failed to allocate memory for page_offset\n");
		if (fnic_fc_ctlr_trace_buf_p) {
			pr_err("fnic: Freeing FC Control Trace Buf\n");
			vfree((void *)fnic_fc_ctlr_trace_buf_p);
			fnic_fc_ctlr_trace_buf_p = 0;
		}
		err = -ENOMEM;
		goto err_fnic_fc_ctlr_trace_buf_init;
	}
	memset((void *)fc_trace_entries.page_offset, 0,
		(fc_trace_max_entries * sizeof(unsigned long)));

	fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
	fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;

	/*
	 * Set up fc_trace_entries.page_offset field with memory location
	 * for every trace entry
	 */
	for (i = 0; i < fc_trace_max_entries; i++) {
		fc_trace_entries.page_offset[i] = fc_trace_buf_head;
		fc_trace_buf_head += FC_TRC_SIZE_BYTES;
	}
	err = fnic_fc_trace_debugfs_init();
	if (err < 0) {
		pr_err("fnic: Failed to initialize FC_CTLR tracing.\n");
		goto err_fnic_fc_ctlr_trace_debugfs_init;
	}
	pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n");
	return err;

err_fnic_fc_ctlr_trace_debugfs_init:
	fnic_fc_trace_free();
err_fnic_fc_ctlr_trace_buf_init:
	return err;
}

/*
 * fnic_fc_trace_free - Free memory of fnic_fc_ctlr trace data structures.
 */
void fnic_fc_trace_free(void)
{
	fnic_fc_tracing_enabled = 0;
	fnic_fc_trace_debugfs_terminate();
	if (fc_trace_entries.page_offset) {
		vfree((void *)fc_trace_entries.page_offset);
		fc_trace_entries.page_offset = NULL;
	}
	if (fnic_fc_ctlr_trace_buf_p) {
		vfree((void *)fnic_fc_ctlr_trace_buf_p);
		fnic_fc_ctlr_trace_buf_p = 0;
	}
	pr_info("fnic: Successfully Freed FC_CTLR Trace Buffer\n");
}

/*
 * fnic_fc_trace_set_data:
 * Maintain rd & wr idx accordingly and set data
 * Passed parameters:
 * host_no: host number associated with fnic
 * frame_type: send frame, receive frame or link event
 * fc_frame: pointer to fc_frame
 * frame_len: Length of the fc_frame
 * Description:
 * This routine will get the next available wr_idx, copy all passed
 * trace data to the buffer pointed to by wr_idx, and increment wr_idx.
 * It also makes sure that we do not overwrite the entry currently
 * being read and wraps around once the maximum number of entries
 * is reached.
 * Returned Value:
 * It will return 0 for success or -1 for failure
 */
int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
				char *frame, u32 fc_trc_frame_len)
{
	unsigned long flags;
	struct fc_trace_hdr *fc_buf;
	unsigned long eth_fcoe_hdr_len;
	char *fc_trace;

	if (fnic_fc_tracing_enabled == 0)
		return 0;

	spin_lock_irqsave(&fnic_fc_trace_lock, flags);

	if (fnic_fc_trace_cleared == 1) {
		fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
		pr_info("fnic: Resetting the read idx\n");
		memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
				fnic_fc_trace_max_pages * PAGE_SIZE);
		fnic_fc_trace_cleared = 0;
	}

	fc_buf = (struct fc_trace_hdr *)
		fc_trace_entries.page_offset[fc_trace_entries.wr_idx];

	fc_trace_entries.wr_idx++;

	if (fc_trace_entries.wr_idx >= fc_trace_max_entries)
		fc_trace_entries.wr_idx = 0;

	if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
		fc_trace_entries.rd_idx++;
		if (fc_trace_entries.rd_idx >= fc_trace_max_entries)
			fc_trace_entries.rd_idx = 0;
	}

	fc_buf->time_stamp = CURRENT_TIME;
	fc_buf->host_no = host_no;
	fc_buf->frame_type = frame_type;

	fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf);

	/* During the receive path, we do not have eth hdr as well as fcoe hdr
	 * at trace entry point so we will stuff 0xff just to make it generic.
	 */
	if (frame_type == FNIC_FC_RECV) {
		eth_fcoe_hdr_len = sizeof(struct ethhdr) +
					sizeof(struct fcoe_hdr);
		memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
		/* Copy the rest of data frame */
		memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
			min_t(u8, fc_trc_frame_len,
				(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE
							- eth_fcoe_hdr_len)));
	} else {
		memcpy((char *)fc_trace, (void *)frame,
			min_t(u8, fc_trc_frame_len,
				(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
	}

	/* Store the actual received length */
	fc_buf->frame_len = fc_trc_frame_len;

	spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
	return 0;
}

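/*
 * Illustrative usage sketch (assumed, not part of this file): a receive
 * path caller records the frame it just handled. "fp_data" and "fp_len"
 * are placeholders for the caller's frame pointer and length; the copy
 * above clamps the stored payload to what fits in one FC_TRC_SIZE_BYTES
 * entry, while frame_len keeps the original length.
 *
 *	if (fnic_fc_trace_set_data(host_no, FNIC_FC_RECV, fp_data, fp_len))
 *		pr_err("fnic: FC frame trace failed\n");
 */
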
/*
 * fnic_fc_trace_get_data: Copy trace buffer to a memory file
 * Passed parameter:
 * @fnic_dbgfs_t: pointer to debugfs trace buffer
 * rdata_flag: 1 => Unformatted file
 *             0 => Formatted file
 * Description:
 * This routine will copy the trace data to a memory file with
 * proper formatting and also copy it to another memory
 * file without formatting for further processing.
 * Return Value:
 * Number of bytes that were dumped into fnic_dbgfs_t
 */

int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
{
	int rd_idx, wr_idx;
	unsigned long flags;
	int len = 0, j;
	struct fc_trace_hdr *tdata;
	char *fc_trace;

	spin_lock_irqsave(&fnic_fc_trace_lock, flags);
	if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
		spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
		pr_info("fnic: Buffer is empty\n");
		return 0;
	}
	rd_idx = fc_trace_entries.rd_idx;
	wr_idx = fc_trace_entries.wr_idx;
	if (rdata_flag == 0) {
		len += snprintf(fnic_dbgfs_prt->buffer + len,
			(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
			"Time Stamp (UTC)\t\t"
			"Host No: F Type: len: FCoE_FRAME:\n");
	}

	while (rd_idx != wr_idx) {
		tdata = (struct fc_trace_hdr *)
			fc_trace_entries.page_offset[rd_idx];
		if (!tdata) {
			pr_info("fnic: Rd data is NULL\n");
			spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
			return 0;
		}
		if (rdata_flag == 0) {
			copy_and_format_trace_data(tdata,
				fnic_dbgfs_prt, &len, rdata_flag);
		} else {
			fc_trace = (char *)tdata;
			for (j = 0; j < FC_TRC_SIZE_BYTES; j++) {
				len += snprintf(fnic_dbgfs_prt->buffer + len,
					(fnic_fc_trace_max_pages * PAGE_SIZE * 3)
					- len, "%02x", fc_trace[j] & 0xff);
			} /* for loop */
			len += snprintf(fnic_dbgfs_prt->buffer + len,
				(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
				"\n");
		}
		rd_idx++;
		if (rd_idx > (fc_trace_max_entries - 1))
			rd_idx = 0;
	}

	spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
	return len;
}

/*
 * copy_and_format_trace_data: Copy formatted data to char * buffer
 * Passed Parameter:
 * @fc_trace_hdr_t: pointer to trace data
 * @fnic_dbgfs_t: pointer to debugfs trace buffer
 * @orig_len: pointer to len
 * rdata_flag: 0 => Formatted file, 1 => Unformatted file
 * Description:
 * This routine will format and copy the passed trace data
 * for the formatted file or the unformatted file accordingly.
 */

void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
				fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len,
				u8 rdata_flag)
{
	struct tm tm;
	int j, i = 1, len;
	char *fc_trace, *fmt;
	int ethhdr_len = sizeof(struct ethhdr) - 1;
	int fcoehdr_len = sizeof(struct fcoe_hdr);
	int fchdr_len = sizeof(struct fc_frame_header);
	int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3;

	tdata->frame_type = tdata->frame_type & 0x7F;

	len = *orig_len;

	time_to_tm(tdata->time_stamp.tv_sec, 0, &tm);

	fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
	len += snprintf(fnic_dbgfs_prt->buffer + len,
		max_size - len,
		fmt,
		tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
		tm.tm_hour, tm.tm_min, tm.tm_sec,
		tdata->time_stamp.tv_nsec, tdata->host_no,
		tdata->frame_type, tdata->frame_len);

	fc_trace = (char *)FC_TRACE_ADDRESS(tdata);

	for (j = 0; j < min_t(u8, tdata->frame_len,
		(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) {
		if (tdata->frame_type == FNIC_FC_LE) {
			len += snprintf(fnic_dbgfs_prt->buffer + len,
				max_size - len, "%c", fc_trace[j]);
		} else {
			len += snprintf(fnic_dbgfs_prt->buffer + len,
				max_size - len, "%02x", fc_trace[j] & 0xff);
			len += snprintf(fnic_dbgfs_prt->buffer + len,
				max_size - len, " ");
			if (j == ethhdr_len ||
				j == ethhdr_len + fcoehdr_len ||
				j == ethhdr_len + fcoehdr_len + fchdr_len ||
				(i > 3 && j%fchdr_len == 0)) {
				len += snprintf(fnic_dbgfs_prt->buffer
					+ len, max_size - len,
					"\n\t\t\t\t\t\t\t\t");
				i++;
			}
		} /* end of else */
	} /* End of for loop */
	len += snprintf(fnic_dbgfs_prt->buffer + len,
		max_size - len, "\n");
	*orig_len = len;
}