/*
 * Copyright 2012 Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include "fnic_io.h"
#include "fnic.h"

unsigned int trace_max_pages;
static int fnic_max_trace_entries;

static unsigned long fnic_trace_buf_p;
static DEFINE_SPINLOCK(fnic_trace_lock);

static fnic_trace_dbg_t fnic_trace_entries;
int fnic_tracing_enabled = 1;

/* static char *fnic_fc_ctlr_trace_buf_p; */

static int fc_trace_max_entries;
static unsigned long fnic_fc_ctlr_trace_buf_p;
static fnic_trace_dbg_t fc_trace_entries;
int fnic_fc_tracing_enabled = 1;
int fnic_fc_trace_cleared = 1;
static DEFINE_SPINLOCK(fnic_fc_trace_lock);


/*
 * fnic_trace_get_buf - Give a buffer pointer to the caller to fill with trace information
 *
 * Description:
 * This routine gets the next available trace buffer entry location @wr_idx
 * from the allocated trace buffer pages and gives that memory location
 * to the caller to store the trace information.
 *
 * Return Value:
 * This routine returns a pointer to the next available trace entry
 * @fnic_buf_head for the caller to fill with trace information.
 */
fnic_trace_data_t *fnic_trace_get_buf(void)
{
        unsigned long fnic_buf_head;
        unsigned long flags;

        spin_lock_irqsave(&fnic_trace_lock, flags);

        /*
         * Get the next available memory location for writing trace
         * information at @wr_idx and increment @wr_idx
         */
        fnic_buf_head =
                fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
        fnic_trace_entries.wr_idx++;

        /*
         * If the trace buffer is full, wrap @wr_idx around to
         * start from zero
         */
        if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
                fnic_trace_entries.wr_idx = 0;

        /*
         * If write index @wr_idx has caught up with read index @rd_idx,
         * increment @rd_idx to move to the next entry in the trace buffer
         */
        if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
                fnic_trace_entries.rd_idx++;
                if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
                        fnic_trace_entries.rd_idx = 0;
        }
        spin_unlock_irqrestore(&fnic_trace_lock, flags);
        return (fnic_trace_data_t *)fnic_buf_head;
}

/*
 * fnic_get_trace_data - Copy trace buffer to a memory file
 * @fnic_dbgfs_t: pointer to debugfs trace buffer
 *
 * Description:
 * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
 * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in
 * the log and process the log until the end of the buffer. Then it will gather
 * from the beginning of the log and process until the current entry @wr_idx.
 *
 * Return Value:
 * This routine returns the number of bytes that were dumped into fnic_dbgfs_t
 */
int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
{
        int rd_idx;
        int wr_idx;
        int len = 0;
        unsigned long flags;
        char str[KSYM_SYMBOL_LEN];
        struct timespec64 val;
        fnic_trace_data_t *tbp;

        spin_lock_irqsave(&fnic_trace_lock, flags);
        rd_idx = fnic_trace_entries.rd_idx;
        wr_idx = fnic_trace_entries.wr_idx;
        if (wr_idx < rd_idx) {
                while (1) {
                        /* Start from read index @rd_idx */
                        tbp = (fnic_trace_data_t *)
                                fnic_trace_entries.page_offset[rd_idx];
                        if (!tbp) {
                                spin_unlock_irqrestore(&fnic_trace_lock, flags);
                                return 0;
                        }
                        /* Convert function pointer to function name */
                        if (sizeof(unsigned long) < 8) {
                                sprint_symbol(str, tbp->fnaddr.low);
                                jiffies_to_timespec64(tbp->timestamp.low, &val);
                        } else {
                                sprint_symbol(str, tbp->fnaddr.val);
                                jiffies_to_timespec64(tbp->timestamp.val, &val);
                        }
                        /*
                         * Dump trace buffer entry to memory file
                         * and increment read index @rd_idx
                         */
                        len += snprintf(fnic_dbgfs_prt->buffer + len,
                                (trace_max_pages * PAGE_SIZE * 3) - len,
                                "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
                                "%16llx %16llx %16llx\n", (u64)val.tv_sec,
                                val.tv_nsec, str, tbp->host_no, tbp->tag,
                                tbp->data[0], tbp->data[1], tbp->data[2],
                                tbp->data[3], tbp->data[4]);
                        rd_idx++;
                        /*
                         * If rd_idx has reached the maximum number of trace
                         * entries, wrap it back to zero
                         */
                        if (rd_idx > (fnic_max_trace_entries - 1))
                                rd_idx = 0;
                        /*
                         * Continue dumping trace buffer entries into the
                         * memory file until rd_idx reaches the write index
                         */
                        if (rd_idx == wr_idx)
                                break;
                }
        } else if (wr_idx > rd_idx) {
                while (1) {
                        /* Start from read index @rd_idx */
                        tbp = (fnic_trace_data_t *)
                                fnic_trace_entries.page_offset[rd_idx];
                        if (!tbp) {
                                spin_unlock_irqrestore(&fnic_trace_lock, flags);
                                return 0;
                        }
                        /* Convert function pointer to function name */
                        if (sizeof(unsigned long) < 8) {
                                sprint_symbol(str, tbp->fnaddr.low);
                                jiffies_to_timespec64(tbp->timestamp.low, &val);
                        } else {
                                sprint_symbol(str, tbp->fnaddr.val);
                                jiffies_to_timespec64(tbp->timestamp.val, &val);
                        }
                        /*
                         * Dump trace buffer entry to memory file
                         * and increment read index @rd_idx
                         */
                        len += snprintf(fnic_dbgfs_prt->buffer + len,
                                (trace_max_pages * PAGE_SIZE * 3) - len,
                                "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
                                "%16llx %16llx %16llx\n", (u64)val.tv_sec,
                                val.tv_nsec, str, tbp->host_no, tbp->tag,
                                tbp->data[0], tbp->data[1], tbp->data[2],
                                tbp->data[3], tbp->data[4]);
                        rd_idx++;
                        /*
                         * Continue dumping trace buffer entries into the
                         * memory file until rd_idx reaches the write index
                         */
                        if (rd_idx == wr_idx)
                                break;
                }
        }
        spin_unlock_irqrestore(&fnic_trace_lock, flags);
        return len;
}

/*
 * fnic_get_stats_data - Copy fnic stats buffer to a memory file
 * @fnic_dbgfs_t: pointer to debugfs fnic stats buffer
 *
 * Description:
 * This routine gathers the fnic stats debugfs data from the fnic_stats struct
 * and dumps it to stats_debug_info.
 *
 * Return Value:
 * This routine returns the number of bytes that were dumped into
 * stats_debug_info
 */
int fnic_get_stats_data(struct stats_debug_info *debug,
                        struct fnic_stats *stats)
{
        int len = 0;
        int buf_size = debug->buf_size;
        struct timespec64 val1, val2;

        ktime_get_real_ts64(&val1);
        len = snprintf(debug->debug_buffer + len, buf_size - len,
                "------------------------------------------\n"
                "\t\tTime\n"
                "------------------------------------------\n");

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Current time : [%lld:%ld]\n"
                "Last stats reset time: [%lld:%09ld]\n"
                "Last stats read time: [%lld:%ld]\n"
                "delta since last reset: [%lld:%ld]\n"
                "delta since last read: [%lld:%ld]\n",
                (s64)val1.tv_sec, val1.tv_nsec,
                (s64)stats->stats_timestamps.last_reset_time.tv_sec,
                stats->stats_timestamps.last_reset_time.tv_nsec,
                (s64)stats->stats_timestamps.last_read_time.tv_sec,
                stats->stats_timestamps.last_read_time.tv_nsec,
                (s64)timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
                timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
                (s64)timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
                timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);

        stats->stats_timestamps.last_read_time = val1;

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "------------------------------------------\n"
                "\t\tIO Statistics\n"
                "------------------------------------------\n");
        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
                "Number of IOs: %lld\nNumber of IO Completions: %lld\n"
                "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
                "Number of Memory alloc Failures: %lld\n"
                "Number of IOREQ Null: %lld\n"
                "Number of SCSI cmd pointer Null: %lld\n"

                "\nIO completion times: \n"
                " < 10 ms : %lld\n"
                " 10 ms - 100 ms : %lld\n"
                " 100 ms - 500 ms : %lld\n"
                " 500 ms - 5 sec: %lld\n"
                " 5 sec - 10 sec: %lld\n"
                " 10 sec - 30 sec: %lld\n"
                " > 30 sec: %lld\n",
                (u64)atomic64_read(&stats->io_stats.active_ios),
                (u64)atomic64_read(&stats->io_stats.max_active_ios),
                (u64)atomic64_read(&stats->io_stats.num_ios),
                (u64)atomic64_read(&stats->io_stats.io_completions),
                (u64)atomic64_read(&stats->io_stats.io_failures),
                (u64)atomic64_read(&stats->io_stats.io_not_found),
                (u64)atomic64_read(&stats->io_stats.alloc_failures),
                (u64)atomic64_read(&stats->io_stats.ioreq_null),
                (u64)atomic64_read(&stats->io_stats.sc_null),
                (u64)atomic64_read(&stats->io_stats.io_btw_0_to_10_msec),
                (u64)atomic64_read(&stats->io_stats.io_btw_10_to_100_msec),
                (u64)atomic64_read(&stats->io_stats.io_btw_100_to_500_msec),
                (u64)atomic64_read(&stats->io_stats.io_btw_500_to_5000_msec),
                (u64)atomic64_read(&stats->io_stats.io_btw_5000_to_10000_msec),
                (u64)atomic64_read(&stats->io_stats.io_btw_10000_to_30000_msec),
                (u64)atomic64_read(&stats->io_stats.io_greater_than_30000_msec));

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "\nCurrent Max IO time : %lld\n",
                (u64)atomic64_read(&stats->io_stats.current_max_io_time));

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tAbort Statistics\n"
                "------------------------------------------\n");

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Aborts: %lld\n"
                "Number of Abort Failures: %lld\n"
                "Number of Abort Driver Timeouts: %lld\n"
                "Number of Abort FW Timeouts: %lld\n"
                "Number of Abort IO NOT Found: %lld\n"

                "Abort issued times: \n"
                " < 6 sec : %lld\n"
                " 6 sec - 20 sec : %lld\n"
                " 20 sec - 30 sec : %lld\n"
                " 30 sec - 40 sec : %lld\n"
                " 40 sec - 50 sec : %lld\n"
                " 50 sec - 60 sec : %lld\n"
                " > 60 sec: %lld\n",

                (u64)atomic64_read(&stats->abts_stats.aborts),
                (u64)atomic64_read(&stats->abts_stats.abort_failures),
                (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
                (u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts),
                (u64)atomic64_read(&stats->abts_stats.abort_io_not_found),
                (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_0_to_6_sec),
                (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_6_to_20_sec),
                (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_20_to_30_sec),
                (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_30_to_40_sec),
                (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_40_to_50_sec),
                (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_50_to_60_sec),
                (u64)atomic64_read(&stats->abts_stats.abort_issued_greater_than_60_sec));

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tTerminate Statistics\n"
                "------------------------------------------\n");

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Terminates: %lld\n"
                "Maximum Terminates: %lld\n"
                "Number of Terminate Driver Timeouts: %lld\n"
                "Number of Terminate FW Timeouts: %lld\n"
                "Number of Terminate IO NOT Found: %lld\n"
                "Number of Terminate Failures: %lld\n",
                (u64)atomic64_read(&stats->term_stats.terminates),
                (u64)atomic64_read(&stats->term_stats.max_terminates),
                (u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts),
                (u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts),
                (u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
                (u64)atomic64_read(&stats->term_stats.terminate_failures));

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tReset Statistics\n"
                "------------------------------------------\n");

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Device Resets: %lld\n"
                "Number of Device Reset Failures: %lld\n"
                "Number of Device Reset Aborts: %lld\n"
                "Number of Device Reset Timeouts: %lld\n"
                "Number of Device Reset Terminates: %lld\n"
                "Number of FW Resets: %lld\n"
                "Number of FW Reset Completions: %lld\n"
                "Number of FW Reset Failures: %lld\n"
                "Number of Fnic Reset: %lld\n"
                "Number of Fnic Reset Completions: %lld\n"
                "Number of Fnic Reset Failures: %lld\n",
                (u64)atomic64_read(&stats->reset_stats.device_resets),
                (u64)atomic64_read(&stats->reset_stats.device_reset_failures),
                (u64)atomic64_read(&stats->reset_stats.device_reset_aborts),
                (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts),
                (u64)atomic64_read(
                        &stats->reset_stats.device_reset_terminates),
                (u64)atomic64_read(&stats->reset_stats.fw_resets),
                (u64)atomic64_read(&stats->reset_stats.fw_reset_completions),
                (u64)atomic64_read(&stats->reset_stats.fw_reset_failures),
                (u64)atomic64_read(&stats->reset_stats.fnic_resets),
                (u64)atomic64_read(
                        &stats->reset_stats.fnic_reset_completions),
                (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tFirmware Statistics\n"
                "------------------------------------------\n");

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Active FW Requests %lld\n"
                "Maximum FW Requests: %lld\n"
                "Number of FW out of resources: %lld\n"
                "Number of FW IO errors: %lld\n",
                (u64)atomic64_read(&stats->fw_stats.active_fw_reqs),
                (u64)atomic64_read(&stats->fw_stats.max_fw_reqs),
                (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
                (u64)atomic64_read(&stats->fw_stats.io_fw_errs));

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tVlan Discovery Statistics\n"
                "------------------------------------------\n");

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Vlan Discovery Requests Sent %lld\n"
                "Vlan Response Received with no FCF VLAN ID: %lld\n"
                "No solicitations recvd after vlan set, expiry count: %lld\n"
                "Flogi rejects count: %lld\n",
                (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs),
                (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID),
                (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
                (u64)atomic64_read(&stats->vlan_stats.flogi_rejects));

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tOther Important Statistics\n"
                "------------------------------------------\n");

        jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1);
        jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Last ISR time: %llu (%8llu.%09lu)\n"
                "Last ACK time: %llu (%8llu.%09lu)\n"
                "Number of ISRs: %lld\n"
                "Maximum CQ Entries: %lld\n"
                "Number of ACK index out of range: %lld\n"
                "Number of data count mismatch: %lld\n"
                "Number of FCPIO Timeouts: %lld\n"
                "Number of FCPIO Aborted: %lld\n"
                "Number of SGL Invalid: %lld\n"
                "Number of Copy WQ Alloc Failures for ABTs: %lld\n"
                "Number of Copy WQ Alloc Failures for Device Reset: %lld\n"
                "Number of Copy WQ Alloc Failures for IOs: %lld\n"
                "Number of no icmnd itmf Completions: %lld\n"
                "Number of Check Conditions encountered: %lld\n"
                "Number of QUEUE Fulls: %lld\n"
                "Number of rport not ready: %lld\n"
                "Number of receive frame errors: %lld\n",
                (u64)stats->misc_stats.last_isr_time,
                (s64)val1.tv_sec, val1.tv_nsec,
                (u64)stats->misc_stats.last_ack_time,
                (s64)val2.tv_sec, val2.tv_nsec,
                (u64)atomic64_read(&stats->misc_stats.isr_count),
                (u64)atomic64_read(&stats->misc_stats.max_cq_entries),
                (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
                (u64)atomic64_read(&stats->misc_stats.data_count_mismatch),
                (u64)atomic64_read(&stats->misc_stats.fcpio_timeout),
                (u64)atomic64_read(&stats->misc_stats.fcpio_aborted),
                (u64)atomic64_read(&stats->misc_stats.sgl_invalid),
                (u64)atomic64_read(
                        &stats->misc_stats.abts_cpwq_alloc_failures),
                (u64)atomic64_read(
                        &stats->misc_stats.devrst_cpwq_alloc_failures),
                (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures),
                (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
                (u64)atomic64_read(&stats->misc_stats.check_condition),
                (u64)atomic64_read(&stats->misc_stats.queue_fulls),
                (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
                (u64)atomic64_read(&stats->misc_stats.frame_errors));

        return len;

}

/*
 * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
 *
 * Description:
 * Initialize trace buffer data structure by allocating required memory and
 * setting page_offset information for every trace entry by adding trace entry
 * length to previous page_offset value.
 */
int fnic_trace_buf_init(void)
{
        unsigned long fnic_buf_head;
        int i;
        int err = 0;

        trace_max_pages = fnic_trace_max_pages;
        fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
                                        FNIC_ENTRY_SIZE_BYTES;

        fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
        if (!fnic_trace_buf_p) {
                printk(KERN_ERR PFX "Failed to allocate memory "
                                        "for fnic_trace_buf_p\n");
                err = -ENOMEM;
                goto err_fnic_trace_buf_init;
        }
        memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));

        fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries *
                                                sizeof(unsigned long));
        if (!fnic_trace_entries.page_offset) {
                printk(KERN_ERR PFX "Failed to allocate memory for"
                                        " page_offset\n");
                if (fnic_trace_buf_p) {
                        vfree((void *)fnic_trace_buf_p);
                        fnic_trace_buf_p = 0;
                }
                err = -ENOMEM;
                goto err_fnic_trace_buf_init;
        }
        memset((void *)fnic_trace_entries.page_offset, 0,
                (fnic_max_trace_entries * sizeof(unsigned long)));
        fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
        fnic_buf_head = fnic_trace_buf_p;

        /*
         * Set page_offset field of fnic_trace_entries struct by
         * calculating memory location for every trace entry using
         * length of each trace entry
         */
        for (i = 0; i < fnic_max_trace_entries; i++) {
                fnic_trace_entries.page_offset[i] = fnic_buf_head;
                fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
        }
        err = fnic_trace_debugfs_init();
        if (err < 0) {
                pr_err("fnic: Failed to initialize debugfs for tracing\n");
                goto err_fnic_trace_debugfs_init;
        }
        pr_info("fnic: Successfully Initialized Trace Buffer\n");
        return err;
err_fnic_trace_debugfs_init:
        fnic_trace_free();
err_fnic_trace_buf_init:
        return err;
}

/*
 * fnic_trace_free - Free memory of fnic trace data structures.
 */
void fnic_trace_free(void)
{
        fnic_tracing_enabled = 0;
        fnic_trace_debugfs_terminate();
        if (fnic_trace_entries.page_offset) {
                vfree((void *)fnic_trace_entries.page_offset);
                fnic_trace_entries.page_offset = NULL;
        }
        if (fnic_trace_buf_p) {
                vfree((void *)fnic_trace_buf_p);
                fnic_trace_buf_p = 0;
        }
        printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
}

/*
 * fnic_fc_trace_init -
 * Initialize trace buffer to log fnic control frames
 * Description:
 * Initialize trace buffer data structure by allocating
 * required memory for trace data as well as for Indexes.
 * Frame size is 256 bytes and
 * memory is allocated for 1024 entries of 256 bytes.
 * Page_offset(Index) is set to the address of trace entry
 * and page_offset is initialized by adding frame size
 * to the previous page_offset entry.
 */

int fnic_fc_trace_init(void)
{
        unsigned long fc_trace_buf_head;
        int err = 0;
        int i;

        fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
                                FC_TRC_SIZE_BYTES;
        fnic_fc_ctlr_trace_buf_p = (unsigned long)vmalloc(
                                        fnic_fc_trace_max_pages * PAGE_SIZE);
        if (!fnic_fc_ctlr_trace_buf_p) {
                pr_err("fnic: Failed to allocate memory for "
                       "FC Control Trace Buf\n");
                err = -ENOMEM;
                goto err_fnic_fc_ctlr_trace_buf_init;
        }

        memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
                        fnic_fc_trace_max_pages * PAGE_SIZE);

        /* Allocate memory for page offset */
        fc_trace_entries.page_offset = vmalloc(fc_trace_max_entries *
                                                sizeof(unsigned long));
        if (!fc_trace_entries.page_offset) {
                pr_err("fnic: Failed to allocate memory for page_offset\n");
                if (fnic_fc_ctlr_trace_buf_p) {
                        pr_err("fnic: Freeing FC Control Trace Buf\n");
                        vfree((void *)fnic_fc_ctlr_trace_buf_p);
                        fnic_fc_ctlr_trace_buf_p = 0;
                }
                err = -ENOMEM;
                goto err_fnic_fc_ctlr_trace_buf_init;
        }
        memset((void *)fc_trace_entries.page_offset, 0,
                (fc_trace_max_entries * sizeof(unsigned long)));

        fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
        fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;

        /*
         * Set up fc_trace_entries.page_offset field with memory location
         * for every trace entry
         */
        for (i = 0; i < fc_trace_max_entries; i++) {
                fc_trace_entries.page_offset[i] = fc_trace_buf_head;
                fc_trace_buf_head += FC_TRC_SIZE_BYTES;
        }
        err = fnic_fc_trace_debugfs_init();
        if (err < 0) {
                pr_err("fnic: Failed to initialize FC_CTLR tracing.\n");
                goto err_fnic_fc_ctlr_trace_debugfs_init;
        }
        pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n");
        return err;

err_fnic_fc_ctlr_trace_debugfs_init:
        fnic_fc_trace_free();
err_fnic_fc_ctlr_trace_buf_init:
        return err;
}

/*
 * fnic_fc_trace_free - Free memory of fnic_fc_ctlr trace data structures.
 */
void fnic_fc_trace_free(void)
{
        fnic_fc_tracing_enabled = 0;
        fnic_fc_trace_debugfs_terminate();
        if (fc_trace_entries.page_offset) {
                vfree((void *)fc_trace_entries.page_offset);
                fc_trace_entries.page_offset = NULL;
        }
        if (fnic_fc_ctlr_trace_buf_p) {
                vfree((void *)fnic_fc_ctlr_trace_buf_p);
                fnic_fc_ctlr_trace_buf_p = 0;
        }
        pr_info("fnic: Successfully Freed FC_CTLR Trace Buffer\n");
}

/*
 * fnic_fc_trace_set_data:
 * Maintain rd & wr idx accordingly and set data
 * Passed parameters:
 * host_no: host number associated with fnic
 * frame_type: send frame, receive frame or link event
 * fc_frame: pointer to fc_frame
 * frame_len: Length of the fc_frame
 * Description:
 * This routine will get the next available wr_idx and
 * copy all passed trace data to the buffer pointed to by wr_idx,
 * then increment wr_idx. It will also make sure that we don't
 * overwrite the entry which we are reading and also
 * wrap around if we reach the maximum entries.
 * Returned Value:
 * It will return 0 for success or -1 for failure
 */
int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
                                char *frame, u32 fc_trc_frame_len)
{
        unsigned long flags;
        struct fc_trace_hdr *fc_buf;
        unsigned long eth_fcoe_hdr_len;
        char *fc_trace;

        if (fnic_fc_tracing_enabled == 0)
                return 0;

        spin_lock_irqsave(&fnic_fc_trace_lock, flags);

        if (fnic_fc_trace_cleared == 1) {
                fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
                pr_info("fnic: Resetting the read idx\n");
                memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
                                fnic_fc_trace_max_pages * PAGE_SIZE);
                fnic_fc_trace_cleared = 0;
        }

        fc_buf = (struct fc_trace_hdr *)
                fc_trace_entries.page_offset[fc_trace_entries.wr_idx];

        fc_trace_entries.wr_idx++;

        if (fc_trace_entries.wr_idx >= fc_trace_max_entries)
                fc_trace_entries.wr_idx = 0;

        if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
                fc_trace_entries.rd_idx++;
                if (fc_trace_entries.rd_idx >= fc_trace_max_entries)
                        fc_trace_entries.rd_idx = 0;
        }

        ktime_get_real_ts64(&fc_buf->time_stamp);
        fc_buf->host_no = host_no;
        fc_buf->frame_type = frame_type;

        fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf);

        /* During the receive path, we do not have eth hdr as well as fcoe hdr
         * at trace entry point so we will stuff 0xff just to make it generic.
         */
        if (frame_type == FNIC_FC_RECV) {
                eth_fcoe_hdr_len = sizeof(struct ethhdr) +
                                        sizeof(struct fcoe_hdr);
                memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
                /* Copy the rest of data frame */
                memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
                min_t(u8, fc_trc_frame_len,
                        (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE
                                        - eth_fcoe_hdr_len)));
        } else {
                memcpy((char *)fc_trace, (void *)frame,
                min_t(u8, fc_trc_frame_len,
                        (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
        }

        /* Store the actual received length */
        fc_buf->frame_len = fc_trc_frame_len;

        spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
        return 0;
}

/*
 * fnic_fc_trace_get_data: Copy trace buffer to a memory file
 * Passed parameter:
 * @fnic_dbgfs_t: pointer to debugfs trace buffer
 * rdata_flag: 1 => Unformatted file
 *             0 => Formatted file
 * Description:
 * This routine will copy the trace data to the memory file with
 * proper formatting and also copy to another memory
 * file without formatting for further processing.
 * Return Value:
 * Number of bytes that were dumped into fnic_dbgfs_t
 */

int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
{
        int rd_idx, wr_idx;
        unsigned long flags;
        int len = 0, j;
        struct fc_trace_hdr *tdata;
        char *fc_trace;

        spin_lock_irqsave(&fnic_fc_trace_lock, flags);
        if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
                spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
                pr_info("fnic: Buffer is empty\n");
                return 0;
        }
        rd_idx = fc_trace_entries.rd_idx;
        wr_idx = fc_trace_entries.wr_idx;
        if (rdata_flag == 0) {
                len += snprintf(fnic_dbgfs_prt->buffer + len,
                        (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
                        "Time Stamp (UTC)\t\t"
                        "Host No: F Type: len: FCoE_FRAME:\n");
        }

        while (rd_idx != wr_idx) {
                tdata = (struct fc_trace_hdr *)
                        fc_trace_entries.page_offset[rd_idx];
                if (!tdata) {
                        pr_info("fnic: Rd data is NULL\n");
                        spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
                        return 0;
                }
                if (rdata_flag == 0) {
                        copy_and_format_trace_data(tdata,
                                fnic_dbgfs_prt, &len, rdata_flag);
                } else {
                        fc_trace = (char *)tdata;
                        for (j = 0; j < FC_TRC_SIZE_BYTES; j++) {
                                len += snprintf(fnic_dbgfs_prt->buffer + len,
                                        (fnic_fc_trace_max_pages * PAGE_SIZE * 3)
                                        - len, "%02x", fc_trace[j] & 0xff);
                        } /* for loop */
                        len += snprintf(fnic_dbgfs_prt->buffer + len,
                                (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
                                "\n");
                }
                rd_idx++;
                if (rd_idx > (fc_trace_max_entries - 1))
                        rd_idx = 0;
        }

        spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
        return len;
}

/*
 * copy_and_format_trace_data: Copy formatted data to char * buffer
 * Passed Parameter:
 * @fc_trace_hdr_t: pointer to trace data
 * @fnic_dbgfs_t: pointer to debugfs trace buffer
 * @orig_len: pointer to len
 * rdata_flag: 0 => Formatted file, 1 => Unformatted file
 * Description:
 * This routine will format and copy the passed trace data
 * to the formatted or unformatted file accordingly.
 */

void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
                                fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len,
                                u8 rdata_flag)
{
        struct tm tm;
        int j, i = 1, len;
        char *fc_trace, *fmt;
        int ethhdr_len = sizeof(struct ethhdr) - 1;
        int fcoehdr_len = sizeof(struct fcoe_hdr);
        int fchdr_len = sizeof(struct fc_frame_header);
        int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3;

        tdata->frame_type = tdata->frame_type & 0x7F;

        len = *orig_len;

        time64_to_tm(tdata->time_stamp.tv_sec, 0, &tm);

        fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
        len += snprintf(fnic_dbgfs_prt->buffer + len,
                max_size - len,
                fmt,
                tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
                tm.tm_hour, tm.tm_min, tm.tm_sec,
                tdata->time_stamp.tv_nsec, tdata->host_no,
                tdata->frame_type, tdata->frame_len);

        fc_trace = (char *)FC_TRACE_ADDRESS(tdata);

        for (j = 0; j < min_t(u8, tdata->frame_len,
                (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) {
                if (tdata->frame_type == FNIC_FC_LE) {
                        len += snprintf(fnic_dbgfs_prt->buffer + len,
                                max_size - len, "%c", fc_trace[j]);
                } else {
                        len += snprintf(fnic_dbgfs_prt->buffer + len,
                                max_size - len, "%02x", fc_trace[j] & 0xff);
                        len += snprintf(fnic_dbgfs_prt->buffer + len,
                                max_size - len, " ");
                        if (j == ethhdr_len ||
                                j == ethhdr_len + fcoehdr_len ||
                                j == ethhdr_len + fcoehdr_len + fchdr_len ||
                                (i > 3 && j%fchdr_len == 0)) {
                                len += snprintf(fnic_dbgfs_prt->buffer
                                        + len, max_size - len,
                                        "\n\t\t\t\t\t\t\t\t");
                                i++;
                        }
                } /* end of else */
        } /* End of for loop */
        len += snprintf(fnic_dbgfs_prt->buffer + len,
                max_size - len, "\n");
        *orig_len = len;
}
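
/*
 * Usage sketch (illustrative only, not compiled into the driver): a frame
 * receive path could record a frame into the FC_CTLR trace ring, and the
 * debugfs read handler could later render it, roughly as follows. The
 * identifiers host_no, frame, frame_len and fnic_dbgfs_prt are placeholders
 * for whatever the calling context provides; only fnic_fc_trace_set_data(),
 * fnic_fc_trace_get_data() and FNIC_FC_RECV are taken from this file.
 *
 *      record one received frame (returns 0 on success):
 *              fnic_fc_trace_set_data(host_no, FNIC_FC_RECV,
 *                                     frame, frame_len);
 *
 *      later, dump all buffered entries in formatted form
 *      (rdata_flag == 0) into the debugfs buffer:
 *              len = fnic_fc_trace_get_data(fnic_dbgfs_prt, 0);
 */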