/*
 * Copyright 2012 Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include "fnic_io.h"
#include "fnic.h"

unsigned int trace_max_pages;
static int fnic_max_trace_entries;

static unsigned long fnic_trace_buf_p;
static DEFINE_SPINLOCK(fnic_trace_lock);

static fnic_trace_dbg_t fnic_trace_entries;
int fnic_tracing_enabled = 1;

/* static char *fnic_fc_ctlr_trace_buf_p; */

static int fc_trace_max_entries;
static unsigned long fnic_fc_ctlr_trace_buf_p;
static fnic_trace_dbg_t fc_trace_entries;
int fnic_fc_tracing_enabled = 1;
int fnic_fc_trace_cleared = 1;
static DEFINE_SPINLOCK(fnic_fc_trace_lock);


/*
 * fnic_trace_get_buf - Give a buffer pointer to the caller to fill with trace information
 *
 * Description:
 * This routine gets the next available trace buffer entry location @wr_idx
 * from the allocated trace buffer pages and returns that memory location
 * to the caller to store the trace information.
 *
 * Return Value:
 * This routine returns a pointer to the next available trace entry
 * @fnic_buf_head for the caller to fill with trace information.
 */
fnic_trace_data_t *fnic_trace_get_buf(void)
{
        unsigned long fnic_buf_head;
        unsigned long flags;

        spin_lock_irqsave(&fnic_trace_lock, flags);

        /*
         * Get next available memory location for writing trace information
         * at @wr_idx and increment @wr_idx
         */
        fnic_buf_head =
                fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
        fnic_trace_entries.wr_idx++;

        /*
         * If the trace buffer is full, wrap @wr_idx around to
         * start from zero
         */
        if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
                fnic_trace_entries.wr_idx = 0;

        /*
         * If write index @wr_idx and read index @rd_idx are the same,
         * increment @rd_idx to move to the next entry in the trace buffer
         */
        if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
                fnic_trace_entries.rd_idx++;
                if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
                        fnic_trace_entries.rd_idx = 0;
        }
        spin_unlock_irqrestore(&fnic_trace_lock, flags);
        return (fnic_trace_data_t *)fnic_buf_head;
}
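
/*
 * Illustrative caller sketch (hypothetical, not part of the driver): a
 * tracing helper would typically grab an entry and fill it in directly.
 * The field names mirror fnic_trace_data_t as read back by
 * fnic_get_trace_data() below; the 64-bit .val accessors and the local
 * variables are assumptions for the example.
 *
 *      fnic_trace_data_t *trace_buf = fnic_trace_get_buf();
 *
 *      if (trace_buf) {
 *              trace_buf->timestamp.val = jiffies;
 *              trace_buf->fnaddr.val = (u64)(unsigned long)callsite_fn;
 *              trace_buf->host_no = host_no;
 *              trace_buf->tag = io_tag;
 *              trace_buf->data[0] = d0;
 *              trace_buf->data[1] = d1;
 *      }
 */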

/*
 * fnic_get_trace_data - Copy trace buffer to a memory file
 * @fnic_dbgfs_t: pointer to debugfs trace buffer
 *
 * Description:
 * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
 * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in
 * the log and process the log until the end of the buffer. Then it will gather
 * from the beginning of the log and process until the current entry @wr_idx.
 *
 * Return Value:
 * This routine returns the number of bytes that were dumped into fnic_dbgfs_t
 */
int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
{
        int rd_idx;
        int wr_idx;
        int len = 0;
        unsigned long flags;
        char str[KSYM_SYMBOL_LEN];
        struct timespec64 val;
        fnic_trace_data_t *tbp;

        spin_lock_irqsave(&fnic_trace_lock, flags);
        rd_idx = fnic_trace_entries.rd_idx;
        wr_idx = fnic_trace_entries.wr_idx;
        if (wr_idx < rd_idx) {
                while (1) {
                        /* Start from read index @rd_idx */
                        tbp = (fnic_trace_data_t *)
                                fnic_trace_entries.page_offset[rd_idx];
                        if (!tbp) {
                                spin_unlock_irqrestore(&fnic_trace_lock, flags);
                                return 0;
                        }
                        /* Convert function pointer to function name */
                        if (sizeof(unsigned long) < 8) {
                                sprint_symbol(str, tbp->fnaddr.low);
                                jiffies_to_timespec64(tbp->timestamp.low, &val);
                        } else {
                                sprint_symbol(str, tbp->fnaddr.val);
                                jiffies_to_timespec64(tbp->timestamp.val, &val);
                        }
                        /*
                         * Dump trace buffer entry to memory file
                         * and increment read index @rd_idx
                         */
                        len += scnprintf(fnic_dbgfs_prt->buffer + len,
                                (trace_max_pages * PAGE_SIZE * 3) - len,
                                "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
                                "%16llx %16llx %16llx\n", (u64)val.tv_sec,
                                val.tv_nsec, str, tbp->host_no, tbp->tag,
                                tbp->data[0], tbp->data[1], tbp->data[2],
                                tbp->data[3], tbp->data[4]);
                        rd_idx++;
                        /*
                         * If rd_idx has reached the maximum number of trace
                         * entries, wrap rd_idx around to zero
                         */
                        if (rd_idx > (fnic_max_trace_entries - 1))
                                rd_idx = 0;
                        /*
                         * Continue dumping trace buffer entries into the
                         * memory file till rd_idx reaches the write index
                         */
                        if (rd_idx == wr_idx)
                                break;
                }
        } else if (wr_idx > rd_idx) {
                while (1) {
                        /* Start from read index @rd_idx */
                        tbp = (fnic_trace_data_t *)
                                fnic_trace_entries.page_offset[rd_idx];
                        if (!tbp) {
                                spin_unlock_irqrestore(&fnic_trace_lock, flags);
                                return 0;
                        }
                        /* Convert function pointer to function name */
                        if (sizeof(unsigned long) < 8) {
                                sprint_symbol(str, tbp->fnaddr.low);
                                jiffies_to_timespec64(tbp->timestamp.low, &val);
                        } else {
                                sprint_symbol(str, tbp->fnaddr.val);
                                jiffies_to_timespec64(tbp->timestamp.val, &val);
                        }
                        /*
                         * Dump trace buffer entry to memory file
                         * and increment read index @rd_idx
                         */
                        len += scnprintf(fnic_dbgfs_prt->buffer + len,
                                (trace_max_pages * PAGE_SIZE * 3) - len,
                                "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
                                "%16llx %16llx %16llx\n", (u64)val.tv_sec,
                                val.tv_nsec, str, tbp->host_no, tbp->tag,
                                tbp->data[0], tbp->data[1], tbp->data[2],
                                tbp->data[3], tbp->data[4]);
                        rd_idx++;
                        /*
                         * Continue dumping trace buffer entries into the
                         * memory file till rd_idx reaches the write index
                         */
                        if (rd_idx == wr_idx)
                                break;
                }
        }
        spin_unlock_irqrestore(&fnic_trace_lock, flags);
        return len;
}
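
/*
 * Note: fnic_get_trace_data() is intended to be driven by the fnic debugfs
 * code (set up via fnic_trace_debugfs_init() below) when the trace file is
 * read; the exact read-side plumbing lives in fnic_debugfs.c and is assumed
 * here rather than shown.
 */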

/*
 * fnic_get_stats_data - Copy fnic stats buffer to a memory file
 * @fnic_dbgfs_t: pointer to debugfs fnic stats buffer
 *
 * Description:
 * This routine gathers the fnic stats debugfs data from the fnic_stats struct
 * and dumps it to stats_debug_info.
 *
 * Return Value:
 * This routine returns the number of bytes that were dumped into
 * stats_debug_info
 */
int fnic_get_stats_data(struct stats_debug_info *debug,
                        struct fnic_stats *stats)
{
        int len = 0;
        int buf_size = debug->buf_size;
        struct timespec64 val1, val2;

        ktime_get_real_ts64(&val1);
        len = scnprintf(debug->debug_buffer + len, buf_size - len,
                "------------------------------------------\n"
                "\t\tTime\n"
                "------------------------------------------\n");

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "Current time : [%lld:%ld]\n"
                "Last stats reset time: [%lld:%09ld]\n"
                "Last stats read time: [%lld:%ld]\n"
                "delta since last reset: [%lld:%ld]\n"
                "delta since last read: [%lld:%ld]\n",
                (s64)val1.tv_sec, val1.tv_nsec,
                (s64)stats->stats_timestamps.last_reset_time.tv_sec,
                stats->stats_timestamps.last_reset_time.tv_nsec,
                (s64)stats->stats_timestamps.last_read_time.tv_sec,
                stats->stats_timestamps.last_read_time.tv_nsec,
                (s64)timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
                timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
                (s64)timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
                timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);

        stats->stats_timestamps.last_read_time = val1;

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "------------------------------------------\n"
                "\t\tIO Statistics\n"
                "------------------------------------------\n");
        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
                "Number of IOs: %lld\nNumber of IO Completions: %lld\n"
                "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
                "Number of Memory alloc Failures: %lld\n"
                "Number of IOREQ Null: %lld\n"
                "Number of SCSI cmd pointer Null: %lld\n"

                "\nIO completion times: \n"
                " < 10 ms : %lld\n"
                " 10 ms - 100 ms : %lld\n"
                " 100 ms - 500 ms : %lld\n"
                " 500 ms - 5 sec: %lld\n"
                " 5 sec - 10 sec: %lld\n"
                " 10 sec - 30 sec: %lld\n"
                " > 30 sec: %lld\n",
                (u64)atomic64_read(&stats->io_stats.active_ios),
                (u64)atomic64_read(&stats->io_stats.max_active_ios),
                (u64)atomic64_read(&stats->io_stats.num_ios),
                (u64)atomic64_read(&stats->io_stats.io_completions),
                (u64)atomic64_read(&stats->io_stats.io_failures),
                (u64)atomic64_read(&stats->io_stats.io_not_found),
                (u64)atomic64_read(&stats->io_stats.alloc_failures),
                (u64)atomic64_read(&stats->io_stats.ioreq_null),
                (u64)atomic64_read(&stats->io_stats.sc_null),
                (u64)atomic64_read(&stats->io_stats.io_btw_0_to_10_msec),
                (u64)atomic64_read(&stats->io_stats.io_btw_10_to_100_msec),
                (u64)atomic64_read(&stats->io_stats.io_btw_100_to_500_msec),
                (u64)atomic64_read(&stats->io_stats.io_btw_500_to_5000_msec),
                (u64)atomic64_read(&stats->io_stats.io_btw_5000_to_10000_msec),
                (u64)atomic64_read(&stats->io_stats.io_btw_10000_to_30000_msec),
                (u64)atomic64_read(&stats->io_stats.io_greater_than_30000_msec));

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "\nCurrent Max IO time : %lld\n",
                (u64)atomic64_read(&stats->io_stats.current_max_io_time));

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tAbort Statistics\n"
                "------------------------------------------\n");

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Aborts: %lld\n"
                "Number of Abort Failures: %lld\n"
                "Number of Abort Driver Timeouts: %lld\n"
                "Number of Abort FW Timeouts: %lld\n"
                "Number of Abort IO NOT Found: %lld\n"

                "Abort issued times: \n"
                " < 6 sec : %lld\n"
                " 6 sec - 20 sec : %lld\n"
                " 20 sec - 30 sec : %lld\n"
                " 30 sec - 40 sec : %lld\n"
                " 40 sec - 50 sec : %lld\n"
                " 50 sec - 60 sec : %lld\n"
                " > 60 sec: %lld\n",

                (u64)atomic64_read(&stats->abts_stats.aborts),
                (u64)atomic64_read(&stats->abts_stats.abort_failures),
                (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
                (u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts),
                (u64)atomic64_read(&stats->abts_stats.abort_io_not_found),
                (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_0_to_6_sec),
                (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_6_to_20_sec),
                (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_20_to_30_sec),
                (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_30_to_40_sec),
                (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_40_to_50_sec),
                (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_50_to_60_sec),
                (u64)atomic64_read(&stats->abts_stats.abort_issued_greater_than_60_sec));

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tTerminate Statistics\n"
                "------------------------------------------\n");

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Terminates: %lld\n"
                "Maximum Terminates: %lld\n"
                "Number of Terminate Driver Timeouts: %lld\n"
                "Number of Terminate FW Timeouts: %lld\n"
                "Number of Terminate IO NOT Found: %lld\n"
                "Number of Terminate Failures: %lld\n",
                (u64)atomic64_read(&stats->term_stats.terminates),
                (u64)atomic64_read(&stats->term_stats.max_terminates),
                (u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts),
                (u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts),
                (u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
                (u64)atomic64_read(&stats->term_stats.terminate_failures));

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tReset Statistics\n"
                "------------------------------------------\n");

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Device Resets: %lld\n"
                "Number of Device Reset Failures: %lld\n"
                "Number of Device Reset Aborts: %lld\n"
                "Number of Device Reset Timeouts: %lld\n"
                "Number of Device Reset Terminates: %lld\n"
                "Number of FW Resets: %lld\n"
                "Number of FW Reset Completions: %lld\n"
                "Number of FW Reset Failures: %lld\n"
                "Number of Fnic Reset: %lld\n"
                "Number of Fnic Reset Completions: %lld\n"
                "Number of Fnic Reset Failures: %lld\n",
                (u64)atomic64_read(&stats->reset_stats.device_resets),
                (u64)atomic64_read(&stats->reset_stats.device_reset_failures),
                (u64)atomic64_read(&stats->reset_stats.device_reset_aborts),
                (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts),
                (u64)atomic64_read(
                        &stats->reset_stats.device_reset_terminates),
                (u64)atomic64_read(&stats->reset_stats.fw_resets),
                (u64)atomic64_read(&stats->reset_stats.fw_reset_completions),
                (u64)atomic64_read(&stats->reset_stats.fw_reset_failures),
                (u64)atomic64_read(&stats->reset_stats.fnic_resets),
                (u64)atomic64_read(
                        &stats->reset_stats.fnic_reset_completions),
                (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tFirmware Statistics\n"
                "------------------------------------------\n");

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Active FW Requests %lld\n"
                "Maximum FW Requests: %lld\n"
                "Number of FW out of resources: %lld\n"
                "Number of FW IO errors: %lld\n",
                (u64)atomic64_read(&stats->fw_stats.active_fw_reqs),
                (u64)atomic64_read(&stats->fw_stats.max_fw_reqs),
                (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
                (u64)atomic64_read(&stats->fw_stats.io_fw_errs));

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tVlan Discovery Statistics\n"
                "------------------------------------------\n");

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Vlan Discovery Requests Sent %lld\n"
                "Vlan Response Received with no FCF VLAN ID: %lld\n"
                "No solicitations recvd after vlan set, expiry count: %lld\n"
                "Flogi rejects count: %lld\n",
                (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs),
                (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID),
                (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
                (u64)atomic64_read(&stats->vlan_stats.flogi_rejects));

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tOther Important Statistics\n"
                "------------------------------------------\n");

        jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1);
        jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                "Last ISR time: %llu (%8llu.%09lu)\n"
                "Last ACK time: %llu (%8llu.%09lu)\n"
                "Max ISR jiffies: %llu\n"
                "Max ISR time (ms) (0 denotes < 1 ms): %llu\n"
                "Corr. work done: %llu\n"
                "Number of ISRs: %lld\n"
                "Maximum CQ Entries: %lld\n"
                "Number of ACK index out of range: %lld\n"
                "Number of data count mismatch: %lld\n"
                "Number of FCPIO Timeouts: %lld\n"
                "Number of FCPIO Aborted: %lld\n"
                "Number of SGL Invalid: %lld\n"
                "Number of Copy WQ Alloc Failures for ABTs: %lld\n"
                "Number of Copy WQ Alloc Failures for Device Reset: %lld\n"
                "Number of Copy WQ Alloc Failures for IOs: %lld\n"
                "Number of no icmnd itmf Completions: %lld\n"
                "Number of Check Conditions encountered: %lld\n"
                "Number of QUEUE Fulls: %lld\n"
                "Number of rport not ready: %lld\n"
                "Number of receive frame errors: %lld\n",
                (u64)stats->misc_stats.last_isr_time,
                (s64)val1.tv_sec, val1.tv_nsec,
                (u64)stats->misc_stats.last_ack_time,
                (s64)val2.tv_sec, val2.tv_nsec,
                (u64)atomic64_read(&stats->misc_stats.max_isr_jiffies),
                (u64)atomic64_read(&stats->misc_stats.max_isr_time_ms),
                (u64)atomic64_read(&stats->misc_stats.corr_work_done),
                (u64)atomic64_read(&stats->misc_stats.isr_count),
                (u64)atomic64_read(&stats->misc_stats.max_cq_entries),
                (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
                (u64)atomic64_read(&stats->misc_stats.data_count_mismatch),
                (u64)atomic64_read(&stats->misc_stats.fcpio_timeout),
                (u64)atomic64_read(&stats->misc_stats.fcpio_aborted),
                (u64)atomic64_read(&stats->misc_stats.sgl_invalid),
                (u64)atomic64_read(
                        &stats->misc_stats.abts_cpwq_alloc_failures),
                (u64)atomic64_read(
                        &stats->misc_stats.devrst_cpwq_alloc_failures),
                (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures),
                (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
                (u64)atomic64_read(&stats->misc_stats.check_condition),
                (u64)atomic64_read(&stats->misc_stats.queue_fulls),
                (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
                (u64)atomic64_read(&stats->misc_stats.frame_errors));

        len += scnprintf(debug->debug_buffer + len, buf_size - len,
                        "Firmware reported port speed: %llu\n",
                        (u64)atomic64_read(
                                &stats->misc_stats.current_port_speed));

        return len;

}
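
/*
 * Note: fnic_get_stats_data() updates stats_timestamps.last_read_time as a
 * side effect, so the "delta since last read" line always reflects the time
 * since the previous read of the stats file.
 */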

/*
 * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
 *
 * Description:
 * Initialize trace buffer data structure by allocating required memory and
 * setting page_offset information for every trace entry by adding trace entry
 * length to previous page_offset value.
 */
int fnic_trace_buf_init(void)
{
        unsigned long fnic_buf_head;
        int i;
        int err = 0;

        trace_max_pages = fnic_trace_max_pages;
        fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
                                        FNIC_ENTRY_SIZE_BYTES;

        fnic_trace_buf_p = (unsigned long)vzalloc(trace_max_pages * PAGE_SIZE);
        if (!fnic_trace_buf_p) {
                printk(KERN_ERR PFX "Failed to allocate memory "
                                "for fnic_trace_buf_p\n");
                err = -ENOMEM;
                goto err_fnic_trace_buf_init;
        }

        fnic_trace_entries.page_offset =
                vmalloc(array_size(fnic_max_trace_entries,
                                   sizeof(unsigned long)));
        if (!fnic_trace_entries.page_offset) {
                printk(KERN_ERR PFX "Failed to allocate memory for"
                                " page_offset\n");
                if (fnic_trace_buf_p) {
                        vfree((void *)fnic_trace_buf_p);
                        fnic_trace_buf_p = 0;
                }
                err = -ENOMEM;
                goto err_fnic_trace_buf_init;
        }
        memset((void *)fnic_trace_entries.page_offset, 0,
               (fnic_max_trace_entries * sizeof(unsigned long)));
        fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
        fnic_buf_head = fnic_trace_buf_p;

        /*
         * Set page_offset field of fnic_trace_entries struct by
         * calculating memory location for every trace entry using
         * length of each trace entry
         */
        for (i = 0; i < fnic_max_trace_entries; i++) {
                fnic_trace_entries.page_offset[i] = fnic_buf_head;
                fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
        }
        fnic_trace_debugfs_init();
        pr_info("fnic: Successfully Initialized Trace Buffer\n");
        return err;

err_fnic_trace_buf_init:
        return err;
}

/*
 * fnic_trace_free - Free memory of fnic trace data structures.
 */
void fnic_trace_free(void)
{
        fnic_tracing_enabled = 0;
        fnic_trace_debugfs_terminate();
        if (fnic_trace_entries.page_offset) {
                vfree((void *)fnic_trace_entries.page_offset);
                fnic_trace_entries.page_offset = NULL;
        }
        if (fnic_trace_buf_p) {
                vfree((void *)fnic_trace_buf_p);
                fnic_trace_buf_p = 0;
        }
        printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
}
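
/*
 * Both trace facilities in this file use module-scope state (the buffers and
 * index bookkeeping above are globals, not per-adapter), so
 * fnic_trace_buf_init()/fnic_trace_free() are expected to be called once per
 * module load and unload; the exact call sites are assumed to live in the
 * driver's module init/exit path rather than shown here.
 */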

/*
 * fnic_fc_trace_init -
 *        Initialize trace buffer to log fnic control frames
 * Description:
 * Initialize trace buffer data structure by allocating
 * required memory for trace data as well as for indexes.
 * Frame size is 256 bytes and
 * memory is allocated for 1024 entries of 256 bytes.
 * Page_offset(Index) is set to the address of trace entry
 * and page_offset is initialized by adding frame size
 * to the previous page_offset entry.
 */

int fnic_fc_trace_init(void)
{
        unsigned long fc_trace_buf_head;
        int err = 0;
        int i;

        fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
                                FC_TRC_SIZE_BYTES;
        fnic_fc_ctlr_trace_buf_p =
                (unsigned long)vmalloc(array_size(PAGE_SIZE,
                                                  fnic_fc_trace_max_pages));
        if (!fnic_fc_ctlr_trace_buf_p) {
                pr_err("fnic: Failed to allocate memory for "
                       "FC Control Trace Buf\n");
                err = -ENOMEM;
                goto err_fnic_fc_ctlr_trace_buf_init;
        }

        memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
                        fnic_fc_trace_max_pages * PAGE_SIZE);

        /* Allocate memory for page offset */
        fc_trace_entries.page_offset =
                vmalloc(array_size(fc_trace_max_entries,
                                   sizeof(unsigned long)));
        if (!fc_trace_entries.page_offset) {
                pr_err("fnic: Failed to allocate memory for page_offset\n");
                if (fnic_fc_ctlr_trace_buf_p) {
                        pr_err("fnic: Freeing FC Control Trace Buf\n");
                        vfree((void *)fnic_fc_ctlr_trace_buf_p);
                        fnic_fc_ctlr_trace_buf_p = 0;
                }
                err = -ENOMEM;
                goto err_fnic_fc_ctlr_trace_buf_init;
        }
        memset((void *)fc_trace_entries.page_offset, 0,
               (fc_trace_max_entries * sizeof(unsigned long)));

        fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
        fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;

        /*
         * Set up fc_trace_entries.page_offset field with memory location
         * for every trace entry
         */
        for (i = 0; i < fc_trace_max_entries; i++) {
                fc_trace_entries.page_offset[i] = fc_trace_buf_head;
                fc_trace_buf_head += FC_TRC_SIZE_BYTES;
        }
        fnic_fc_trace_debugfs_init();
        pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n");
        return err;

err_fnic_fc_ctlr_trace_buf_init:
        return err;
}

/*
 * fnic_fc_trace_free - Free memory of fnic_fc_ctlr trace data structures.
 */
void fnic_fc_trace_free(void)
{
        fnic_fc_tracing_enabled = 0;
        fnic_fc_trace_debugfs_terminate();
        if (fc_trace_entries.page_offset) {
                vfree((void *)fc_trace_entries.page_offset);
                fc_trace_entries.page_offset = NULL;
        }
        if (fnic_fc_ctlr_trace_buf_p) {
                vfree((void *)fnic_fc_ctlr_trace_buf_p);
                fnic_fc_ctlr_trace_buf_p = 0;
        }
        pr_info("fnic: Successfully Freed FC_CTLR Trace Buffer\n");
}

/*
 * fnic_fc_trace_set_data -
 *        Maintain rd & wr idx accordingly and set data
 * Passed parameters:
 *        host_no: host number associated with fnic
 *        frame_type: send frame, receive frame or link event
 *        frame: pointer to the FC frame
 *        fc_trc_frame_len: length of the FC frame
 * Description:
 * This routine will get the next available wr_idx and
 * copy all passed trace data to the buffer pointed to by wr_idx
 * and increment wr_idx. It will also make sure that we don't
 * overwrite the entry which we are reading and also
 * wrap around if we reach the maximum entries.
 * Return Value:
 * It will return 0 for success or -1 for failure
 */
int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
                           char *frame, u32 fc_trc_frame_len)
{
        unsigned long flags;
        struct fc_trace_hdr *fc_buf;
        unsigned long eth_fcoe_hdr_len;
        char *fc_trace;

        if (fnic_fc_tracing_enabled == 0)
                return 0;

        spin_lock_irqsave(&fnic_fc_trace_lock, flags);

        if (fnic_fc_trace_cleared == 1) {
                fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
                pr_info("fnic: Resetting the read idx\n");
                memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
                                fnic_fc_trace_max_pages * PAGE_SIZE);
                fnic_fc_trace_cleared = 0;
        }

        fc_buf = (struct fc_trace_hdr *)
                fc_trace_entries.page_offset[fc_trace_entries.wr_idx];

        fc_trace_entries.wr_idx++;

        if (fc_trace_entries.wr_idx >= fc_trace_max_entries)
                fc_trace_entries.wr_idx = 0;

        if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
                fc_trace_entries.rd_idx++;
                if (fc_trace_entries.rd_idx >= fc_trace_max_entries)
                        fc_trace_entries.rd_idx = 0;
        }

        ktime_get_real_ts64(&fc_buf->time_stamp);
        fc_buf->host_no = host_no;
        fc_buf->frame_type = frame_type;

        fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf);

        /* During the receive path, we do not have eth hdr as well as fcoe hdr
         * at trace entry point so we will stuff 0xff just to make it generic.
         */
        if (frame_type == FNIC_FC_RECV) {
                eth_fcoe_hdr_len = sizeof(struct ethhdr) +
                                        sizeof(struct fcoe_hdr);
                memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
                /* Copy the rest of data frame */
                memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
                       min_t(u8, fc_trc_frame_len,
                             (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE
                                        - eth_fcoe_hdr_len)));
        } else {
                memcpy((char *)fc_trace, (void *)frame,
                       min_t(u8, fc_trc_frame_len,
                             (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
        }

        /* Store the actual received length */
        fc_buf->frame_len = fc_trc_frame_len;

        spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
        return 0;
}
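
/*
 * Illustrative call (hypothetical, not part of this file): the send and
 * receive paths are expected to log frames along the lines of
 *
 *      fnic_fc_trace_set_data(host_no, FNIC_FC_RECV,
 *                             (char *)frame_data, frame_len);
 *
 * FNIC_FC_RECV and FNIC_FC_LE are the frame_type values referenced in this
 * file; host_no, frame_data and frame_len are placeholders for the example.
 */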

/*
 * fnic_fc_trace_get_data - Copy trace buffer to a memory file
 * Passed parameter:
 *       @fnic_dbgfs_t: pointer to debugfs trace buffer
 *       rdata_flag: 1 => Unformatted file
 *                   0 => Formatted file
 * Description:
 * This routine copies the trace data to the memory file with
 * proper formatting, or as a raw dump (no formatting) for
 * further processing when rdata_flag is set.
 * Return Value:
 * Number of bytes that were dumped into fnic_dbgfs_t
 */

int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
{
        int rd_idx, wr_idx;
        unsigned long flags;
        int len = 0, j;
        struct fc_trace_hdr *tdata;
        char *fc_trace;

        spin_lock_irqsave(&fnic_fc_trace_lock, flags);
        if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
                spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
                pr_info("fnic: Buffer is empty\n");
                return 0;
        }
        rd_idx = fc_trace_entries.rd_idx;
        wr_idx = fc_trace_entries.wr_idx;
        if (rdata_flag == 0) {
                len += scnprintf(fnic_dbgfs_prt->buffer + len,
                        (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
                        "Time Stamp (UTC)\t\t"
                        "Host No: F Type: len: FCoE_FRAME:\n");
        }

        while (rd_idx != wr_idx) {
                tdata = (struct fc_trace_hdr *)
                        fc_trace_entries.page_offset[rd_idx];
                if (!tdata) {
                        pr_info("fnic: Rd data is NULL\n");
                        spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
                        return 0;
                }
                if (rdata_flag == 0) {
                        copy_and_format_trace_data(tdata,
                                fnic_dbgfs_prt, &len, rdata_flag);
                } else {
                        fc_trace = (char *)tdata;
                        for (j = 0; j < FC_TRC_SIZE_BYTES; j++) {
                                len += scnprintf(fnic_dbgfs_prt->buffer + len,
                                        (fnic_fc_trace_max_pages * PAGE_SIZE * 3)
                                        - len, "%02x", fc_trace[j] & 0xff);
                        } /* for loop */
                        len += scnprintf(fnic_dbgfs_prt->buffer + len,
                                (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
                                "\n");
                }
                rd_idx++;
                if (rd_idx > (fc_trace_max_entries - 1))
                        rd_idx = 0;
        }

        spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
        return len;
}
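
/*
 * Note: with rdata_flag != 0 each trace entry is emitted as one line of
 * 2 * FC_TRC_SIZE_BYTES hex characters (header plus frame bytes), which is
 * the form intended for offline decoding; rdata_flag == 0 produces the
 * human-readable layout built by copy_and_format_trace_data() below.
 */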

/*
 * copy_and_format_trace_data - Copy formatted data to char * buffer
 * Passed Parameter:
 *      @fc_trace_hdr_t: pointer to trace data
 *      @fnic_dbgfs_t: pointer to debugfs trace buffer
 *      @orig_len: pointer to len
 *      rdata_flag: 0 => Formatted file, 1 => Unformatted file
 * Description:
 * This routine will format and copy the passed trace data
 * for a formatted or an unformatted file accordingly.
 */

void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
                                fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len,
                                u8 rdata_flag)
{
        struct tm tm;
        int j, i = 1, len;
        char *fc_trace, *fmt;
        int ethhdr_len = sizeof(struct ethhdr) - 1;
        int fcoehdr_len = sizeof(struct fcoe_hdr);
        int fchdr_len = sizeof(struct fc_frame_header);
        int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3;

        tdata->frame_type = tdata->frame_type & 0x7F;

        len = *orig_len;

        time64_to_tm(tdata->time_stamp.tv_sec, 0, &tm);

        fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
        len += scnprintf(fnic_dbgfs_prt->buffer + len,
                max_size - len,
                fmt,
                tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
                tm.tm_hour, tm.tm_min, tm.tm_sec,
                tdata->time_stamp.tv_nsec, tdata->host_no,
                tdata->frame_type, tdata->frame_len);

        fc_trace = (char *)FC_TRACE_ADDRESS(tdata);

        for (j = 0; j < min_t(u8, tdata->frame_len,
                (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) {
                if (tdata->frame_type == FNIC_FC_LE) {
                        len += scnprintf(fnic_dbgfs_prt->buffer + len,
                                max_size - len, "%c", fc_trace[j]);
                } else {
                        len += scnprintf(fnic_dbgfs_prt->buffer + len,
                                max_size - len, "%02x", fc_trace[j] & 0xff);
                        len += scnprintf(fnic_dbgfs_prt->buffer + len,
                                max_size - len, " ");
                        if (j == ethhdr_len ||
                            j == ethhdr_len + fcoehdr_len ||
                            j == ethhdr_len + fcoehdr_len + fchdr_len ||
                            (i > 3 && j % fchdr_len == 0)) {
                                len += scnprintf(fnic_dbgfs_prt->buffer
                                        + len, max_size - len,
                                        "\n\t\t\t\t\t\t\t\t");
                                i++;
                        }
                } /* end of else */
        } /* End of for loop */
        len += scnprintf(fnic_dbgfs_prt->buffer + len,
                max_size - len, "\n");
        *orig_len = len;
}