1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Copyright (c) 2017, 2018 Oracle. All rights reserved. 4 * 5 * Trace point definitions for the "rpcrdma" subsystem. 6 */ 7 #undef TRACE_SYSTEM 8 #define TRACE_SYSTEM rpcrdma 9 10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ) 11 #define _TRACE_RPCRDMA_H 12 13 #include <linux/scatterlist.h> 14 #include <linux/sunrpc/rpc_rdma_cid.h> 15 #include <linux/tracepoint.h> 16 #include <rdma/ib_cm.h> 17 #include <trace/events/rdma.h> 18 19 /** 20 ** Event classes 21 **/ 22 23 DECLARE_EVENT_CLASS(rpcrdma_completion_class, 24 TP_PROTO( 25 const struct ib_wc *wc, 26 const struct rpc_rdma_cid *cid 27 ), 28 29 TP_ARGS(wc, cid), 30 31 TP_STRUCT__entry( 32 __field(u32, cq_id) 33 __field(int, completion_id) 34 __field(unsigned long, status) 35 __field(unsigned int, vendor_err) 36 ), 37 38 TP_fast_assign( 39 __entry->cq_id = cid->ci_queue_id; 40 __entry->completion_id = cid->ci_completion_id; 41 __entry->status = wc->status; 42 if (wc->status) 43 __entry->vendor_err = wc->vendor_err; 44 else 45 __entry->vendor_err = 0; 46 ), 47 48 TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)", 49 __entry->cq_id, __entry->completion_id, 50 rdma_show_wc_status(__entry->status), 51 __entry->status, __entry->vendor_err 52 ) 53 ); 54 55 #define DEFINE_COMPLETION_EVENT(name) \ 56 DEFINE_EVENT(rpcrdma_completion_class, name, \ 57 TP_PROTO( \ 58 const struct ib_wc *wc, \ 59 const struct rpc_rdma_cid *cid \ 60 ), \ 61 TP_ARGS(wc, cid)) 62 63 DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class, 64 TP_PROTO( 65 const struct ib_wc *wc, 66 const struct rpc_rdma_cid *cid 67 ), 68 69 TP_ARGS(wc, cid), 70 71 TP_STRUCT__entry( 72 __field(u32, cq_id) 73 __field(int, completion_id) 74 __field(unsigned long, status) 75 __field(unsigned int, vendor_err) 76 ), 77 78 TP_fast_assign( 79 __entry->cq_id = cid->ci_queue_id; 80 __entry->completion_id = cid->ci_completion_id; 81 __entry->status = wc->status; 82 if (wc->status) 83 __entry->vendor_err = wc->vendor_err; 84 else 85 __entry->vendor_err = 0; 86 ), 87 88 TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)", 89 __entry->cq_id, __entry->completion_id, 90 rdma_show_wc_status(__entry->status), 91 __entry->status, __entry->vendor_err 92 ) 93 ); 94 95 #define DEFINE_MR_COMPLETION_EVENT(name) \ 96 DEFINE_EVENT(rpcrdma_mr_completion_class, name, \ 97 TP_PROTO( \ 98 const struct ib_wc *wc, \ 99 const struct rpc_rdma_cid *cid \ 100 ), \ 101 TP_ARGS(wc, cid)) 102 103 DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class, 104 TP_PROTO( 105 const struct ib_wc *wc, 106 const struct rpc_rdma_cid *cid 107 ), 108 109 TP_ARGS(wc, cid), 110 111 TP_STRUCT__entry( 112 __field(u32, cq_id) 113 __field(int, completion_id) 114 __field(u32, received) 115 __field(unsigned long, status) 116 __field(unsigned int, vendor_err) 117 ), 118 119 TP_fast_assign( 120 __entry->cq_id = cid->ci_queue_id; 121 __entry->completion_id = cid->ci_completion_id; 122 __entry->status = wc->status; 123 if (wc->status) { 124 __entry->received = 0; 125 __entry->vendor_err = wc->vendor_err; 126 } else { 127 __entry->received = wc->byte_len; 128 __entry->vendor_err = 0; 129 } 130 ), 131 132 TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u", 133 __entry->cq_id, __entry->completion_id, 134 rdma_show_wc_status(__entry->status), 135 __entry->status, __entry->vendor_err, 136 __entry->received 137 ) 138 ); 139 140 #define DEFINE_RECEIVE_COMPLETION_EVENT(name) \ 141 DEFINE_EVENT(rpcrdma_receive_completion_class, name, \ 142 TP_PROTO( \ 143 const struct ib_wc *wc, \ 144 const struct rpc_rdma_cid *cid \ 145 ), \ 146 TP_ARGS(wc, cid)) 147 148 DECLARE_EVENT_CLASS(xprtrdma_reply_class, 149 TP_PROTO( 150 const struct rpcrdma_rep *rep 151 ), 152 153 TP_ARGS(rep), 154 155 TP_STRUCT__entry( 156 __field(u32, xid) 157 __field(u32, version) 158 __field(u32, proc) 159 __string(addr, rpcrdma_addrstr(rep->rr_rxprt)) 160 __string(port, rpcrdma_portstr(rep->rr_rxprt)) 161 ), 162 163 TP_fast_assign( 164 __entry->xid = be32_to_cpu(rep->rr_xid); 165 __entry->version = be32_to_cpu(rep->rr_vers); 166 __entry->proc = be32_to_cpu(rep->rr_proc); 167 __assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt)); 168 __assign_str(port, rpcrdma_portstr(rep->rr_rxprt)); 169 ), 170 171 TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u", 172 __get_str(addr), __get_str(port), 173 __entry->xid, __entry->version, __entry->proc 174 ) 175 ); 176 177 #define DEFINE_REPLY_EVENT(name) \ 178 DEFINE_EVENT(xprtrdma_reply_class, \ 179 xprtrdma_reply_##name##_err, \ 180 TP_PROTO( \ 181 const struct rpcrdma_rep *rep \ 182 ), \ 183 TP_ARGS(rep)) 184 185 DECLARE_EVENT_CLASS(xprtrdma_rxprt, 186 TP_PROTO( 187 const struct rpcrdma_xprt *r_xprt 188 ), 189 190 TP_ARGS(r_xprt), 191 192 TP_STRUCT__entry( 193 __string(addr, rpcrdma_addrstr(r_xprt)) 194 __string(port, rpcrdma_portstr(r_xprt)) 195 ), 196 197 TP_fast_assign( 198 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 199 __assign_str(port, rpcrdma_portstr(r_xprt)); 200 ), 201 202 TP_printk("peer=[%s]:%s", 203 __get_str(addr), __get_str(port) 204 ) 205 ); 206 207 #define DEFINE_RXPRT_EVENT(name) \ 208 DEFINE_EVENT(xprtrdma_rxprt, name, \ 209 TP_PROTO( \ 210 const struct rpcrdma_xprt *r_xprt \ 211 ), \ 212 TP_ARGS(r_xprt)) 213 214 DECLARE_EVENT_CLASS(xprtrdma_connect_class, 215 TP_PROTO( 216 const struct rpcrdma_xprt *r_xprt, 217 int rc 218 ), 219 220 TP_ARGS(r_xprt, rc), 221 222 TP_STRUCT__entry( 223 __field(int, rc) 224 __field(int, connect_status) 225 __string(addr, rpcrdma_addrstr(r_xprt)) 226 __string(port, rpcrdma_portstr(r_xprt)) 227 ), 228 229 TP_fast_assign( 230 __entry->rc = rc; 231 __entry->connect_status = r_xprt->rx_ep->re_connect_status; 232 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 233 __assign_str(port, rpcrdma_portstr(r_xprt)); 234 ), 235 236 TP_printk("peer=[%s]:%s rc=%d connection status=%d", 237 __get_str(addr), __get_str(port), 238 __entry->rc, __entry->connect_status 239 ) 240 ); 241 242 #define DEFINE_CONN_EVENT(name) \ 243 DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name, \ 244 TP_PROTO( \ 245 const struct rpcrdma_xprt *r_xprt, \ 246 int rc \ 247 ), \ 248 TP_ARGS(r_xprt, rc)) 249 250 DECLARE_EVENT_CLASS(xprtrdma_rdch_event, 251 TP_PROTO( 252 const struct rpc_task *task, 253 unsigned int pos, 254 struct rpcrdma_mr *mr, 255 int nsegs 256 ), 257 258 TP_ARGS(task, pos, mr, nsegs), 259 260 TP_STRUCT__entry( 261 __field(unsigned int, task_id) 262 __field(unsigned int, client_id) 263 __field(unsigned int, pos) 264 __field(int, nents) 265 __field(u32, handle) 266 __field(u32, length) 267 __field(u64, offset) 268 __field(int, nsegs) 269 ), 270 271 TP_fast_assign( 272 __entry->task_id = task->tk_pid; 273 __entry->client_id = task->tk_client->cl_clid; 274 __entry->pos = pos; 275 __entry->nents = mr->mr_nents; 276 __entry->handle = mr->mr_handle; 277 __entry->length = mr->mr_length; 278 __entry->offset = mr->mr_offset; 279 __entry->nsegs = nsegs; 280 ), 281 282 TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)", 283 __entry->task_id, __entry->client_id, 284 __entry->pos, __entry->length, 285 (unsigned long long)__entry->offset, __entry->handle, 286 __entry->nents < __entry->nsegs ? "more" : "last" 287 ) 288 ); 289 290 #define DEFINE_RDCH_EVENT(name) \ 291 DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\ 292 TP_PROTO( \ 293 const struct rpc_task *task, \ 294 unsigned int pos, \ 295 struct rpcrdma_mr *mr, \ 296 int nsegs \ 297 ), \ 298 TP_ARGS(task, pos, mr, nsegs)) 299 300 DECLARE_EVENT_CLASS(xprtrdma_wrch_event, 301 TP_PROTO( 302 const struct rpc_task *task, 303 struct rpcrdma_mr *mr, 304 int nsegs 305 ), 306 307 TP_ARGS(task, mr, nsegs), 308 309 TP_STRUCT__entry( 310 __field(unsigned int, task_id) 311 __field(unsigned int, client_id) 312 __field(int, nents) 313 __field(u32, handle) 314 __field(u32, length) 315 __field(u64, offset) 316 __field(int, nsegs) 317 ), 318 319 TP_fast_assign( 320 __entry->task_id = task->tk_pid; 321 __entry->client_id = task->tk_client->cl_clid; 322 __entry->nents = mr->mr_nents; 323 __entry->handle = mr->mr_handle; 324 __entry->length = mr->mr_length; 325 __entry->offset = mr->mr_offset; 326 __entry->nsegs = nsegs; 327 ), 328 329 TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)", 330 __entry->task_id, __entry->client_id, 331 __entry->length, (unsigned long long)__entry->offset, 332 __entry->handle, 333 __entry->nents < __entry->nsegs ? "more" : "last" 334 ) 335 ); 336 337 #define DEFINE_WRCH_EVENT(name) \ 338 DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\ 339 TP_PROTO( \ 340 const struct rpc_task *task, \ 341 struct rpcrdma_mr *mr, \ 342 int nsegs \ 343 ), \ 344 TP_ARGS(task, mr, nsegs)) 345 346 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL); 347 TRACE_DEFINE_ENUM(DMA_TO_DEVICE); 348 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE); 349 TRACE_DEFINE_ENUM(DMA_NONE); 350 351 #define xprtrdma_show_direction(x) \ 352 __print_symbolic(x, \ 353 { DMA_BIDIRECTIONAL, "BIDIR" }, \ 354 { DMA_TO_DEVICE, "TO_DEVICE" }, \ 355 { DMA_FROM_DEVICE, "FROM_DEVICE" }, \ 356 { DMA_NONE, "NONE" }) 357 358 DECLARE_EVENT_CLASS(xprtrdma_mr_class, 359 TP_PROTO( 360 const struct rpcrdma_mr *mr 361 ), 362 363 TP_ARGS(mr), 364 365 TP_STRUCT__entry( 366 __field(unsigned int, task_id) 367 __field(unsigned int, client_id) 368 __field(u32, mr_id) 369 __field(int, nents) 370 __field(u32, handle) 371 __field(u32, length) 372 __field(u64, offset) 373 __field(u32, dir) 374 ), 375 376 TP_fast_assign( 377 const struct rpcrdma_req *req = mr->mr_req; 378 const struct rpc_task *task = req->rl_slot.rq_task; 379 380 __entry->task_id = task->tk_pid; 381 __entry->client_id = task->tk_client->cl_clid; 382 __entry->mr_id = mr->mr_ibmr->res.id; 383 __entry->nents = mr->mr_nents; 384 __entry->handle = mr->mr_handle; 385 __entry->length = mr->mr_length; 386 __entry->offset = mr->mr_offset; 387 __entry->dir = mr->mr_dir; 388 ), 389 390 TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)", 391 __entry->task_id, __entry->client_id, 392 __entry->mr_id, __entry->nents, __entry->length, 393 (unsigned long long)__entry->offset, __entry->handle, 394 xprtrdma_show_direction(__entry->dir) 395 ) 396 ); 397 398 #define DEFINE_MR_EVENT(name) \ 399 DEFINE_EVENT(xprtrdma_mr_class, \ 400 xprtrdma_mr_##name, \ 401 TP_PROTO( \ 402 const struct rpcrdma_mr *mr \ 403 ), \ 404 TP_ARGS(mr)) 405 406 DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class, 407 TP_PROTO( 408 const struct rpcrdma_mr *mr 409 ), 410 411 TP_ARGS(mr), 412 413 TP_STRUCT__entry( 414 __field(u32, mr_id) 415 __field(int, nents) 416 __field(u32, handle) 417 __field(u32, length) 418 __field(u64, offset) 419 __field(u32, dir) 420 ), 421 422 TP_fast_assign( 423 __entry->mr_id = mr->mr_ibmr->res.id; 424 __entry->nents = mr->mr_nents; 425 __entry->handle = mr->mr_handle; 426 __entry->length = mr->mr_length; 427 __entry->offset = mr->mr_offset; 428 __entry->dir = mr->mr_dir; 429 ), 430 431 TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)", 432 __entry->mr_id, __entry->nents, __entry->length, 433 (unsigned long long)__entry->offset, __entry->handle, 434 xprtrdma_show_direction(__entry->dir) 435 ) 436 ); 437 438 #define DEFINE_ANON_MR_EVENT(name) \ 439 DEFINE_EVENT(xprtrdma_anonymous_mr_class, \ 440 xprtrdma_mr_##name, \ 441 TP_PROTO( \ 442 const struct rpcrdma_mr *mr \ 443 ), \ 444 TP_ARGS(mr)) 445 446 DECLARE_EVENT_CLASS(xprtrdma_callback_class, 447 TP_PROTO( 448 const struct rpcrdma_xprt *r_xprt, 449 const struct rpc_rqst *rqst 450 ), 451 452 TP_ARGS(r_xprt, rqst), 453 454 TP_STRUCT__entry( 455 __field(u32, xid) 456 __string(addr, rpcrdma_addrstr(r_xprt)) 457 __string(port, rpcrdma_portstr(r_xprt)) 458 ), 459 460 TP_fast_assign( 461 __entry->xid = be32_to_cpu(rqst->rq_xid); 462 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 463 __assign_str(port, rpcrdma_portstr(r_xprt)); 464 ), 465 466 TP_printk("peer=[%s]:%s xid=0x%08x", 467 __get_str(addr), __get_str(port), __entry->xid 468 ) 469 ); 470 471 #define DEFINE_CALLBACK_EVENT(name) \ 472 DEFINE_EVENT(xprtrdma_callback_class, \ 473 xprtrdma_cb_##name, \ 474 TP_PROTO( \ 475 const struct rpcrdma_xprt *r_xprt, \ 476 const struct rpc_rqst *rqst \ 477 ), \ 478 TP_ARGS(r_xprt, rqst)) 479 480 /** 481 ** Connection events 482 **/ 483 484 TRACE_EVENT(xprtrdma_inline_thresh, 485 TP_PROTO( 486 const struct rpcrdma_ep *ep 487 ), 488 489 TP_ARGS(ep), 490 491 TP_STRUCT__entry( 492 __field(unsigned int, inline_send) 493 __field(unsigned int, inline_recv) 494 __field(unsigned int, max_send) 495 __field(unsigned int, max_recv) 496 __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 497 __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) 498 ), 499 500 TP_fast_assign( 501 const struct rdma_cm_id *id = ep->re_id; 502 503 __entry->inline_send = ep->re_inline_send; 504 __entry->inline_recv = ep->re_inline_recv; 505 __entry->max_send = ep->re_max_inline_send; 506 __entry->max_recv = ep->re_max_inline_recv; 507 memcpy(__entry->srcaddr, &id->route.addr.src_addr, 508 sizeof(struct sockaddr_in6)); 509 memcpy(__entry->dstaddr, &id->route.addr.dst_addr, 510 sizeof(struct sockaddr_in6)); 511 ), 512 513 TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u", 514 __entry->srcaddr, __entry->dstaddr, 515 __entry->inline_send, __entry->inline_recv, 516 __entry->max_send, __entry->max_recv 517 ) 518 ); 519 520 DEFINE_CONN_EVENT(connect); 521 DEFINE_CONN_EVENT(disconnect); 522 523 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc); 524 525 TRACE_EVENT(xprtrdma_op_connect, 526 TP_PROTO( 527 const struct rpcrdma_xprt *r_xprt, 528 unsigned long delay 529 ), 530 531 TP_ARGS(r_xprt, delay), 532 533 TP_STRUCT__entry( 534 __field(unsigned long, delay) 535 __string(addr, rpcrdma_addrstr(r_xprt)) 536 __string(port, rpcrdma_portstr(r_xprt)) 537 ), 538 539 TP_fast_assign( 540 __entry->delay = delay; 541 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 542 __assign_str(port, rpcrdma_portstr(r_xprt)); 543 ), 544 545 TP_printk("peer=[%s]:%s delay=%lu", 546 __get_str(addr), __get_str(port), __entry->delay 547 ) 548 ); 549 550 551 TRACE_EVENT(xprtrdma_op_set_cto, 552 TP_PROTO( 553 const struct rpcrdma_xprt *r_xprt, 554 unsigned long connect, 555 unsigned long reconnect 556 ), 557 558 TP_ARGS(r_xprt, connect, reconnect), 559 560 TP_STRUCT__entry( 561 __field(unsigned long, connect) 562 __field(unsigned long, reconnect) 563 __string(addr, rpcrdma_addrstr(r_xprt)) 564 __string(port, rpcrdma_portstr(r_xprt)) 565 ), 566 567 TP_fast_assign( 568 __entry->connect = connect; 569 __entry->reconnect = reconnect; 570 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 571 __assign_str(port, rpcrdma_portstr(r_xprt)); 572 ), 573 574 TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu", 575 __get_str(addr), __get_str(port), 576 __entry->connect / HZ, __entry->reconnect / HZ 577 ) 578 ); 579 580 /** 581 ** Call events 582 **/ 583 584 TRACE_EVENT(xprtrdma_createmrs, 585 TP_PROTO( 586 const struct rpcrdma_xprt *r_xprt, 587 unsigned int count 588 ), 589 590 TP_ARGS(r_xprt, count), 591 592 TP_STRUCT__entry( 593 __string(addr, rpcrdma_addrstr(r_xprt)) 594 __string(port, rpcrdma_portstr(r_xprt)) 595 __field(unsigned int, count) 596 ), 597 598 TP_fast_assign( 599 __entry->count = count; 600 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 601 __assign_str(port, rpcrdma_portstr(r_xprt)); 602 ), 603 604 TP_printk("peer=[%s]:%s created %u MRs", 605 __get_str(addr), __get_str(port), __entry->count 606 ) 607 ); 608 609 TRACE_EVENT(xprtrdma_nomrs_err, 610 TP_PROTO( 611 const struct rpcrdma_xprt *r_xprt, 612 const struct rpcrdma_req *req 613 ), 614 615 TP_ARGS(r_xprt, req), 616 617 TP_STRUCT__entry( 618 __field(unsigned int, task_id) 619 __field(unsigned int, client_id) 620 __string(addr, rpcrdma_addrstr(r_xprt)) 621 __string(port, rpcrdma_portstr(r_xprt)) 622 ), 623 624 TP_fast_assign( 625 const struct rpc_rqst *rqst = &req->rl_slot; 626 627 __entry->task_id = rqst->rq_task->tk_pid; 628 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 629 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 630 __assign_str(port, rpcrdma_portstr(r_xprt)); 631 ), 632 633 TP_printk("peer=[%s]:%s task:%u@%u", 634 __get_str(addr), __get_str(port), 635 __entry->task_id, __entry->client_id 636 ) 637 ); 638 639 DEFINE_RDCH_EVENT(read); 640 DEFINE_WRCH_EVENT(write); 641 DEFINE_WRCH_EVENT(reply); 642 643 TRACE_DEFINE_ENUM(rpcrdma_noch); 644 TRACE_DEFINE_ENUM(rpcrdma_noch_pullup); 645 TRACE_DEFINE_ENUM(rpcrdma_noch_mapped); 646 TRACE_DEFINE_ENUM(rpcrdma_readch); 647 TRACE_DEFINE_ENUM(rpcrdma_areadch); 648 TRACE_DEFINE_ENUM(rpcrdma_writech); 649 TRACE_DEFINE_ENUM(rpcrdma_replych); 650 651 #define xprtrdma_show_chunktype(x) \ 652 __print_symbolic(x, \ 653 { rpcrdma_noch, "inline" }, \ 654 { rpcrdma_noch_pullup, "pullup" }, \ 655 { rpcrdma_noch_mapped, "mapped" }, \ 656 { rpcrdma_readch, "read list" }, \ 657 { rpcrdma_areadch, "*read list" }, \ 658 { rpcrdma_writech, "write list" }, \ 659 { rpcrdma_replych, "reply chunk" }) 660 661 TRACE_EVENT(xprtrdma_marshal, 662 TP_PROTO( 663 const struct rpcrdma_req *req, 664 unsigned int rtype, 665 unsigned int wtype 666 ), 667 668 TP_ARGS(req, rtype, wtype), 669 670 TP_STRUCT__entry( 671 __field(unsigned int, task_id) 672 __field(unsigned int, client_id) 673 __field(u32, xid) 674 __field(unsigned int, hdrlen) 675 __field(unsigned int, headlen) 676 __field(unsigned int, pagelen) 677 __field(unsigned int, taillen) 678 __field(unsigned int, rtype) 679 __field(unsigned int, wtype) 680 ), 681 682 TP_fast_assign( 683 const struct rpc_rqst *rqst = &req->rl_slot; 684 685 __entry->task_id = rqst->rq_task->tk_pid; 686 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 687 __entry->xid = be32_to_cpu(rqst->rq_xid); 688 __entry->hdrlen = req->rl_hdrbuf.len; 689 __entry->headlen = rqst->rq_snd_buf.head[0].iov_len; 690 __entry->pagelen = rqst->rq_snd_buf.page_len; 691 __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len; 692 __entry->rtype = rtype; 693 __entry->wtype = wtype; 694 ), 695 696 TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s", 697 __entry->task_id, __entry->client_id, __entry->xid, 698 __entry->hdrlen, 699 __entry->headlen, __entry->pagelen, __entry->taillen, 700 xprtrdma_show_chunktype(__entry->rtype), 701 xprtrdma_show_chunktype(__entry->wtype) 702 ) 703 ); 704 705 TRACE_EVENT(xprtrdma_marshal_failed, 706 TP_PROTO(const struct rpc_rqst *rqst, 707 int ret 708 ), 709 710 TP_ARGS(rqst, ret), 711 712 TP_STRUCT__entry( 713 __field(unsigned int, task_id) 714 __field(unsigned int, client_id) 715 __field(u32, xid) 716 __field(int, ret) 717 ), 718 719 TP_fast_assign( 720 __entry->task_id = rqst->rq_task->tk_pid; 721 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 722 __entry->xid = be32_to_cpu(rqst->rq_xid); 723 __entry->ret = ret; 724 ), 725 726 TP_printk("task:%u@%u xid=0x%08x: ret=%d", 727 __entry->task_id, __entry->client_id, __entry->xid, 728 __entry->ret 729 ) 730 ); 731 732 TRACE_EVENT(xprtrdma_prepsend_failed, 733 TP_PROTO(const struct rpc_rqst *rqst, 734 int ret 735 ), 736 737 TP_ARGS(rqst, ret), 738 739 TP_STRUCT__entry( 740 __field(unsigned int, task_id) 741 __field(unsigned int, client_id) 742 __field(u32, xid) 743 __field(int, ret) 744 ), 745 746 TP_fast_assign( 747 __entry->task_id = rqst->rq_task->tk_pid; 748 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 749 __entry->xid = be32_to_cpu(rqst->rq_xid); 750 __entry->ret = ret; 751 ), 752 753 TP_printk("task:%u@%u xid=0x%08x: ret=%d", 754 __entry->task_id, __entry->client_id, __entry->xid, 755 __entry->ret 756 ) 757 ); 758 759 TRACE_EVENT(xprtrdma_post_send, 760 TP_PROTO( 761 const struct rpcrdma_req *req 762 ), 763 764 TP_ARGS(req), 765 766 TP_STRUCT__entry( 767 __field(u32, cq_id) 768 __field(int, completion_id) 769 __field(unsigned int, task_id) 770 __field(unsigned int, client_id) 771 __field(int, num_sge) 772 __field(int, signaled) 773 ), 774 775 TP_fast_assign( 776 const struct rpc_rqst *rqst = &req->rl_slot; 777 const struct rpcrdma_sendctx *sc = req->rl_sendctx; 778 779 __entry->cq_id = sc->sc_cid.ci_queue_id; 780 __entry->completion_id = sc->sc_cid.ci_completion_id; 781 __entry->task_id = rqst->rq_task->tk_pid; 782 __entry->client_id = rqst->rq_task->tk_client ? 783 rqst->rq_task->tk_client->cl_clid : -1; 784 __entry->num_sge = req->rl_wr.num_sge; 785 __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED; 786 ), 787 788 TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s", 789 __entry->task_id, __entry->client_id, 790 __entry->cq_id, __entry->completion_id, 791 __entry->num_sge, (__entry->num_sge == 1 ? "" : "s"), 792 (__entry->signaled ? "signaled" : "") 793 ) 794 ); 795 796 TRACE_EVENT(xprtrdma_post_recv, 797 TP_PROTO( 798 const struct rpcrdma_rep *rep 799 ), 800 801 TP_ARGS(rep), 802 803 TP_STRUCT__entry( 804 __field(u32, cq_id) 805 __field(int, completion_id) 806 ), 807 808 TP_fast_assign( 809 __entry->cq_id = rep->rr_cid.ci_queue_id; 810 __entry->completion_id = rep->rr_cid.ci_completion_id; 811 ), 812 813 TP_printk("cq.id=%d cid=%d", 814 __entry->cq_id, __entry->completion_id 815 ) 816 ); 817 818 TRACE_EVENT(xprtrdma_post_recvs, 819 TP_PROTO( 820 const struct rpcrdma_xprt *r_xprt, 821 unsigned int count, 822 int status 823 ), 824 825 TP_ARGS(r_xprt, count, status), 826 827 TP_STRUCT__entry( 828 __field(u32, cq_id) 829 __field(unsigned int, count) 830 __field(int, status) 831 __field(int, posted) 832 __string(addr, rpcrdma_addrstr(r_xprt)) 833 __string(port, rpcrdma_portstr(r_xprt)) 834 ), 835 836 TP_fast_assign( 837 const struct rpcrdma_ep *ep = r_xprt->rx_ep; 838 839 __entry->cq_id = ep->re_attr.recv_cq->res.id; 840 __entry->count = count; 841 __entry->status = status; 842 __entry->posted = ep->re_receive_count; 843 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 844 __assign_str(port, rpcrdma_portstr(r_xprt)); 845 ), 846 847 TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active (rc %d)", 848 __get_str(addr), __get_str(port), __entry->cq_id, 849 __entry->count, __entry->posted, __entry->status 850 ) 851 ); 852 853 TRACE_EVENT(xprtrdma_post_linv_err, 854 TP_PROTO( 855 const struct rpcrdma_req *req, 856 int status 857 ), 858 859 TP_ARGS(req, status), 860 861 TP_STRUCT__entry( 862 __field(unsigned int, task_id) 863 __field(unsigned int, client_id) 864 __field(int, status) 865 ), 866 867 TP_fast_assign( 868 const struct rpc_task *task = req->rl_slot.rq_task; 869 870 __entry->task_id = task->tk_pid; 871 __entry->client_id = task->tk_client->cl_clid; 872 __entry->status = status; 873 ), 874 875 TP_printk("task:%u@%u status=%d", 876 __entry->task_id, __entry->client_id, __entry->status 877 ) 878 ); 879 880 /** 881 ** Completion events 882 **/ 883 884 DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive); 885 886 DEFINE_COMPLETION_EVENT(xprtrdma_wc_send); 887 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg); 888 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li); 889 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake); 890 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done); 891 892 TRACE_EVENT(xprtrdma_frwr_alloc, 893 TP_PROTO( 894 const struct rpcrdma_mr *mr, 895 int rc 896 ), 897 898 TP_ARGS(mr, rc), 899 900 TP_STRUCT__entry( 901 __field(u32, mr_id) 902 __field(int, rc) 903 ), 904 905 TP_fast_assign( 906 __entry->mr_id = mr->mr_ibmr->res.id; 907 __entry->rc = rc; 908 ), 909 910 TP_printk("mr.id=%u: rc=%d", 911 __entry->mr_id, __entry->rc 912 ) 913 ); 914 915 TRACE_EVENT(xprtrdma_frwr_dereg, 916 TP_PROTO( 917 const struct rpcrdma_mr *mr, 918 int rc 919 ), 920 921 TP_ARGS(mr, rc), 922 923 TP_STRUCT__entry( 924 __field(u32, mr_id) 925 __field(int, nents) 926 __field(u32, handle) 927 __field(u32, length) 928 __field(u64, offset) 929 __field(u32, dir) 930 __field(int, rc) 931 ), 932 933 TP_fast_assign( 934 __entry->mr_id = mr->mr_ibmr->res.id; 935 __entry->nents = mr->mr_nents; 936 __entry->handle = mr->mr_handle; 937 __entry->length = mr->mr_length; 938 __entry->offset = mr->mr_offset; 939 __entry->dir = mr->mr_dir; 940 __entry->rc = rc; 941 ), 942 943 TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d", 944 __entry->mr_id, __entry->nents, __entry->length, 945 (unsigned long long)__entry->offset, __entry->handle, 946 xprtrdma_show_direction(__entry->dir), 947 __entry->rc 948 ) 949 ); 950 951 TRACE_EVENT(xprtrdma_frwr_sgerr, 952 TP_PROTO( 953 const struct rpcrdma_mr *mr, 954 int sg_nents 955 ), 956 957 TP_ARGS(mr, sg_nents), 958 959 TP_STRUCT__entry( 960 __field(u32, mr_id) 961 __field(u64, addr) 962 __field(u32, dir) 963 __field(int, nents) 964 ), 965 966 TP_fast_assign( 967 __entry->mr_id = mr->mr_ibmr->res.id; 968 __entry->addr = mr->mr_sg->dma_address; 969 __entry->dir = mr->mr_dir; 970 __entry->nents = sg_nents; 971 ), 972 973 TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d", 974 __entry->mr_id, __entry->addr, 975 xprtrdma_show_direction(__entry->dir), 976 __entry->nents 977 ) 978 ); 979 980 TRACE_EVENT(xprtrdma_frwr_maperr, 981 TP_PROTO( 982 const struct rpcrdma_mr *mr, 983 int num_mapped 984 ), 985 986 TP_ARGS(mr, num_mapped), 987 988 TP_STRUCT__entry( 989 __field(u32, mr_id) 990 __field(u64, addr) 991 __field(u32, dir) 992 __field(int, num_mapped) 993 __field(int, nents) 994 ), 995 996 TP_fast_assign( 997 __entry->mr_id = mr->mr_ibmr->res.id; 998 __entry->addr = mr->mr_sg->dma_address; 999 __entry->dir = mr->mr_dir; 1000 __entry->num_mapped = num_mapped; 1001 __entry->nents = mr->mr_nents; 1002 ), 1003 1004 TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d", 1005 __entry->mr_id, __entry->addr, 1006 xprtrdma_show_direction(__entry->dir), 1007 __entry->num_mapped, __entry->nents 1008 ) 1009 ); 1010 1011 DEFINE_MR_EVENT(fastreg); 1012 DEFINE_MR_EVENT(localinv); 1013 DEFINE_MR_EVENT(reminv); 1014 DEFINE_MR_EVENT(map); 1015 1016 DEFINE_ANON_MR_EVENT(unmap); 1017 1018 TRACE_EVENT(xprtrdma_dma_maperr, 1019 TP_PROTO( 1020 u64 addr 1021 ), 1022 1023 TP_ARGS(addr), 1024 1025 TP_STRUCT__entry( 1026 __field(u64, addr) 1027 ), 1028 1029 TP_fast_assign( 1030 __entry->addr = addr; 1031 ), 1032 1033 TP_printk("dma addr=0x%llx\n", __entry->addr) 1034 ); 1035 1036 /** 1037 ** Reply events 1038 **/ 1039 1040 TRACE_EVENT(xprtrdma_reply, 1041 TP_PROTO( 1042 const struct rpc_task *task, 1043 const struct rpcrdma_rep *rep, 1044 unsigned int credits 1045 ), 1046 1047 TP_ARGS(task, rep, credits), 1048 1049 TP_STRUCT__entry( 1050 __field(unsigned int, task_id) 1051 __field(unsigned int, client_id) 1052 __field(u32, xid) 1053 __field(unsigned int, credits) 1054 ), 1055 1056 TP_fast_assign( 1057 __entry->task_id = task->tk_pid; 1058 __entry->client_id = task->tk_client->cl_clid; 1059 __entry->xid = be32_to_cpu(rep->rr_xid); 1060 __entry->credits = credits; 1061 ), 1062 1063 TP_printk("task:%u@%u xid=0x%08x credits=%u", 1064 __entry->task_id, __entry->client_id, __entry->xid, 1065 __entry->credits 1066 ) 1067 ); 1068 1069 DEFINE_REPLY_EVENT(vers); 1070 DEFINE_REPLY_EVENT(rqst); 1071 DEFINE_REPLY_EVENT(short); 1072 DEFINE_REPLY_EVENT(hdr); 1073 1074 TRACE_EVENT(xprtrdma_err_vers, 1075 TP_PROTO( 1076 const struct rpc_rqst *rqst, 1077 __be32 *min, 1078 __be32 *max 1079 ), 1080 1081 TP_ARGS(rqst, min, max), 1082 1083 TP_STRUCT__entry( 1084 __field(unsigned int, task_id) 1085 __field(unsigned int, client_id) 1086 __field(u32, xid) 1087 __field(u32, min) 1088 __field(u32, max) 1089 ), 1090 1091 TP_fast_assign( 1092 __entry->task_id = rqst->rq_task->tk_pid; 1093 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1094 __entry->xid = be32_to_cpu(rqst->rq_xid); 1095 __entry->min = be32_to_cpup(min); 1096 __entry->max = be32_to_cpup(max); 1097 ), 1098 1099 TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]", 1100 __entry->task_id, __entry->client_id, __entry->xid, 1101 __entry->min, __entry->max 1102 ) 1103 ); 1104 1105 TRACE_EVENT(xprtrdma_err_chunk, 1106 TP_PROTO( 1107 const struct rpc_rqst *rqst 1108 ), 1109 1110 TP_ARGS(rqst), 1111 1112 TP_STRUCT__entry( 1113 __field(unsigned int, task_id) 1114 __field(unsigned int, client_id) 1115 __field(u32, xid) 1116 ), 1117 1118 TP_fast_assign( 1119 __entry->task_id = rqst->rq_task->tk_pid; 1120 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1121 __entry->xid = be32_to_cpu(rqst->rq_xid); 1122 ), 1123 1124 TP_printk("task:%u@%u xid=0x%08x", 1125 __entry->task_id, __entry->client_id, __entry->xid 1126 ) 1127 ); 1128 1129 TRACE_EVENT(xprtrdma_err_unrecognized, 1130 TP_PROTO( 1131 const struct rpc_rqst *rqst, 1132 __be32 *procedure 1133 ), 1134 1135 TP_ARGS(rqst, procedure), 1136 1137 TP_STRUCT__entry( 1138 __field(unsigned int, task_id) 1139 __field(unsigned int, client_id) 1140 __field(u32, xid) 1141 __field(u32, procedure) 1142 ), 1143 1144 TP_fast_assign( 1145 __entry->task_id = rqst->rq_task->tk_pid; 1146 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1147 __entry->procedure = be32_to_cpup(procedure); 1148 ), 1149 1150 TP_printk("task:%u@%u xid=0x%08x procedure=%u", 1151 __entry->task_id, __entry->client_id, __entry->xid, 1152 __entry->procedure 1153 ) 1154 ); 1155 1156 TRACE_EVENT(xprtrdma_fixup, 1157 TP_PROTO( 1158 const struct rpc_rqst *rqst, 1159 unsigned long fixup 1160 ), 1161 1162 TP_ARGS(rqst, fixup), 1163 1164 TP_STRUCT__entry( 1165 __field(unsigned int, task_id) 1166 __field(unsigned int, client_id) 1167 __field(unsigned long, fixup) 1168 __field(size_t, headlen) 1169 __field(unsigned int, pagelen) 1170 __field(size_t, taillen) 1171 ), 1172 1173 TP_fast_assign( 1174 __entry->task_id = rqst->rq_task->tk_pid; 1175 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1176 __entry->fixup = fixup; 1177 __entry->headlen = rqst->rq_rcv_buf.head[0].iov_len; 1178 __entry->pagelen = rqst->rq_rcv_buf.page_len; 1179 __entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len; 1180 ), 1181 1182 TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu", 1183 __entry->task_id, __entry->client_id, __entry->fixup, 1184 __entry->headlen, __entry->pagelen, __entry->taillen 1185 ) 1186 ); 1187 1188 TRACE_EVENT(xprtrdma_decode_seg, 1189 TP_PROTO( 1190 u32 handle, 1191 u32 length, 1192 u64 offset 1193 ), 1194 1195 TP_ARGS(handle, length, offset), 1196 1197 TP_STRUCT__entry( 1198 __field(u32, handle) 1199 __field(u32, length) 1200 __field(u64, offset) 1201 ), 1202 1203 TP_fast_assign( 1204 __entry->handle = handle; 1205 __entry->length = length; 1206 __entry->offset = offset; 1207 ), 1208 1209 TP_printk("%u@0x%016llx:0x%08x", 1210 __entry->length, (unsigned long long)__entry->offset, 1211 __entry->handle 1212 ) 1213 ); 1214 1215 TRACE_EVENT(xprtrdma_mrs_zap, 1216 TP_PROTO( 1217 const struct rpc_task *task 1218 ), 1219 1220 TP_ARGS(task), 1221 1222 TP_STRUCT__entry( 1223 __field(unsigned int, task_id) 1224 __field(unsigned int, client_id) 1225 ), 1226 1227 TP_fast_assign( 1228 __entry->task_id = task->tk_pid; 1229 __entry->client_id = task->tk_client->cl_clid; 1230 ), 1231 1232 TP_printk("task:%u@%u", 1233 __entry->task_id, __entry->client_id 1234 ) 1235 ); 1236 1237 /** 1238 ** Callback events 1239 **/ 1240 1241 TRACE_EVENT(xprtrdma_cb_setup, 1242 TP_PROTO( 1243 const struct rpcrdma_xprt *r_xprt, 1244 unsigned int reqs 1245 ), 1246 1247 TP_ARGS(r_xprt, reqs), 1248 1249 TP_STRUCT__entry( 1250 __field(unsigned int, reqs) 1251 __string(addr, rpcrdma_addrstr(r_xprt)) 1252 __string(port, rpcrdma_portstr(r_xprt)) 1253 ), 1254 1255 TP_fast_assign( 1256 __entry->reqs = reqs; 1257 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 1258 __assign_str(port, rpcrdma_portstr(r_xprt)); 1259 ), 1260 1261 TP_printk("peer=[%s]:%s %u reqs", 1262 __get_str(addr), __get_str(port), __entry->reqs 1263 ) 1264 ); 1265 1266 DEFINE_CALLBACK_EVENT(call); 1267 DEFINE_CALLBACK_EVENT(reply); 1268 1269 /** 1270 ** Server-side RPC/RDMA events 1271 **/ 1272 1273 DECLARE_EVENT_CLASS(svcrdma_accept_class, 1274 TP_PROTO( 1275 const struct svcxprt_rdma *rdma, 1276 long status 1277 ), 1278 1279 TP_ARGS(rdma, status), 1280 1281 TP_STRUCT__entry( 1282 __field(long, status) 1283 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1284 ), 1285 1286 TP_fast_assign( 1287 __entry->status = status; 1288 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1289 ), 1290 1291 TP_printk("addr=%s status=%ld", 1292 __get_str(addr), __entry->status 1293 ) 1294 ); 1295 1296 #define DEFINE_ACCEPT_EVENT(name) \ 1297 DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \ 1298 TP_PROTO( \ 1299 const struct svcxprt_rdma *rdma, \ 1300 long status \ 1301 ), \ 1302 TP_ARGS(rdma, status)) 1303 1304 DEFINE_ACCEPT_EVENT(pd); 1305 DEFINE_ACCEPT_EVENT(qp); 1306 DEFINE_ACCEPT_EVENT(fabric); 1307 DEFINE_ACCEPT_EVENT(initdepth); 1308 DEFINE_ACCEPT_EVENT(accept); 1309 1310 TRACE_DEFINE_ENUM(RDMA_MSG); 1311 TRACE_DEFINE_ENUM(RDMA_NOMSG); 1312 TRACE_DEFINE_ENUM(RDMA_MSGP); 1313 TRACE_DEFINE_ENUM(RDMA_DONE); 1314 TRACE_DEFINE_ENUM(RDMA_ERROR); 1315 1316 #define show_rpcrdma_proc(x) \ 1317 __print_symbolic(x, \ 1318 { RDMA_MSG, "RDMA_MSG" }, \ 1319 { RDMA_NOMSG, "RDMA_NOMSG" }, \ 1320 { RDMA_MSGP, "RDMA_MSGP" }, \ 1321 { RDMA_DONE, "RDMA_DONE" }, \ 1322 { RDMA_ERROR, "RDMA_ERROR" }) 1323 1324 TRACE_EVENT(svcrdma_decode_rqst, 1325 TP_PROTO( 1326 const struct svc_rdma_recv_ctxt *ctxt, 1327 __be32 *p, 1328 unsigned int hdrlen 1329 ), 1330 1331 TP_ARGS(ctxt, p, hdrlen), 1332 1333 TP_STRUCT__entry( 1334 __field(u32, cq_id) 1335 __field(int, completion_id) 1336 __field(u32, xid) 1337 __field(u32, vers) 1338 __field(u32, proc) 1339 __field(u32, credits) 1340 __field(unsigned int, hdrlen) 1341 ), 1342 1343 TP_fast_assign( 1344 __entry->cq_id = ctxt->rc_cid.ci_queue_id; 1345 __entry->completion_id = ctxt->rc_cid.ci_completion_id; 1346 __entry->xid = be32_to_cpup(p++); 1347 __entry->vers = be32_to_cpup(p++); 1348 __entry->credits = be32_to_cpup(p++); 1349 __entry->proc = be32_to_cpup(p); 1350 __entry->hdrlen = hdrlen; 1351 ), 1352 1353 TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u", 1354 __entry->cq_id, __entry->completion_id, 1355 __entry->xid, __entry->vers, __entry->credits, 1356 show_rpcrdma_proc(__entry->proc), __entry->hdrlen) 1357 ); 1358 1359 TRACE_EVENT(svcrdma_decode_short_err, 1360 TP_PROTO( 1361 const struct svc_rdma_recv_ctxt *ctxt, 1362 unsigned int hdrlen 1363 ), 1364 1365 TP_ARGS(ctxt, hdrlen), 1366 1367 TP_STRUCT__entry( 1368 __field(u32, cq_id) 1369 __field(int, completion_id) 1370 __field(unsigned int, hdrlen) 1371 ), 1372 1373 TP_fast_assign( 1374 __entry->cq_id = ctxt->rc_cid.ci_queue_id; 1375 __entry->completion_id = ctxt->rc_cid.ci_completion_id; 1376 __entry->hdrlen = hdrlen; 1377 ), 1378 1379 TP_printk("cq.id=%u cid=%d hdrlen=%u", 1380 __entry->cq_id, __entry->completion_id, 1381 __entry->hdrlen) 1382 ); 1383 1384 DECLARE_EVENT_CLASS(svcrdma_badreq_event, 1385 TP_PROTO( 1386 const struct svc_rdma_recv_ctxt *ctxt, 1387 __be32 *p 1388 ), 1389 1390 TP_ARGS(ctxt, p), 1391 1392 TP_STRUCT__entry( 1393 __field(u32, cq_id) 1394 __field(int, completion_id) 1395 __field(u32, xid) 1396 __field(u32, vers) 1397 __field(u32, proc) 1398 __field(u32, credits) 1399 ), 1400 1401 TP_fast_assign( 1402 __entry->cq_id = ctxt->rc_cid.ci_queue_id; 1403 __entry->completion_id = ctxt->rc_cid.ci_completion_id; 1404 __entry->xid = be32_to_cpup(p++); 1405 __entry->vers = be32_to_cpup(p++); 1406 __entry->credits = be32_to_cpup(p++); 1407 __entry->proc = be32_to_cpup(p); 1408 ), 1409 1410 TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u", 1411 __entry->cq_id, __entry->completion_id, 1412 __entry->xid, __entry->vers, __entry->credits, __entry->proc) 1413 ); 1414 1415 #define DEFINE_BADREQ_EVENT(name) \ 1416 DEFINE_EVENT(svcrdma_badreq_event, \ 1417 svcrdma_decode_##name##_err, \ 1418 TP_PROTO( \ 1419 const struct svc_rdma_recv_ctxt *ctxt, \ 1420 __be32 *p \ 1421 ), \ 1422 TP_ARGS(ctxt, p)) 1423 1424 DEFINE_BADREQ_EVENT(badvers); 1425 DEFINE_BADREQ_EVENT(drop); 1426 DEFINE_BADREQ_EVENT(badproc); 1427 DEFINE_BADREQ_EVENT(parse); 1428 1429 TRACE_EVENT(svcrdma_encode_wseg, 1430 TP_PROTO( 1431 const struct svc_rdma_send_ctxt *ctxt, 1432 u32 segno, 1433 u32 handle, 1434 u32 length, 1435 u64 offset 1436 ), 1437 1438 TP_ARGS(ctxt, segno, handle, length, offset), 1439 1440 TP_STRUCT__entry( 1441 __field(u32, cq_id) 1442 __field(int, completion_id) 1443 __field(u32, segno) 1444 __field(u32, handle) 1445 __field(u32, length) 1446 __field(u64, offset) 1447 ), 1448 1449 TP_fast_assign( 1450 __entry->cq_id = ctxt->sc_cid.ci_queue_id; 1451 __entry->completion_id = ctxt->sc_cid.ci_completion_id; 1452 __entry->segno = segno; 1453 __entry->handle = handle; 1454 __entry->length = length; 1455 __entry->offset = offset; 1456 ), 1457 1458 TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x", 1459 __entry->cq_id, __entry->completion_id, 1460 __entry->segno, __entry->length, 1461 (unsigned long long)__entry->offset, __entry->handle 1462 ) 1463 ); 1464 1465 TRACE_EVENT(svcrdma_decode_rseg, 1466 TP_PROTO( 1467 const struct rpc_rdma_cid *cid, 1468 const struct svc_rdma_chunk *chunk, 1469 const struct svc_rdma_segment *segment 1470 ), 1471 1472 TP_ARGS(cid, chunk, segment), 1473 1474 TP_STRUCT__entry( 1475 __field(u32, cq_id) 1476 __field(int, completion_id) 1477 __field(u32, segno) 1478 __field(u32, position) 1479 __field(u32, handle) 1480 __field(u32, length) 1481 __field(u64, offset) 1482 ), 1483 1484 TP_fast_assign( 1485 __entry->cq_id = cid->ci_queue_id; 1486 __entry->completion_id = cid->ci_completion_id; 1487 __entry->segno = chunk->ch_segcount; 1488 __entry->position = chunk->ch_position; 1489 __entry->handle = segment->rs_handle; 1490 __entry->length = segment->rs_length; 1491 __entry->offset = segment->rs_offset; 1492 ), 1493 1494 TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x", 1495 __entry->cq_id, __entry->completion_id, 1496 __entry->segno, __entry->position, __entry->length, 1497 (unsigned long long)__entry->offset, __entry->handle 1498 ) 1499 ); 1500 1501 TRACE_EVENT(svcrdma_decode_wseg, 1502 TP_PROTO( 1503 const struct rpc_rdma_cid *cid, 1504 const struct svc_rdma_chunk *chunk, 1505 u32 segno 1506 ), 1507 1508 TP_ARGS(cid, chunk, segno), 1509 1510 TP_STRUCT__entry( 1511 __field(u32, cq_id) 1512 __field(int, completion_id) 1513 __field(u32, segno) 1514 __field(u32, handle) 1515 __field(u32, length) 1516 __field(u64, offset) 1517 ), 1518 1519 TP_fast_assign( 1520 const struct svc_rdma_segment *segment = 1521 &chunk->ch_segments[segno]; 1522 1523 __entry->cq_id = cid->ci_queue_id; 1524 __entry->completion_id = cid->ci_completion_id; 1525 __entry->segno = segno; 1526 __entry->handle = segment->rs_handle; 1527 __entry->length = segment->rs_length; 1528 __entry->offset = segment->rs_offset; 1529 ), 1530 1531 TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x", 1532 __entry->cq_id, __entry->completion_id, 1533 __entry->segno, __entry->length, 1534 (unsigned long long)__entry->offset, __entry->handle 1535 ) 1536 ); 1537 1538 DECLARE_EVENT_CLASS(svcrdma_error_event, 1539 TP_PROTO( 1540 __be32 xid 1541 ), 1542 1543 TP_ARGS(xid), 1544 1545 TP_STRUCT__entry( 1546 __field(u32, xid) 1547 ), 1548 1549 TP_fast_assign( 1550 __entry->xid = be32_to_cpu(xid); 1551 ), 1552 1553 TP_printk("xid=0x%08x", 1554 __entry->xid 1555 ) 1556 ); 1557 1558 #define DEFINE_ERROR_EVENT(name) \ 1559 DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name, \ 1560 TP_PROTO( \ 1561 __be32 xid \ 1562 ), \ 1563 TP_ARGS(xid)) 1564 1565 DEFINE_ERROR_EVENT(vers); 1566 DEFINE_ERROR_EVENT(chunk); 1567 1568 /** 1569 ** Server-side RDMA API events 1570 **/ 1571 1572 DECLARE_EVENT_CLASS(svcrdma_dma_map_class, 1573 TP_PROTO( 1574 const struct svcxprt_rdma *rdma, 1575 u64 dma_addr, 1576 u32 length 1577 ), 1578 1579 TP_ARGS(rdma, dma_addr, length), 1580 1581 TP_STRUCT__entry( 1582 __field(u64, dma_addr) 1583 __field(u32, length) 1584 __string(device, rdma->sc_cm_id->device->name) 1585 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1586 ), 1587 1588 TP_fast_assign( 1589 __entry->dma_addr = dma_addr; 1590 __entry->length = length; 1591 __assign_str(device, rdma->sc_cm_id->device->name); 1592 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1593 ), 1594 1595 TP_printk("addr=%s device=%s dma_addr=%llu length=%u", 1596 __get_str(addr), __get_str(device), 1597 __entry->dma_addr, __entry->length 1598 ) 1599 ); 1600 1601 #define DEFINE_SVC_DMA_EVENT(name) \ 1602 DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \ 1603 TP_PROTO( \ 1604 const struct svcxprt_rdma *rdma,\ 1605 u64 dma_addr, \ 1606 u32 length \ 1607 ), \ 1608 TP_ARGS(rdma, dma_addr, length)) 1609 1610 DEFINE_SVC_DMA_EVENT(dma_map_page); 1611 DEFINE_SVC_DMA_EVENT(dma_map_err); 1612 DEFINE_SVC_DMA_EVENT(dma_unmap_page); 1613 1614 TRACE_EVENT(svcrdma_dma_map_rw_err, 1615 TP_PROTO( 1616 const struct svcxprt_rdma *rdma, 1617 unsigned int nents, 1618 int status 1619 ), 1620 1621 TP_ARGS(rdma, nents, status), 1622 1623 TP_STRUCT__entry( 1624 __field(int, status) 1625 __field(unsigned int, nents) 1626 __string(device, rdma->sc_cm_id->device->name) 1627 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1628 ), 1629 1630 TP_fast_assign( 1631 __entry->status = status; 1632 __entry->nents = nents; 1633 __assign_str(device, rdma->sc_cm_id->device->name); 1634 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1635 ), 1636 1637 TP_printk("addr=%s device=%s nents=%u status=%d", 1638 __get_str(addr), __get_str(device), __entry->nents, 1639 __entry->status 1640 ) 1641 ); 1642 1643 TRACE_EVENT(svcrdma_no_rwctx_err, 1644 TP_PROTO( 1645 const struct svcxprt_rdma *rdma, 1646 unsigned int num_sges 1647 ), 1648 1649 TP_ARGS(rdma, num_sges), 1650 1651 TP_STRUCT__entry( 1652 __field(unsigned int, num_sges) 1653 __string(device, rdma->sc_cm_id->device->name) 1654 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1655 ), 1656 1657 TP_fast_assign( 1658 __entry->num_sges = num_sges; 1659 __assign_str(device, rdma->sc_cm_id->device->name); 1660 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1661 ), 1662 1663 TP_printk("addr=%s device=%s num_sges=%d", 1664 __get_str(addr), __get_str(device), __entry->num_sges 1665 ) 1666 ); 1667 1668 TRACE_EVENT(svcrdma_page_overrun_err, 1669 TP_PROTO( 1670 const struct svcxprt_rdma *rdma, 1671 const struct svc_rqst *rqst, 1672 unsigned int pageno 1673 ), 1674 1675 TP_ARGS(rdma, rqst, pageno), 1676 1677 TP_STRUCT__entry( 1678 __field(unsigned int, pageno) 1679 __field(u32, xid) 1680 __string(device, rdma->sc_cm_id->device->name) 1681 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1682 ), 1683 1684 TP_fast_assign( 1685 __entry->pageno = pageno; 1686 __entry->xid = __be32_to_cpu(rqst->rq_xid); 1687 __assign_str(device, rdma->sc_cm_id->device->name); 1688 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1689 ), 1690 1691 TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr), 1692 __get_str(device), __entry->xid, __entry->pageno 1693 ) 1694 ); 1695 1696 TRACE_EVENT(svcrdma_small_wrch_err, 1697 TP_PROTO( 1698 const struct svcxprt_rdma *rdma, 1699 unsigned int remaining, 1700 unsigned int seg_no, 1701 unsigned int num_segs 1702 ), 1703 1704 TP_ARGS(rdma, remaining, seg_no, num_segs), 1705 1706 TP_STRUCT__entry( 1707 __field(unsigned int, remaining) 1708 __field(unsigned int, seg_no) 1709 __field(unsigned int, num_segs) 1710 __string(device, rdma->sc_cm_id->device->name) 1711 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1712 ), 1713 1714 TP_fast_assign( 1715 __entry->remaining = remaining; 1716 __entry->seg_no = seg_no; 1717 __entry->num_segs = num_segs; 1718 __assign_str(device, rdma->sc_cm_id->device->name); 1719 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1720 ), 1721 1722 TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u", 1723 __get_str(addr), __get_str(device), __entry->remaining, 1724 __entry->seg_no, __entry->num_segs 1725 ) 1726 ); 1727 1728 TRACE_EVENT(svcrdma_send_pullup, 1729 TP_PROTO( 1730 const struct svc_rdma_send_ctxt *ctxt, 1731 unsigned int msglen 1732 ), 1733 1734 TP_ARGS(ctxt, msglen), 1735 1736 TP_STRUCT__entry( 1737 __field(u32, cq_id) 1738 __field(int, completion_id) 1739 __field(unsigned int, hdrlen) 1740 __field(unsigned int, msglen) 1741 ), 1742 1743 TP_fast_assign( 1744 __entry->cq_id = ctxt->sc_cid.ci_queue_id; 1745 __entry->completion_id = ctxt->sc_cid.ci_completion_id; 1746 __entry->hdrlen = ctxt->sc_hdrbuf.len, 1747 __entry->msglen = msglen; 1748 ), 1749 1750 TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)", 1751 __entry->cq_id, __entry->completion_id, 1752 __entry->hdrlen, __entry->msglen, 1753 __entry->hdrlen + __entry->msglen) 1754 ); 1755 1756 TRACE_EVENT(svcrdma_send_err, 1757 TP_PROTO( 1758 const struct svc_rqst *rqst, 1759 int status 1760 ), 1761 1762 TP_ARGS(rqst, status), 1763 1764 TP_STRUCT__entry( 1765 __field(int, status) 1766 __field(u32, xid) 1767 __string(addr, rqst->rq_xprt->xpt_remotebuf) 1768 ), 1769 1770 TP_fast_assign( 1771 __entry->status = status; 1772 __entry->xid = __be32_to_cpu(rqst->rq_xid); 1773 __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); 1774 ), 1775 1776 TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr), 1777 __entry->xid, __entry->status 1778 ) 1779 ); 1780 1781 TRACE_EVENT(svcrdma_post_send, 1782 TP_PROTO( 1783 const struct svc_rdma_send_ctxt *ctxt 1784 ), 1785 1786 TP_ARGS(ctxt), 1787 1788 TP_STRUCT__entry( 1789 __field(u32, cq_id) 1790 __field(int, completion_id) 1791 __field(unsigned int, num_sge) 1792 __field(u32, inv_rkey) 1793 ), 1794 1795 TP_fast_assign( 1796 const struct ib_send_wr *wr = &ctxt->sc_send_wr; 1797 1798 __entry->cq_id = ctxt->sc_cid.ci_queue_id; 1799 __entry->completion_id = ctxt->sc_cid.ci_completion_id; 1800 __entry->num_sge = wr->num_sge; 1801 __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ? 1802 wr->ex.invalidate_rkey : 0; 1803 ), 1804 1805 TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x", 1806 __entry->cq_id, __entry->completion_id, 1807 __entry->num_sge, __entry->inv_rkey 1808 ) 1809 ); 1810 1811 DEFINE_COMPLETION_EVENT(svcrdma_wc_send); 1812 1813 TRACE_EVENT(svcrdma_post_recv, 1814 TP_PROTO( 1815 const struct svc_rdma_recv_ctxt *ctxt 1816 ), 1817 1818 TP_ARGS(ctxt), 1819 1820 TP_STRUCT__entry( 1821 __field(u32, cq_id) 1822 __field(int, completion_id) 1823 ), 1824 1825 TP_fast_assign( 1826 __entry->cq_id = ctxt->rc_cid.ci_queue_id; 1827 __entry->completion_id = ctxt->rc_cid.ci_completion_id; 1828 ), 1829 1830 TP_printk("cq.id=%d cid=%d", 1831 __entry->cq_id, __entry->completion_id 1832 ) 1833 ); 1834 1835 DEFINE_RECEIVE_COMPLETION_EVENT(svcrdma_wc_receive); 1836 1837 TRACE_EVENT(svcrdma_rq_post_err, 1838 TP_PROTO( 1839 const struct svcxprt_rdma *rdma, 1840 int status 1841 ), 1842 1843 TP_ARGS(rdma, status), 1844 1845 TP_STRUCT__entry( 1846 __field(int, status) 1847 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1848 ), 1849 1850 TP_fast_assign( 1851 __entry->status = status; 1852 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1853 ), 1854 1855 TP_printk("addr=%s status=%d", 1856 __get_str(addr), __entry->status 1857 ) 1858 ); 1859 1860 DECLARE_EVENT_CLASS(svcrdma_post_chunk_class, 1861 TP_PROTO( 1862 const struct rpc_rdma_cid *cid, 1863 int sqecount 1864 ), 1865 1866 TP_ARGS(cid, sqecount), 1867 1868 TP_STRUCT__entry( 1869 __field(u32, cq_id) 1870 __field(int, completion_id) 1871 __field(int, sqecount) 1872 ), 1873 1874 TP_fast_assign( 1875 __entry->cq_id = cid->ci_queue_id; 1876 __entry->completion_id = cid->ci_completion_id; 1877 __entry->sqecount = sqecount; 1878 ), 1879 1880 TP_printk("cq.id=%u cid=%d sqecount=%d", 1881 __entry->cq_id, __entry->completion_id, 1882 __entry->sqecount 1883 ) 1884 ); 1885 1886 #define DEFINE_POST_CHUNK_EVENT(name) \ 1887 DEFINE_EVENT(svcrdma_post_chunk_class, \ 1888 svcrdma_post_##name##_chunk, \ 1889 TP_PROTO( \ 1890 const struct rpc_rdma_cid *cid, \ 1891 int sqecount \ 1892 ), \ 1893 TP_ARGS(cid, sqecount)) 1894 1895 DEFINE_POST_CHUNK_EVENT(read); 1896 DEFINE_POST_CHUNK_EVENT(write); 1897 DEFINE_POST_CHUNK_EVENT(reply); 1898 1899 DEFINE_COMPLETION_EVENT(svcrdma_wc_read); 1900 DEFINE_COMPLETION_EVENT(svcrdma_wc_write); 1901 1902 TRACE_EVENT(svcrdma_qp_error, 1903 TP_PROTO( 1904 const struct ib_event *event, 1905 const struct sockaddr *sap 1906 ), 1907 1908 TP_ARGS(event, sap), 1909 1910 TP_STRUCT__entry( 1911 __field(unsigned int, event) 1912 __string(device, event->device->name) 1913 __array(__u8, addr, INET6_ADDRSTRLEN + 10) 1914 ), 1915 1916 TP_fast_assign( 1917 __entry->event = event->event; 1918 __assign_str(device, event->device->name); 1919 snprintf(__entry->addr, sizeof(__entry->addr) - 1, 1920 "%pISpc", sap); 1921 ), 1922 1923 TP_printk("addr=%s dev=%s event=%s (%u)", 1924 __entry->addr, __get_str(device), 1925 rdma_show_ib_event(__entry->event), __entry->event 1926 ) 1927 ); 1928 1929 DECLARE_EVENT_CLASS(svcrdma_sendqueue_event, 1930 TP_PROTO( 1931 const struct svcxprt_rdma *rdma 1932 ), 1933 1934 TP_ARGS(rdma), 1935 1936 TP_STRUCT__entry( 1937 __field(int, avail) 1938 __field(int, depth) 1939 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1940 ), 1941 1942 TP_fast_assign( 1943 __entry->avail = atomic_read(&rdma->sc_sq_avail); 1944 __entry->depth = rdma->sc_sq_depth; 1945 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1946 ), 1947 1948 TP_printk("addr=%s sc_sq_avail=%d/%d", 1949 __get_str(addr), __entry->avail, __entry->depth 1950 ) 1951 ); 1952 1953 #define DEFINE_SQ_EVENT(name) \ 1954 DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\ 1955 TP_PROTO( \ 1956 const struct svcxprt_rdma *rdma \ 1957 ), \ 1958 TP_ARGS(rdma)) 1959 1960 DEFINE_SQ_EVENT(full); 1961 DEFINE_SQ_EVENT(retry); 1962 1963 TRACE_EVENT(svcrdma_sq_post_err, 1964 TP_PROTO( 1965 const struct svcxprt_rdma *rdma, 1966 int status 1967 ), 1968 1969 TP_ARGS(rdma, status), 1970 1971 TP_STRUCT__entry( 1972 __field(int, avail) 1973 __field(int, depth) 1974 __field(int, status) 1975 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1976 ), 1977 1978 TP_fast_assign( 1979 __entry->avail = atomic_read(&rdma->sc_sq_avail); 1980 __entry->depth = rdma->sc_sq_depth; 1981 __entry->status = status; 1982 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1983 ), 1984 1985 TP_printk("addr=%s sc_sq_avail=%d/%d status=%d", 1986 __get_str(addr), __entry->avail, __entry->depth, 1987 __entry->status 1988 ) 1989 ); 1990 1991 #endif /* _TRACE_RPCRDMA_H */ 1992 1993 #include <trace/define_trace.h> 1994