/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017 Oracle. All rights reserved.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcrdma

#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCRDMA_H

#include <linux/tracepoint.h>
#include <trace/events/rdma.h>

/**
 ** Event classes
 **/

/*
 * Reply-processing events: record the rpcrdma_rep, its owning
 * transport, and the XID/version/procedure fields decoded from
 * the transport header (all byte-swapped from wire order).
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_event,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(const void *, rep)
		__field(const void *, r_xprt)
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
	),

	TP_fast_assign(
		__entry->rep = rep;
		__entry->r_xprt = rep->rr_rxprt;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
	),

	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
		__entry->r_xprt, __entry->xid, __entry->rep,
		__entry->version, __entry->proc
	)
);

#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_event, name,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))

/*
 * Transport-lifetime events: identify the rpcrdma_xprt and snapshot
 * its presentation address/port strings at the time of the event.
 */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p",
		__get_str(addr), __get_str(port), __entry->r_xprt
	)
);

#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))

/*
 * Read-chunk registration events: one MR segment registered for a
 * Read chunk at XDR position @pos. "more"/"last" in the output
 * reflects whether further segments follow (nents < nsegs).
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, mr)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr = mr;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id, __entry->mr,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, name,			\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))

/*
 * Write/Reply-chunk registration events: like xprtrdma_rdch_event
 * but without an XDR position (Write list and Reply chunk segments
 * are not position-based).
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, mr)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr = mr;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id, __entry->mr,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, name,			\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))

/*
 * Export the FRWR state enum values so user space tooling can
 * resolve the symbolic names printed by xprtrdma_show_frwr_state().
 */
TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
TRACE_DEFINE_ENUM(FRWR_IS_VALID);
TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);

#define xprtrdma_show_frwr_state(x)					\
		__print_symbolic(x,					\
				{ FRWR_IS_INVALID, "INVALID" },		\
				{ FRWR_IS_VALID, "VALID" },		\
				{ FRWR_FLUSHED_FR, "FLUSHED_FR" },	\
				{ FRWR_FLUSHED_LI, "FLUSHED_LI" })

/*
 * FRWR completion events: the MR is recovered from the embedded
 * frwr via container_of(). vendor_err is only meaningful when the
 * WC status is non-zero, so it is forced to 0 on success.
 */
DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpcrdma_frwr *frwr
	),

	TP_ARGS(wc, frwr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(unsigned int, state)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
		__entry->state = frwr->fr_state;
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk(
		"mr=%p state=%s: %s (%u/0x%x)",
		__entry->mr, xprtrdma_show_frwr_state(__entry->state),
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_FRWR_DONE_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpcrdma_frwr *frwr	\
				),					\
				TP_ARGS(wc, frwr))

/*
 * MR lifecycle events: snapshot the MR pointer and its registered
 * handle/length/offset triplet.
 */
DECLARE_EVENT_CLASS(xprtrdma_mr,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
	),

	TP_printk("mr=%p %u@0x%016llx:0x%08x",
		__entry->mr, __entry->length,
		(unsigned long long)__entry->offset,
		__entry->handle
	)
);

#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr, name,				\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))

/*
 * Backchannel request events: resolve the rpc_rqst to its rpcrdma_req
 * and any attached reply, plus the request XID.
 */
DECLARE_EVENT_CLASS(xprtrdma_cb_event,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(const void *, rqst)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->rqst = rqst;
		__entry->req = rpcr_to_rdmar(rqst);
		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
		__entry->xid, __entry->rqst, __entry->req, __entry->rep
	)
);

#define DEFINE_CB_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_cb_event, name,			\
				TP_PROTO(				\
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(rqst))

/**
 ** Connection events
 **/

/* RDMA CM event delivered to the transport's connection upcall. */
TRACE_EVENT(xprtrdma_conn_upcall,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		struct rdma_cm_event *event
	),

	TP_ARGS(r_xprt, event),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__field(int, status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__entry->status = event->status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, rdma_show_cm_event(__entry->event),
		__entry->event, __entry->status
	)
);

/*
 * Transport disconnect: records the disconnect status and whether
 * the endpoint still reports connected (rep_connected == 1) at the
 * time the event fires.
 */
TRACE_EVENT(xprtrdma_disconnect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int status
	),

	TP_ARGS(r_xprt, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(int, status)
		__field(int, connected)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->status = status;
		__entry->connected = r_xprt->rx_ep.rep_connected;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->status,
		__entry->connected == 1 ? "still " : "dis"
	)
);

DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
DEFINE_RXPRT_EVENT(xprtrdma_create);
DEFINE_RXPRT_EVENT(xprtrdma_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_remove);
DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);

/* Asynchronous QP error reported by the RDMA device. */
TRACE_EVENT(xprtrdma_qp_error,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct ib_event *event
	),

	TP_ARGS(r_xprt, event),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__string(name, event->device->name)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__assign_str(name, event->device->name);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__get_str(name), rdma_show_ib_event(__entry->event),
		__entry->event
	)
);

/**
 ** Call events
 **/

/* Batch MR allocation: @count MRs were created for this transport. */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
	),

	TP_printk("r_xprt=%p: created %u MRs",
		__entry->r_xprt, __entry->count
	)
);

DEFINE_RXPRT_EVENT(xprtrdma_nomrs);

DEFINE_RDCH_EVENT(xprtrdma_read_chunk);
DEFINE_WRCH_EVENT(xprtrdma_write_chunk);
DEFINE_WRCH_EVENT(xprtrdma_reply_chunk);

/*
 * Export chunk-type enum values for user space symbolic decoding
 * (continues below with the remaining chunk types).
 */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })

/*
 * RPC call marshaling: transport header length, the three XDR send
 * buffer segment lengths (head/page/tail), and the chosen read and
 * write chunk types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned int hdrlen,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(rqst, hdrlen, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = hdrlen;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);

/*
 * Send WR posted: SGE count and whether the WR requested a signaled
 * completion (IB_SEND_SIGNALED), plus the post return code.
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(int, num_sge)
		__field(bool, signaled)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->req = req;
		__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
		__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
				    IB_SEND_SIGNALED;
		__entry->status = status;
	),

	TP_printk("req=%p, %d SGEs%s, status=%d",
		__entry->req, __entry->num_sge,
		(__entry->signaled ? ", signaled" : ""),
		__entry->status
	)
);

/* A single Receive WR was posted; identified by its CQE. */
TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct ib_cqe *cqe
	),

	TP_ARGS(cqe),

	TP_STRUCT__entry(
		__field(const void *, cqe)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
	),

	TP_printk("cqe=%p",
		__entry->cqe
	)
);

/*
 * Batch Receive posting: @count new Receives were posted, @posted is
 * the resulting active count (rb_posted_receives), @status the
 * posting return code.
 */
TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_buf.rb_posted_receives;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);

/**
 ** Completion events
 **/

/*
 * Send completion: vendor_err is valid only on failure, so it is
 * zeroed when the WC status indicates success.
 */
TRACE_EVENT(xprtrdma_wc_send,
	TP_PROTO(
		const struct rpcrdma_sendctx *sc,
		const struct ib_wc *wc
	),

	TP_ARGS(sc, wc),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, unmap_count)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->req = sc->sc_req;
		__entry->unmap_count = sc->sc_unmap_count;
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
		__entry->req, __entry->unmap_count,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/*
 * Receive completion: on failure byte_len is meaningless (reported
 * as 0) and vendor_err is captured; on success the reverse.
 */
TRACE_EVENT(xprtrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);

DEFINE_MR_EVENT(xprtrdma_localinv);
DEFINE_MR_EVENT(xprtrdma_dma_unmap);
DEFINE_MR_EVENT(xprtrdma_remoteinv);
DEFINE_MR_EVENT(xprtrdma_recover_mr);

/**
 ** Reply events
 **/

/*
 * Reply matched to its request: records the rep/req pair, the XID,
 * and the credit grant carried by the reply.
 */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		const struct rpcrdma_req *req,
		unsigned int credits
	),

	TP_ARGS(task, rep, req, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->req = req;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits, __entry->rep, __entry->req
	)
);

/*
 * Reply completion deferred: identifies the rep and the task that
 * owns the matched rqst (rep->rr_rqst must already be set here).
 */
TRACE_EVENT(xprtrdma_defer_cmp,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->xid = be32_to_cpu(rep->rr_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);

DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
DEFINE_REPLY_EVENT(xprtrdma_reply_short);
DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);

/*
 * Reply fixup: copying reply data into the receive buffer. @len is
 * the remaining byte count, @hdrlen the transport header length.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int len,
		int hdrlen
	),

	TP_ARGS(rqst, len, hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, base)
		__field(int, len)
		__field(int, hdrlen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->base = rqst->rq_rcv_buf.head[0].iov_base;
		__entry->len = len;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->base, __entry->len, __entry->hdrlen
	)
);

/* Per-page progress of the reply fixup copy. */
TRACE_EVENT(xprtrdma_fixup_pg,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int pageno,
		const void *pos,
		int len,
		int curlen
	),

	TP_ARGS(rqst, pageno, pos, len, curlen),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, pos)
		__field(int, pageno)
		__field(int, len)
		__field(int, curlen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->pageno = pageno;
		__entry->len = len;
		__entry->curlen = curlen;
	),

	TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->pageno, __entry->pos, __entry->len, __entry->curlen
	)
);

/* One RDMA segment (handle/length/offset) decoded from a reply. */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

/**
 ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
 **/

/*
 * Buffer allocation for an RPC: records the req and the call/receive
 * buffer sizes taken from the task's rqst.
 */
TRACE_EVENT(xprtrdma_allocate,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(size_t, callsize)
		__field(size_t, rcvsize)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->callsize = task->tk_rqstp->rq_callsize;
		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
	),

	TP_printk("task:%u@%u req=%p (%zu, %zu)",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->callsize, __entry->rcvsize
	)
);

/* RPC completed: the req and any rl_reply still attached to it. */
TRACE_EVENT(xprtrdma_rpc_done,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->rep = req->rl_reply;
	),

	TP_printk("task:%u@%u req=%p rep=%p",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->rep
	)
);

DEFINE_RXPRT_EVENT(xprtrdma_noreps);

/**
 ** Callback events
 **/

/* Backchannel setup: @reqs backchannel requests were provisioned. */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->reqs
	)
);

DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);

#endif /* _TRACE_RPCRDMA_H */

#include <trace/define_trace.h>