/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
 *
 * Trace point definitions for the "rpcrdma" subsystem.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcrdma

#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCRDMA_H

#include <linux/scatterlist.h>
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/tracepoint.h>
#include <trace/events/rdma.h>

/**
 ** Event classes
 **/

DECLARE_EVENT_CLASS(rpcrdma_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_COMPLETION_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

DECLARE_EVENT_CLASS(xprtrdma_reply_event,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(const void *, rep)
		__field(const void *, r_xprt)
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
	),

	TP_fast_assign(
		__entry->rep = rep;
		__entry->r_xprt = rep->rr_rxprt;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
	),

	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
		__entry->r_xprt, __entry->xid, __entry->rep,
		__entry->version, __entry->proc
	)
);

#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_event, name,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))

DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p",
		__get_str(addr), __get_str(port), __entry->r_xprt
	)
);

#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))

DECLARE_EVENT_CLASS(xprtrdma_connect_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int rc
	),

	TP_ARGS(r_xprt, rc),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(int, rc)
		__field(int, connect_status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->rc = rc;
		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->rc, __entry->connect_status
	)
);

#define DEFINE_CONN_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					int rc				\
				),					\
				TP_ARGS(r_xprt, rc))
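
/*
 * Note: DECLARE_EVENT_CLASS() defines only the record layout and
 * format string; the DEFINE_*_EVENT wrapper macros above instantiate
 * the actual tracepoints via DEFINE_EVENT(). For example,
 * DEFINE_CONN_EVENT(connect) later in this file emits a tracepoint
 * named xprtrdma_connect, which a call site fires as:
 *
 *	trace_xprtrdma_connect(r_xprt, rc);
 *
 * (Illustrative only; the call sites live in net/sunrpc/xprtrdma/.)
 */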

DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))

DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))

DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpcrdma_frwr *frwr
	),

	TP_ARGS(wc, frwr),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->mr_id = frwr->fr_mr->res.id;
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk(
		"mr.id=%u: %s (%u/0x%x)",
		__entry->mr_id, rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_FRWR_DONE_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpcrdma_frwr *frwr	\
				),					\
				TP_ARGS(wc, frwr))
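
/*
 * Notes:
 * - wc->vendor_err is meaningful only when wc->status is non-zero,
 *   which is why the completion classes above capture it
 *   conditionally.
 * - Enum values are opaque to user-space trace parsers, so
 *   TRACE_DEFINE_ENUM() below exports each DMA direction value to
 *   let the __print_symbolic() strings resolve when the trace
 *   buffer is read from user space.
 */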

TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })

DECLARE_EVENT_CLASS(xprtrdma_mr,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir = mr->mr_dir;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name,		\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))

DECLARE_EVENT_CLASS(xprtrdma_cb_event,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(const void *, rqst)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->rqst = rqst;
		__entry->req = rpcr_to_rdmar(rqst);
		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
		__entry->xid, __entry->rqst, __entry->req, __entry->rep
	)
);

#define DEFINE_CB_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_cb_event, name,			\
				TP_PROTO(				\
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(rqst))

/**
 ** Connection events
 **/

TRACE_EVENT(xprtrdma_inline_thresh,
	TP_PROTO(
		const struct rpcrdma_ep *ep
	),

	TP_ARGS(ep),

	TP_STRUCT__entry(
		__field(unsigned int, inline_send)
		__field(unsigned int, inline_recv)
		__field(unsigned int, max_send)
		__field(unsigned int, max_recv)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->inline_send = ep->re_inline_send;
		__entry->inline_recv = ep->re_inline_recv;
		__entry->max_send = ep->re_max_inline_send;
		__entry->max_recv = ep->re_max_inline_recv;
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
		__entry->srcaddr, __entry->dstaddr,
		__entry->inline_send, __entry->inline_recv,
		__entry->max_send, __entry->max_recv
	)
);
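
/*
 * Note: the srcaddr/dstaddr arrays above are sized to hold a
 * struct sockaddr_in6 so that both IPv4 and IPv6 peers fit;
 * "%pISpc" is the kernel's vsprintf extension for printing a
 * struct sockaddr together with its port.
 */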

DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);

DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
DEFINE_RXPRT_EVENT(xprtrdma_op_setport);

TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),

	TP_ARGS(r_xprt, delay),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->delay = delay;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->delay
	)
);

TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);

TRACE_EVENT(xprtrdma_qp_event,
	TP_PROTO(
		const struct rpcrdma_ep *ep,
		const struct ib_event *event
	),

	TP_ARGS(ep, event),

	TP_STRUCT__entry(
		__field(unsigned long, event)
		__string(name, event->device->name)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->event = event->event;
		__assign_str(name, event->device->name);
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
		__entry->srcaddr, __entry->dstaddr, __get_str(name),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);

/**
 ** Call events
 **/

TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count
	)
);
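
/*
 * xprtrdma_mr_get and xprtrdma_nomrs below record identical fields:
 * the first marks a successful MR allocation for a request, the
 * second fires when no MR is available. Both capture the owning
 * task and XID so the two events can be correlated.
 */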

TRACE_EVENT(xprtrdma_mr_get,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->req = req;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->req
	)
);

TRACE_EVENT(xprtrdma_nomrs,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->req = req;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->req
	)
);

DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);

TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_noch_pullup, "pullup" },	\
				{ rpcrdma_noch_mapped, "mapped" },	\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })

TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);
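
/*
 * In the xprtrdma_marshal format above, "hdr" is the length of the
 * RPC/RDMA transport header and "xdr=%u/%u/%u" is the head/page/tail
 * breakdown of the RPC send buffer; the two chunk-type strings show
 * how the Call and Reply payloads were marshaled.
 */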

TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(const void *, sc)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->req = req;
		__entry->sc = req->rl_sendctx;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->sc, __entry->num_sge,
		(__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);

TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->rep = rep;
	),

	TP_printk("rep=%p",
		__entry->rep
	)
);

TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_ep->re_receive_count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);

TRACE_EVENT(xprtrdma_post_linv,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(int, status)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->req = req;
		__entry->status = status;
		__entry->xid = be32_to_cpu(req->rl_slot.rq_xid);
	),

	TP_printk("req=%p xid=0x%08x status=%d",
		__entry->req, __entry->xid, __entry->status
	)
);

/**
 ** Completion events
 **/

TRACE_EVENT(xprtrdma_wc_send,
	TP_PROTO(
		const struct rpcrdma_sendctx *sc,
		const struct ib_wc *wc
	),

	TP_ARGS(sc, wc),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(const void *, sc)
		__field(unsigned int, unmap_count)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->req = sc->sc_req;
		__entry->sc = sc;
		__entry->unmap_count = sc->sc_unmap_count;
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)",
		__entry->req, __entry->sc, __entry->unmap_count,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

TRACE_EVENT(xprtrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, rep)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
					    rr_cqe);
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
		__entry->rep, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);

TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);

TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir = mr->mr_dir;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);

TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);

TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);
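
/*
 * Each DEFINE_MR_EVENT(name) below creates a tracepoint named
 * xprtrdma_mr_<name>. For example, DEFINE_MR_EVENT(map) is fired
 * from a call site as:
 *
 *	trace_xprtrdma_mr_map(mr);
 *
 * (Illustrative; all instances take a const struct rpcrdma_mr *.)
 */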

DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(map);
DEFINE_MR_EVENT(unmap);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(recycle);

TRACE_EVENT(xprtrdma_dma_maperr,
	TP_PROTO(
		u64 addr
	),

	TP_ARGS(addr),

	TP_STRUCT__entry(
		__field(u64, addr)
	),

	TP_fast_assign(
		__entry->addr = addr;
	),

	TP_printk("dma addr=0x%llx", __entry->addr)
);

/**
 ** Reply events
 **/

TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		const struct rpcrdma_req *req,
		unsigned int credits
	),

	TP_ARGS(task, rep, req, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->req = req;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits, __entry->rep, __entry->req
	)
);

TRACE_EVENT(xprtrdma_defer_cmp,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->xid = be32_to_cpu(rep->rr_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);

DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
DEFINE_REPLY_EVENT(xprtrdma_reply_short);
DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);

TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);
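
/*
 * The "%u@0x%016llx:0x%08x" format used by xprtrdma_decode_seg
 * below (and by the chunk event classes earlier in this file)
 * reads as length@offset:handle, describing one RDMA segment.
 */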

TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

/**
 ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
 **/

TRACE_EVENT(xprtrdma_op_allocate,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(size_t, callsize)
		__field(size_t, rcvsize)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->callsize = task->tk_rqstp->rq_callsize;
		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
	),

	TP_printk("task:%u@%u req=%p (%zu, %zu)",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->callsize, __entry->rcvsize
	)
);

TRACE_EVENT(xprtrdma_op_free,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->rep = req->rl_reply;
	),

	TP_printk("task:%u@%u req=%p rep=%p",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->rep
	)
);

/**
 ** Callback events
 **/

TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->reqs
	)
);

DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);

TRACE_EVENT(xprtrdma_leaked_rep,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rqst, rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->rep = rep;
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);

/**
 ** Server-side RPC/RDMA events
 **/

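/*
 * Server-side events use the svcrdma_ prefix. Events that record
 * failures follow the convention of appending _err to the event
 * name (see DEFINE_ACCEPT_EVENT and DEFINE_BADREQ_EVENT below).
 */
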
DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);

#define DEFINE_ACCEPT_EVENT(name)					\
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma, \
					long status			\
				),					\
				TP_ARGS(rdma, status))

DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);

TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })

TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);

TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);

DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt, \
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
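
/*
 * The svcrdma_decode_*_err events above dump the four fixed 32-bit
 * words of the received RPC/RDMA header (XID, version, credits,
 * procedure), decoded directly from the receive buffer at *p, to
 * help diagnose malformed or unsupported requests.
 */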

DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

#define DEFINE_SEGMENT_EVENT(name)					\
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,	\
				TP_PROTO(				\
					u32 handle,			\
					u32 length,			\
					u64 offset			\
				),					\
				TP_ARGS(handle, length, offset))

DEFINE_SEGMENT_EVENT(decode_wseg);
DEFINE_SEGMENT_EVENT(encode_rseg);
DEFINE_SEGMENT_EVENT(send_rseg);
DEFINE_SEGMENT_EVENT(encode_wseg);
DEFINE_SEGMENT_EVENT(send_wseg);

DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);

#define DEFINE_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name,	\
				TP_PROTO(				\
					u32 length			\
				),					\
				TP_ARGS(length))

DEFINE_CHUNK_EVENT(send_pzr);
DEFINE_CHUNK_EVENT(encode_write_chunk);
DEFINE_CHUNK_EVENT(send_write_chunk);
DEFINE_CHUNK_EVENT(encode_read_chunk);
DEFINE_CHUNK_EVENT(send_reply_chunk);

TRACE_EVENT(svcrdma_send_read_chunk,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);

DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);

/**
 ** Server-side RDMA API events
 **/

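/*
 * The events in this section instrument the server's use of the
 * RDMA verbs API: DMA mapping, RW contexts, Send/Receive posting,
 * and Send Queue accounting.
 */
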
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);

#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);

TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);

TRACE_EVENT(svcrdma_no_rwctx_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int num_sges
	),

	TP_ARGS(rdma, num_sges),

	TP_STRUCT__entry(
		__field(unsigned int, num_sges)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->num_sges = num_sges;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s num_sges=%u",
		__get_str(addr), __get_str(device), __entry->num_sges
	)
);

TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);

TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);
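
/*
 * svcrdma_send_pullup below records the number of bytes copied when
 * a Send payload cannot be transmitted from its xdr_buf in place
 * and must first be pulled up into a single contiguous buffer.
 */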

TRACE_EVENT(svcrdma_send_pullup,
	TP_PROTO(
		unsigned int len
	),

	TP_ARGS(len),

	TP_STRUCT__entry(
		__field(unsigned int, len)
	),

	TP_fast_assign(
		__entry->len = len;
	),

	TP_printk("len=%u", __entry->len)
);

TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);

TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cq.id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);
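
/*
 * Note: inv_rkey is non-zero only for IB_WR_SEND_WITH_INV, where
 * the Send carries an R_key that the peer's HCA invalidates on
 * receipt (remote invalidation).
 */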

DEFINE_COMPLETION_EVENT(svcrdma_wc_send);

TRACE_EVENT(svcrdma_post_recv,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
	),

	TP_printk("cq.id=%u cid=%d",
		__entry->cq_id, __entry->completion_id
	)
);

DEFINE_COMPLETION_EVENT(svcrdma_wc_receive);

TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);

TRACE_EVENT(svcrdma_post_chunk,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);

DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
DEFINE_COMPLETION_EVENT(svcrdma_wc_write);

TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);

DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);

#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma	\
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
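
/*
 * Note: sc_sq_avail counts the Send Queue entries still available
 * on this connection and sc_sq_depth is the queue's total size, so
 * "sc_sq_avail=%d/%d" reads as available/depth. svcrdma_sq_full and
 * svcrdma_sq_retry fire when the queue is found full and when a
 * waiting sender retries.
 */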

TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);

#endif /* _TRACE_RPCRDMA_H */

#include <trace/define_trace.h>