1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Copyright (c) 2017, 2018 Oracle. All rights reserved. 4 * 5 * Trace point definitions for the "rpcrdma" subsystem. 6 */ 7 #undef TRACE_SYSTEM 8 #define TRACE_SYSTEM rpcrdma 9 10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ) 11 #define _TRACE_RPCRDMA_H 12 13 #include <linux/scatterlist.h> 14 #include <linux/sunrpc/rpc_rdma_cid.h> 15 #include <linux/tracepoint.h> 16 #include <rdma/ib_cm.h> 17 #include <trace/events/rdma.h> 18 19 /** 20 ** Event classes 21 **/ 22 23 DECLARE_EVENT_CLASS(rpcrdma_completion_class, 24 TP_PROTO( 25 const struct ib_wc *wc, 26 const struct rpc_rdma_cid *cid 27 ), 28 29 TP_ARGS(wc, cid), 30 31 TP_STRUCT__entry( 32 __field(u32, cq_id) 33 __field(int, completion_id) 34 __field(unsigned long, status) 35 __field(unsigned int, vendor_err) 36 ), 37 38 TP_fast_assign( 39 __entry->cq_id = cid->ci_queue_id; 40 __entry->completion_id = cid->ci_completion_id; 41 __entry->status = wc->status; 42 if (wc->status) 43 __entry->vendor_err = wc->vendor_err; 44 else 45 __entry->vendor_err = 0; 46 ), 47 48 TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)", 49 __entry->cq_id, __entry->completion_id, 50 rdma_show_wc_status(__entry->status), 51 __entry->status, __entry->vendor_err 52 ) 53 ); 54 55 #define DEFINE_COMPLETION_EVENT(name) \ 56 DEFINE_EVENT(rpcrdma_completion_class, name, \ 57 TP_PROTO( \ 58 const struct ib_wc *wc, \ 59 const struct rpc_rdma_cid *cid \ 60 ), \ 61 TP_ARGS(wc, cid)) 62 63 DECLARE_EVENT_CLASS(xprtrdma_reply_event, 64 TP_PROTO( 65 const struct rpcrdma_rep *rep 66 ), 67 68 TP_ARGS(rep), 69 70 TP_STRUCT__entry( 71 __field(const void *, rep) 72 __field(const void *, r_xprt) 73 __field(u32, xid) 74 __field(u32, version) 75 __field(u32, proc) 76 ), 77 78 TP_fast_assign( 79 __entry->rep = rep; 80 __entry->r_xprt = rep->rr_rxprt; 81 __entry->xid = be32_to_cpu(rep->rr_xid); 82 __entry->version = be32_to_cpu(rep->rr_vers); 83 __entry->proc = be32_to_cpu(rep->rr_proc); 84 ), 85 86 
TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u", 87 __entry->r_xprt, __entry->xid, __entry->rep, 88 __entry->version, __entry->proc 89 ) 90 ); 91 92 #define DEFINE_REPLY_EVENT(name) \ 93 DEFINE_EVENT(xprtrdma_reply_event, name, \ 94 TP_PROTO( \ 95 const struct rpcrdma_rep *rep \ 96 ), \ 97 TP_ARGS(rep)) 98 99 DECLARE_EVENT_CLASS(xprtrdma_rxprt, 100 TP_PROTO( 101 const struct rpcrdma_xprt *r_xprt 102 ), 103 104 TP_ARGS(r_xprt), 105 106 TP_STRUCT__entry( 107 __field(const void *, r_xprt) 108 __string(addr, rpcrdma_addrstr(r_xprt)) 109 __string(port, rpcrdma_portstr(r_xprt)) 110 ), 111 112 TP_fast_assign( 113 __entry->r_xprt = r_xprt; 114 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 115 __assign_str(port, rpcrdma_portstr(r_xprt)); 116 ), 117 118 TP_printk("peer=[%s]:%s r_xprt=%p", 119 __get_str(addr), __get_str(port), __entry->r_xprt 120 ) 121 ); 122 123 #define DEFINE_RXPRT_EVENT(name) \ 124 DEFINE_EVENT(xprtrdma_rxprt, name, \ 125 TP_PROTO( \ 126 const struct rpcrdma_xprt *r_xprt \ 127 ), \ 128 TP_ARGS(r_xprt)) 129 130 DECLARE_EVENT_CLASS(xprtrdma_connect_class, 131 TP_PROTO( 132 const struct rpcrdma_xprt *r_xprt, 133 int rc 134 ), 135 136 TP_ARGS(r_xprt, rc), 137 138 TP_STRUCT__entry( 139 __field(const void *, r_xprt) 140 __field(int, rc) 141 __field(int, connect_status) 142 __string(addr, rpcrdma_addrstr(r_xprt)) 143 __string(port, rpcrdma_portstr(r_xprt)) 144 ), 145 146 TP_fast_assign( 147 __entry->r_xprt = r_xprt; 148 __entry->rc = rc; 149 __entry->connect_status = r_xprt->rx_ep->re_connect_status; 150 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 151 __assign_str(port, rpcrdma_portstr(r_xprt)); 152 ), 153 154 TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d", 155 __get_str(addr), __get_str(port), __entry->r_xprt, 156 __entry->rc, __entry->connect_status 157 ) 158 ); 159 160 #define DEFINE_CONN_EVENT(name) \ 161 DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name, \ 162 TP_PROTO( \ 163 const struct rpcrdma_xprt *r_xprt, \ 164 int rc \ 165 
), \ 166 TP_ARGS(r_xprt, rc)) 167 168 DECLARE_EVENT_CLASS(xprtrdma_rdch_event, 169 TP_PROTO( 170 const struct rpc_task *task, 171 unsigned int pos, 172 struct rpcrdma_mr *mr, 173 int nsegs 174 ), 175 176 TP_ARGS(task, pos, mr, nsegs), 177 178 TP_STRUCT__entry( 179 __field(unsigned int, task_id) 180 __field(unsigned int, client_id) 181 __field(unsigned int, pos) 182 __field(int, nents) 183 __field(u32, handle) 184 __field(u32, length) 185 __field(u64, offset) 186 __field(int, nsegs) 187 ), 188 189 TP_fast_assign( 190 __entry->task_id = task->tk_pid; 191 __entry->client_id = task->tk_client->cl_clid; 192 __entry->pos = pos; 193 __entry->nents = mr->mr_nents; 194 __entry->handle = mr->mr_handle; 195 __entry->length = mr->mr_length; 196 __entry->offset = mr->mr_offset; 197 __entry->nsegs = nsegs; 198 ), 199 200 TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)", 201 __entry->task_id, __entry->client_id, 202 __entry->pos, __entry->length, 203 (unsigned long long)__entry->offset, __entry->handle, 204 __entry->nents < __entry->nsegs ? 
"more" : "last" 205 ) 206 ); 207 208 #define DEFINE_RDCH_EVENT(name) \ 209 DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\ 210 TP_PROTO( \ 211 const struct rpc_task *task, \ 212 unsigned int pos, \ 213 struct rpcrdma_mr *mr, \ 214 int nsegs \ 215 ), \ 216 TP_ARGS(task, pos, mr, nsegs)) 217 218 DECLARE_EVENT_CLASS(xprtrdma_wrch_event, 219 TP_PROTO( 220 const struct rpc_task *task, 221 struct rpcrdma_mr *mr, 222 int nsegs 223 ), 224 225 TP_ARGS(task, mr, nsegs), 226 227 TP_STRUCT__entry( 228 __field(unsigned int, task_id) 229 __field(unsigned int, client_id) 230 __field(int, nents) 231 __field(u32, handle) 232 __field(u32, length) 233 __field(u64, offset) 234 __field(int, nsegs) 235 ), 236 237 TP_fast_assign( 238 __entry->task_id = task->tk_pid; 239 __entry->client_id = task->tk_client->cl_clid; 240 __entry->nents = mr->mr_nents; 241 __entry->handle = mr->mr_handle; 242 __entry->length = mr->mr_length; 243 __entry->offset = mr->mr_offset; 244 __entry->nsegs = nsegs; 245 ), 246 247 TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)", 248 __entry->task_id, __entry->client_id, 249 __entry->length, (unsigned long long)__entry->offset, 250 __entry->handle, 251 __entry->nents < __entry->nsegs ? 
"more" : "last" 252 ) 253 ); 254 255 #define DEFINE_WRCH_EVENT(name) \ 256 DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\ 257 TP_PROTO( \ 258 const struct rpc_task *task, \ 259 struct rpcrdma_mr *mr, \ 260 int nsegs \ 261 ), \ 262 TP_ARGS(task, mr, nsegs)) 263 264 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL); 265 TRACE_DEFINE_ENUM(DMA_TO_DEVICE); 266 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE); 267 TRACE_DEFINE_ENUM(DMA_NONE); 268 269 #define xprtrdma_show_direction(x) \ 270 __print_symbolic(x, \ 271 { DMA_BIDIRECTIONAL, "BIDIR" }, \ 272 { DMA_TO_DEVICE, "TO_DEVICE" }, \ 273 { DMA_FROM_DEVICE, "FROM_DEVICE" }, \ 274 { DMA_NONE, "NONE" }) 275 276 DECLARE_EVENT_CLASS(xprtrdma_mr, 277 TP_PROTO( 278 const struct rpcrdma_mr *mr 279 ), 280 281 TP_ARGS(mr), 282 283 TP_STRUCT__entry( 284 __field(u32, mr_id) 285 __field(int, nents) 286 __field(u32, handle) 287 __field(u32, length) 288 __field(u64, offset) 289 __field(u32, dir) 290 ), 291 292 TP_fast_assign( 293 __entry->mr_id = mr->frwr.fr_mr->res.id; 294 __entry->nents = mr->mr_nents; 295 __entry->handle = mr->mr_handle; 296 __entry->length = mr->mr_length; 297 __entry->offset = mr->mr_offset; 298 __entry->dir = mr->mr_dir; 299 ), 300 301 TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)", 302 __entry->mr_id, __entry->nents, __entry->length, 303 (unsigned long long)__entry->offset, __entry->handle, 304 xprtrdma_show_direction(__entry->dir) 305 ) 306 ); 307 308 #define DEFINE_MR_EVENT(name) \ 309 DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \ 310 TP_PROTO( \ 311 const struct rpcrdma_mr *mr \ 312 ), \ 313 TP_ARGS(mr)) 314 315 DECLARE_EVENT_CLASS(xprtrdma_cb_event, 316 TP_PROTO( 317 const struct rpc_rqst *rqst 318 ), 319 320 TP_ARGS(rqst), 321 322 TP_STRUCT__entry( 323 __field(const void *, rqst) 324 __field(const void *, rep) 325 __field(const void *, req) 326 __field(u32, xid) 327 ), 328 329 TP_fast_assign( 330 __entry->rqst = rqst; 331 __entry->req = rpcr_to_rdmar(rqst); 332 __entry->rep = rpcr_to_rdmar(rqst)->rl_reply; 333 
__entry->xid = be32_to_cpu(rqst->rq_xid); 334 ), 335 336 TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p", 337 __entry->xid, __entry->rqst, __entry->req, __entry->rep 338 ) 339 ); 340 341 #define DEFINE_CB_EVENT(name) \ 342 DEFINE_EVENT(xprtrdma_cb_event, name, \ 343 TP_PROTO( \ 344 const struct rpc_rqst *rqst \ 345 ), \ 346 TP_ARGS(rqst)) 347 348 /** 349 ** Connection events 350 **/ 351 352 TRACE_EVENT(xprtrdma_inline_thresh, 353 TP_PROTO( 354 const struct rpcrdma_ep *ep 355 ), 356 357 TP_ARGS(ep), 358 359 TP_STRUCT__entry( 360 __field(unsigned int, inline_send) 361 __field(unsigned int, inline_recv) 362 __field(unsigned int, max_send) 363 __field(unsigned int, max_recv) 364 __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 365 __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) 366 ), 367 368 TP_fast_assign( 369 const struct rdma_cm_id *id = ep->re_id; 370 371 __entry->inline_send = ep->re_inline_send; 372 __entry->inline_recv = ep->re_inline_recv; 373 __entry->max_send = ep->re_max_inline_send; 374 __entry->max_recv = ep->re_max_inline_recv; 375 memcpy(__entry->srcaddr, &id->route.addr.src_addr, 376 sizeof(struct sockaddr_in6)); 377 memcpy(__entry->dstaddr, &id->route.addr.dst_addr, 378 sizeof(struct sockaddr_in6)); 379 ), 380 381 TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u", 382 __entry->srcaddr, __entry->dstaddr, 383 __entry->inline_send, __entry->inline_recv, 384 __entry->max_send, __entry->max_recv 385 ) 386 ); 387 388 DEFINE_CONN_EVENT(connect); 389 DEFINE_CONN_EVENT(disconnect); 390 391 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc); 392 393 TRACE_EVENT(xprtrdma_op_connect, 394 TP_PROTO( 395 const struct rpcrdma_xprt *r_xprt, 396 unsigned long delay 397 ), 398 399 TP_ARGS(r_xprt, delay), 400 401 TP_STRUCT__entry( 402 __field(const void *, r_xprt) 403 __field(unsigned long, delay) 404 __string(addr, rpcrdma_addrstr(r_xprt)) 405 __string(port, rpcrdma_portstr(r_xprt)) 406 ), 407 408 TP_fast_assign( 409 __entry->r_xprt = 
r_xprt; 410 __entry->delay = delay; 411 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 412 __assign_str(port, rpcrdma_portstr(r_xprt)); 413 ), 414 415 TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu", 416 __get_str(addr), __get_str(port), __entry->r_xprt, 417 __entry->delay 418 ) 419 ); 420 421 422 TRACE_EVENT(xprtrdma_op_set_cto, 423 TP_PROTO( 424 const struct rpcrdma_xprt *r_xprt, 425 unsigned long connect, 426 unsigned long reconnect 427 ), 428 429 TP_ARGS(r_xprt, connect, reconnect), 430 431 TP_STRUCT__entry( 432 __field(const void *, r_xprt) 433 __field(unsigned long, connect) 434 __field(unsigned long, reconnect) 435 __string(addr, rpcrdma_addrstr(r_xprt)) 436 __string(port, rpcrdma_portstr(r_xprt)) 437 ), 438 439 TP_fast_assign( 440 __entry->r_xprt = r_xprt; 441 __entry->connect = connect; 442 __entry->reconnect = reconnect; 443 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 444 __assign_str(port, rpcrdma_portstr(r_xprt)); 445 ), 446 447 TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu", 448 __get_str(addr), __get_str(port), __entry->r_xprt, 449 __entry->connect / HZ, __entry->reconnect / HZ 450 ) 451 ); 452 453 TRACE_EVENT(xprtrdma_qp_event, 454 TP_PROTO( 455 const struct rpcrdma_ep *ep, 456 const struct ib_event *event 457 ), 458 459 TP_ARGS(ep, event), 460 461 TP_STRUCT__entry( 462 __field(unsigned long, event) 463 __string(name, event->device->name) 464 __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 465 __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) 466 ), 467 468 TP_fast_assign( 469 const struct rdma_cm_id *id = ep->re_id; 470 471 __entry->event = event->event; 472 __assign_str(name, event->device->name); 473 memcpy(__entry->srcaddr, &id->route.addr.src_addr, 474 sizeof(struct sockaddr_in6)); 475 memcpy(__entry->dstaddr, &id->route.addr.dst_addr, 476 sizeof(struct sockaddr_in6)); 477 ), 478 479 TP_printk("%pISpc -> %pISpc device=%s %s (%lu)", 480 __entry->srcaddr, __entry->dstaddr, __get_str(name), 481 
rdma_show_ib_event(__entry->event), __entry->event 482 ) 483 ); 484 485 /** 486 ** Call events 487 **/ 488 489 TRACE_EVENT(xprtrdma_createmrs, 490 TP_PROTO( 491 const struct rpcrdma_xprt *r_xprt, 492 unsigned int count 493 ), 494 495 TP_ARGS(r_xprt, count), 496 497 TP_STRUCT__entry( 498 __field(const void *, r_xprt) 499 __string(addr, rpcrdma_addrstr(r_xprt)) 500 __string(port, rpcrdma_portstr(r_xprt)) 501 __field(unsigned int, count) 502 ), 503 504 TP_fast_assign( 505 __entry->r_xprt = r_xprt; 506 __entry->count = count; 507 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 508 __assign_str(port, rpcrdma_portstr(r_xprt)); 509 ), 510 511 TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs", 512 __get_str(addr), __get_str(port), __entry->r_xprt, 513 __entry->count 514 ) 515 ); 516 517 TRACE_EVENT(xprtrdma_mr_get, 518 TP_PROTO( 519 const struct rpcrdma_req *req 520 ), 521 522 TP_ARGS(req), 523 524 TP_STRUCT__entry( 525 __field(const void *, req) 526 __field(unsigned int, task_id) 527 __field(unsigned int, client_id) 528 __field(u32, xid) 529 ), 530 531 TP_fast_assign( 532 const struct rpc_rqst *rqst = &req->rl_slot; 533 534 __entry->req = req; 535 __entry->task_id = rqst->rq_task->tk_pid; 536 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 537 __entry->xid = be32_to_cpu(rqst->rq_xid); 538 ), 539 540 TP_printk("task:%u@%u xid=0x%08x req=%p", 541 __entry->task_id, __entry->client_id, __entry->xid, 542 __entry->req 543 ) 544 ); 545 546 TRACE_EVENT(xprtrdma_nomrs, 547 TP_PROTO( 548 const struct rpcrdma_req *req 549 ), 550 551 TP_ARGS(req), 552 553 TP_STRUCT__entry( 554 __field(const void *, req) 555 __field(unsigned int, task_id) 556 __field(unsigned int, client_id) 557 __field(u32, xid) 558 ), 559 560 TP_fast_assign( 561 const struct rpc_rqst *rqst = &req->rl_slot; 562 563 __entry->req = req; 564 __entry->task_id = rqst->rq_task->tk_pid; 565 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 566 __entry->xid = be32_to_cpu(rqst->rq_xid); 567 ), 568 569 
TP_printk("task:%u@%u xid=0x%08x req=%p", 570 __entry->task_id, __entry->client_id, __entry->xid, 571 __entry->req 572 ) 573 ); 574 575 DEFINE_RDCH_EVENT(read); 576 DEFINE_WRCH_EVENT(write); 577 DEFINE_WRCH_EVENT(reply); 578 579 TRACE_DEFINE_ENUM(rpcrdma_noch); 580 TRACE_DEFINE_ENUM(rpcrdma_noch_pullup); 581 TRACE_DEFINE_ENUM(rpcrdma_noch_mapped); 582 TRACE_DEFINE_ENUM(rpcrdma_readch); 583 TRACE_DEFINE_ENUM(rpcrdma_areadch); 584 TRACE_DEFINE_ENUM(rpcrdma_writech); 585 TRACE_DEFINE_ENUM(rpcrdma_replych); 586 587 #define xprtrdma_show_chunktype(x) \ 588 __print_symbolic(x, \ 589 { rpcrdma_noch, "inline" }, \ 590 { rpcrdma_noch_pullup, "pullup" }, \ 591 { rpcrdma_noch_mapped, "mapped" }, \ 592 { rpcrdma_readch, "read list" }, \ 593 { rpcrdma_areadch, "*read list" }, \ 594 { rpcrdma_writech, "write list" }, \ 595 { rpcrdma_replych, "reply chunk" }) 596 597 TRACE_EVENT(xprtrdma_marshal, 598 TP_PROTO( 599 const struct rpcrdma_req *req, 600 unsigned int rtype, 601 unsigned int wtype 602 ), 603 604 TP_ARGS(req, rtype, wtype), 605 606 TP_STRUCT__entry( 607 __field(unsigned int, task_id) 608 __field(unsigned int, client_id) 609 __field(u32, xid) 610 __field(unsigned int, hdrlen) 611 __field(unsigned int, headlen) 612 __field(unsigned int, pagelen) 613 __field(unsigned int, taillen) 614 __field(unsigned int, rtype) 615 __field(unsigned int, wtype) 616 ), 617 618 TP_fast_assign( 619 const struct rpc_rqst *rqst = &req->rl_slot; 620 621 __entry->task_id = rqst->rq_task->tk_pid; 622 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 623 __entry->xid = be32_to_cpu(rqst->rq_xid); 624 __entry->hdrlen = req->rl_hdrbuf.len; 625 __entry->headlen = rqst->rq_snd_buf.head[0].iov_len; 626 __entry->pagelen = rqst->rq_snd_buf.page_len; 627 __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len; 628 __entry->rtype = rtype; 629 __entry->wtype = wtype; 630 ), 631 632 TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s", 633 __entry->task_id, __entry->client_id, __entry->xid, 634 
__entry->hdrlen, 635 __entry->headlen, __entry->pagelen, __entry->taillen, 636 xprtrdma_show_chunktype(__entry->rtype), 637 xprtrdma_show_chunktype(__entry->wtype) 638 ) 639 ); 640 641 TRACE_EVENT(xprtrdma_marshal_failed, 642 TP_PROTO(const struct rpc_rqst *rqst, 643 int ret 644 ), 645 646 TP_ARGS(rqst, ret), 647 648 TP_STRUCT__entry( 649 __field(unsigned int, task_id) 650 __field(unsigned int, client_id) 651 __field(u32, xid) 652 __field(int, ret) 653 ), 654 655 TP_fast_assign( 656 __entry->task_id = rqst->rq_task->tk_pid; 657 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 658 __entry->xid = be32_to_cpu(rqst->rq_xid); 659 __entry->ret = ret; 660 ), 661 662 TP_printk("task:%u@%u xid=0x%08x: ret=%d", 663 __entry->task_id, __entry->client_id, __entry->xid, 664 __entry->ret 665 ) 666 ); 667 668 TRACE_EVENT(xprtrdma_prepsend_failed, 669 TP_PROTO(const struct rpc_rqst *rqst, 670 int ret 671 ), 672 673 TP_ARGS(rqst, ret), 674 675 TP_STRUCT__entry( 676 __field(unsigned int, task_id) 677 __field(unsigned int, client_id) 678 __field(u32, xid) 679 __field(int, ret) 680 ), 681 682 TP_fast_assign( 683 __entry->task_id = rqst->rq_task->tk_pid; 684 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 685 __entry->xid = be32_to_cpu(rqst->rq_xid); 686 __entry->ret = ret; 687 ), 688 689 TP_printk("task:%u@%u xid=0x%08x: ret=%d", 690 __entry->task_id, __entry->client_id, __entry->xid, 691 __entry->ret 692 ) 693 ); 694 695 TRACE_EVENT(xprtrdma_post_send, 696 TP_PROTO( 697 const struct rpcrdma_req *req 698 ), 699 700 TP_ARGS(req), 701 702 TP_STRUCT__entry( 703 __field(u32, cq_id) 704 __field(int, completion_id) 705 __field(unsigned int, task_id) 706 __field(unsigned int, client_id) 707 __field(int, num_sge) 708 __field(int, signaled) 709 ), 710 711 TP_fast_assign( 712 const struct rpc_rqst *rqst = &req->rl_slot; 713 const struct rpcrdma_sendctx *sc = req->rl_sendctx; 714 715 __entry->cq_id = sc->sc_cid.ci_queue_id; 716 __entry->completion_id = sc->sc_cid.ci_completion_id; 
717 __entry->task_id = rqst->rq_task->tk_pid; 718 __entry->client_id = rqst->rq_task->tk_client ? 719 rqst->rq_task->tk_client->cl_clid : -1; 720 __entry->num_sge = req->rl_wr.num_sge; 721 __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED; 722 ), 723 724 TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s", 725 __entry->task_id, __entry->client_id, 726 __entry->cq_id, __entry->completion_id, 727 __entry->num_sge, (__entry->num_sge == 1 ? "" : "s"), 728 (__entry->signaled ? "signaled" : "") 729 ) 730 ); 731 732 TRACE_EVENT(xprtrdma_post_recv, 733 TP_PROTO( 734 const struct rpcrdma_rep *rep 735 ), 736 737 TP_ARGS(rep), 738 739 TP_STRUCT__entry( 740 __field(u32, cq_id) 741 __field(int, completion_id) 742 ), 743 744 TP_fast_assign( 745 __entry->cq_id = rep->rr_cid.ci_queue_id; 746 __entry->completion_id = rep->rr_cid.ci_completion_id; 747 ), 748 749 TP_printk("cq.id=%d cid=%d", 750 __entry->cq_id, __entry->completion_id 751 ) 752 ); 753 754 TRACE_EVENT(xprtrdma_post_recvs, 755 TP_PROTO( 756 const struct rpcrdma_xprt *r_xprt, 757 unsigned int count, 758 int status 759 ), 760 761 TP_ARGS(r_xprt, count, status), 762 763 TP_STRUCT__entry( 764 __field(const void *, r_xprt) 765 __field(unsigned int, count) 766 __field(int, status) 767 __field(int, posted) 768 __string(addr, rpcrdma_addrstr(r_xprt)) 769 __string(port, rpcrdma_portstr(r_xprt)) 770 ), 771 772 TP_fast_assign( 773 __entry->r_xprt = r_xprt; 774 __entry->count = count; 775 __entry->status = status; 776 __entry->posted = r_xprt->rx_ep->re_receive_count; 777 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 778 __assign_str(port, rpcrdma_portstr(r_xprt)); 779 ), 780 781 TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)", 782 __get_str(addr), __get_str(port), __entry->r_xprt, 783 __entry->count, __entry->posted, __entry->status 784 ) 785 ); 786 787 TRACE_EVENT(xprtrdma_post_linv_err, 788 TP_PROTO( 789 const struct rpcrdma_req *req, 790 int status 791 ), 792 793 TP_ARGS(req, status), 794 795 
	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->status = status;
	),

	TP_printk("task:%u@%u status=%d",
		__entry->task_id, __entry->client_id, __entry->status
	)
);

/**
 ** Completion events
 **/

/* Work-completion events: each records cq.id/cid plus the wc status
 * (vendor_err is captured only when status is non-zero; see the
 * rpcrdma_completion_class definition at the top of this file).
 */
DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_li);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_wake);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_done);

/*
 * Fires when FRWR MR allocation completes; rc reports the result.
 * mr.id comes from the underlying ib_mr's resource id.
 */
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);

/*
 * Fires when an FRWR MR is deregistered; records the MR's mapping
 * (nents, handle, length, offset, DMA direction) along with rc.
 */
TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir = mr->mr_dir;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
xprtrdma_show_direction(__entry->dir), 880 __entry->rc 881 ) 882 ); 883 884 TRACE_EVENT(xprtrdma_frwr_sgerr, 885 TP_PROTO( 886 const struct rpcrdma_mr *mr, 887 int sg_nents 888 ), 889 890 TP_ARGS(mr, sg_nents), 891 892 TP_STRUCT__entry( 893 __field(u32, mr_id) 894 __field(u64, addr) 895 __field(u32, dir) 896 __field(int, nents) 897 ), 898 899 TP_fast_assign( 900 __entry->mr_id = mr->frwr.fr_mr->res.id; 901 __entry->addr = mr->mr_sg->dma_address; 902 __entry->dir = mr->mr_dir; 903 __entry->nents = sg_nents; 904 ), 905 906 TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d", 907 __entry->mr_id, __entry->addr, 908 xprtrdma_show_direction(__entry->dir), 909 __entry->nents 910 ) 911 ); 912 913 TRACE_EVENT(xprtrdma_frwr_maperr, 914 TP_PROTO( 915 const struct rpcrdma_mr *mr, 916 int num_mapped 917 ), 918 919 TP_ARGS(mr, num_mapped), 920 921 TP_STRUCT__entry( 922 __field(u32, mr_id) 923 __field(u64, addr) 924 __field(u32, dir) 925 __field(int, num_mapped) 926 __field(int, nents) 927 ), 928 929 TP_fast_assign( 930 __entry->mr_id = mr->frwr.fr_mr->res.id; 931 __entry->addr = mr->mr_sg->dma_address; 932 __entry->dir = mr->mr_dir; 933 __entry->num_mapped = num_mapped; 934 __entry->nents = mr->mr_nents; 935 ), 936 937 TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d", 938 __entry->mr_id, __entry->addr, 939 xprtrdma_show_direction(__entry->dir), 940 __entry->num_mapped, __entry->nents 941 ) 942 ); 943 944 DEFINE_MR_EVENT(localinv); 945 DEFINE_MR_EVENT(map); 946 DEFINE_MR_EVENT(unmap); 947 DEFINE_MR_EVENT(reminv); 948 DEFINE_MR_EVENT(recycle); 949 950 TRACE_EVENT(xprtrdma_dma_maperr, 951 TP_PROTO( 952 u64 addr 953 ), 954 955 TP_ARGS(addr), 956 957 TP_STRUCT__entry( 958 __field(u64, addr) 959 ), 960 961 TP_fast_assign( 962 __entry->addr = addr; 963 ), 964 965 TP_printk("dma addr=0x%llx\n", __entry->addr) 966 ); 967 968 /** 969 ** Reply events 970 **/ 971 972 TRACE_EVENT(xprtrdma_reply, 973 TP_PROTO( 974 const struct rpc_task *task, 975 const struct rpcrdma_rep *rep, 976 
const struct rpcrdma_req *req, 977 unsigned int credits 978 ), 979 980 TP_ARGS(task, rep, req, credits), 981 982 TP_STRUCT__entry( 983 __field(unsigned int, task_id) 984 __field(unsigned int, client_id) 985 __field(const void *, rep) 986 __field(const void *, req) 987 __field(u32, xid) 988 __field(unsigned int, credits) 989 ), 990 991 TP_fast_assign( 992 __entry->task_id = task->tk_pid; 993 __entry->client_id = task->tk_client->cl_clid; 994 __entry->rep = rep; 995 __entry->req = req; 996 __entry->xid = be32_to_cpu(rep->rr_xid); 997 __entry->credits = credits; 998 ), 999 1000 TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p", 1001 __entry->task_id, __entry->client_id, __entry->xid, 1002 __entry->credits, __entry->rep, __entry->req 1003 ) 1004 ); 1005 1006 TRACE_EVENT(xprtrdma_defer_cmp, 1007 TP_PROTO( 1008 const struct rpcrdma_rep *rep 1009 ), 1010 1011 TP_ARGS(rep), 1012 1013 TP_STRUCT__entry( 1014 __field(unsigned int, task_id) 1015 __field(unsigned int, client_id) 1016 __field(const void *, rep) 1017 __field(u32, xid) 1018 ), 1019 1020 TP_fast_assign( 1021 __entry->task_id = rep->rr_rqst->rq_task->tk_pid; 1022 __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid; 1023 __entry->rep = rep; 1024 __entry->xid = be32_to_cpu(rep->rr_xid); 1025 ), 1026 1027 TP_printk("task:%u@%u xid=0x%08x rep=%p", 1028 __entry->task_id, __entry->client_id, __entry->xid, 1029 __entry->rep 1030 ) 1031 ); 1032 1033 DEFINE_REPLY_EVENT(xprtrdma_reply_vers); 1034 DEFINE_REPLY_EVENT(xprtrdma_reply_rqst); 1035 DEFINE_REPLY_EVENT(xprtrdma_reply_short); 1036 DEFINE_REPLY_EVENT(xprtrdma_reply_hdr); 1037 1038 TRACE_EVENT(xprtrdma_err_vers, 1039 TP_PROTO( 1040 const struct rpc_rqst *rqst, 1041 __be32 *min, 1042 __be32 *max 1043 ), 1044 1045 TP_ARGS(rqst, min, max), 1046 1047 TP_STRUCT__entry( 1048 __field(unsigned int, task_id) 1049 __field(unsigned int, client_id) 1050 __field(u32, xid) 1051 __field(u32, min) 1052 __field(u32, max) 1053 ), 1054 1055 TP_fast_assign( 1056 
__entry->task_id = rqst->rq_task->tk_pid; 1057 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1058 __entry->xid = be32_to_cpu(rqst->rq_xid); 1059 __entry->min = be32_to_cpup(min); 1060 __entry->max = be32_to_cpup(max); 1061 ), 1062 1063 TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]", 1064 __entry->task_id, __entry->client_id, __entry->xid, 1065 __entry->min, __entry->max 1066 ) 1067 ); 1068 1069 TRACE_EVENT(xprtrdma_err_chunk, 1070 TP_PROTO( 1071 const struct rpc_rqst *rqst 1072 ), 1073 1074 TP_ARGS(rqst), 1075 1076 TP_STRUCT__entry( 1077 __field(unsigned int, task_id) 1078 __field(unsigned int, client_id) 1079 __field(u32, xid) 1080 ), 1081 1082 TP_fast_assign( 1083 __entry->task_id = rqst->rq_task->tk_pid; 1084 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1085 __entry->xid = be32_to_cpu(rqst->rq_xid); 1086 ), 1087 1088 TP_printk("task:%u@%u xid=0x%08x", 1089 __entry->task_id, __entry->client_id, __entry->xid 1090 ) 1091 ); 1092 1093 TRACE_EVENT(xprtrdma_err_unrecognized, 1094 TP_PROTO( 1095 const struct rpc_rqst *rqst, 1096 __be32 *procedure 1097 ), 1098 1099 TP_ARGS(rqst, procedure), 1100 1101 TP_STRUCT__entry( 1102 __field(unsigned int, task_id) 1103 __field(unsigned int, client_id) 1104 __field(u32, xid) 1105 __field(u32, procedure) 1106 ), 1107 1108 TP_fast_assign( 1109 __entry->task_id = rqst->rq_task->tk_pid; 1110 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1111 __entry->procedure = be32_to_cpup(procedure); 1112 ), 1113 1114 TP_printk("task:%u@%u xid=0x%08x procedure=%u", 1115 __entry->task_id, __entry->client_id, __entry->xid, 1116 __entry->procedure 1117 ) 1118 ); 1119 1120 TRACE_EVENT(xprtrdma_fixup, 1121 TP_PROTO( 1122 const struct rpc_rqst *rqst, 1123 unsigned long fixup 1124 ), 1125 1126 TP_ARGS(rqst, fixup), 1127 1128 TP_STRUCT__entry( 1129 __field(unsigned int, task_id) 1130 __field(unsigned int, client_id) 1131 __field(unsigned long, fixup) 1132 __field(size_t, headlen) 1133 __field(unsigned int, pagelen) 1134 
__field(size_t, taillen) 1135 ), 1136 1137 TP_fast_assign( 1138 __entry->task_id = rqst->rq_task->tk_pid; 1139 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1140 __entry->fixup = fixup; 1141 __entry->headlen = rqst->rq_rcv_buf.head[0].iov_len; 1142 __entry->pagelen = rqst->rq_rcv_buf.page_len; 1143 __entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len; 1144 ), 1145 1146 TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu", 1147 __entry->task_id, __entry->client_id, __entry->fixup, 1148 __entry->headlen, __entry->pagelen, __entry->taillen 1149 ) 1150 ); 1151 1152 TRACE_EVENT(xprtrdma_decode_seg, 1153 TP_PROTO( 1154 u32 handle, 1155 u32 length, 1156 u64 offset 1157 ), 1158 1159 TP_ARGS(handle, length, offset), 1160 1161 TP_STRUCT__entry( 1162 __field(u32, handle) 1163 __field(u32, length) 1164 __field(u64, offset) 1165 ), 1166 1167 TP_fast_assign( 1168 __entry->handle = handle; 1169 __entry->length = length; 1170 __entry->offset = offset; 1171 ), 1172 1173 TP_printk("%u@0x%016llx:0x%08x", 1174 __entry->length, (unsigned long long)__entry->offset, 1175 __entry->handle 1176 ) 1177 ); 1178 1179 /** 1180 ** Callback events 1181 **/ 1182 1183 TRACE_EVENT(xprtrdma_cb_setup, 1184 TP_PROTO( 1185 const struct rpcrdma_xprt *r_xprt, 1186 unsigned int reqs 1187 ), 1188 1189 TP_ARGS(r_xprt, reqs), 1190 1191 TP_STRUCT__entry( 1192 __field(const void *, r_xprt) 1193 __field(unsigned int, reqs) 1194 __string(addr, rpcrdma_addrstr(r_xprt)) 1195 __string(port, rpcrdma_portstr(r_xprt)) 1196 ), 1197 1198 TP_fast_assign( 1199 __entry->r_xprt = r_xprt; 1200 __entry->reqs = reqs; 1201 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 1202 __assign_str(port, rpcrdma_portstr(r_xprt)); 1203 ), 1204 1205 TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs", 1206 __get_str(addr), __get_str(port), 1207 __entry->r_xprt, __entry->reqs 1208 ) 1209 ); 1210 1211 DEFINE_CB_EVENT(xprtrdma_cb_call); 1212 DEFINE_CB_EVENT(xprtrdma_cb_reply); 1213 1214 TRACE_EVENT(xprtrdma_leaked_rep, 1215 TP_PROTO( 1216 const struct 
rpc_rqst *rqst, 1217 const struct rpcrdma_rep *rep 1218 ), 1219 1220 TP_ARGS(rqst, rep), 1221 1222 TP_STRUCT__entry( 1223 __field(unsigned int, task_id) 1224 __field(unsigned int, client_id) 1225 __field(u32, xid) 1226 __field(const void *, rep) 1227 ), 1228 1229 TP_fast_assign( 1230 __entry->task_id = rqst->rq_task->tk_pid; 1231 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1232 __entry->xid = be32_to_cpu(rqst->rq_xid); 1233 __entry->rep = rep; 1234 ), 1235 1236 TP_printk("task:%u@%u xid=0x%08x rep=%p", 1237 __entry->task_id, __entry->client_id, __entry->xid, 1238 __entry->rep 1239 ) 1240 ); 1241 1242 /** 1243 ** Server-side RPC/RDMA events 1244 **/ 1245 1246 DECLARE_EVENT_CLASS(svcrdma_accept_class, 1247 TP_PROTO( 1248 const struct svcxprt_rdma *rdma, 1249 long status 1250 ), 1251 1252 TP_ARGS(rdma, status), 1253 1254 TP_STRUCT__entry( 1255 __field(long, status) 1256 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1257 ), 1258 1259 TP_fast_assign( 1260 __entry->status = status; 1261 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1262 ), 1263 1264 TP_printk("addr=%s status=%ld", 1265 __get_str(addr), __entry->status 1266 ) 1267 ); 1268 1269 #define DEFINE_ACCEPT_EVENT(name) \ 1270 DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \ 1271 TP_PROTO( \ 1272 const struct svcxprt_rdma *rdma, \ 1273 long status \ 1274 ), \ 1275 TP_ARGS(rdma, status)) 1276 1277 DEFINE_ACCEPT_EVENT(pd); 1278 DEFINE_ACCEPT_EVENT(qp); 1279 DEFINE_ACCEPT_EVENT(fabric); 1280 DEFINE_ACCEPT_EVENT(initdepth); 1281 DEFINE_ACCEPT_EVENT(accept); 1282 1283 TRACE_DEFINE_ENUM(RDMA_MSG); 1284 TRACE_DEFINE_ENUM(RDMA_NOMSG); 1285 TRACE_DEFINE_ENUM(RDMA_MSGP); 1286 TRACE_DEFINE_ENUM(RDMA_DONE); 1287 TRACE_DEFINE_ENUM(RDMA_ERROR); 1288 1289 #define show_rpcrdma_proc(x) \ 1290 __print_symbolic(x, \ 1291 { RDMA_MSG, "RDMA_MSG" }, \ 1292 { RDMA_NOMSG, "RDMA_NOMSG" }, \ 1293 { RDMA_MSGP, "RDMA_MSGP" }, \ 1294 { RDMA_DONE, "RDMA_DONE" }, \ 1295 { RDMA_ERROR, "RDMA_ERROR" }) 1296 1297 
TRACE_EVENT(svcrdma_decode_rqst, 1298 TP_PROTO( 1299 const struct svc_rdma_recv_ctxt *ctxt, 1300 __be32 *p, 1301 unsigned int hdrlen 1302 ), 1303 1304 TP_ARGS(ctxt, p, hdrlen), 1305 1306 TP_STRUCT__entry( 1307 __field(u32, cq_id) 1308 __field(int, completion_id) 1309 __field(u32, xid) 1310 __field(u32, vers) 1311 __field(u32, proc) 1312 __field(u32, credits) 1313 __field(unsigned int, hdrlen) 1314 ), 1315 1316 TP_fast_assign( 1317 __entry->cq_id = ctxt->rc_cid.ci_queue_id; 1318 __entry->completion_id = ctxt->rc_cid.ci_completion_id; 1319 __entry->xid = be32_to_cpup(p++); 1320 __entry->vers = be32_to_cpup(p++); 1321 __entry->credits = be32_to_cpup(p++); 1322 __entry->proc = be32_to_cpup(p); 1323 __entry->hdrlen = hdrlen; 1324 ), 1325 1326 TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u", 1327 __entry->cq_id, __entry->completion_id, 1328 __entry->xid, __entry->vers, __entry->credits, 1329 show_rpcrdma_proc(__entry->proc), __entry->hdrlen) 1330 ); 1331 1332 TRACE_EVENT(svcrdma_decode_short_err, 1333 TP_PROTO( 1334 const struct svc_rdma_recv_ctxt *ctxt, 1335 unsigned int hdrlen 1336 ), 1337 1338 TP_ARGS(ctxt, hdrlen), 1339 1340 TP_STRUCT__entry( 1341 __field(u32, cq_id) 1342 __field(int, completion_id) 1343 __field(unsigned int, hdrlen) 1344 ), 1345 1346 TP_fast_assign( 1347 __entry->cq_id = ctxt->rc_cid.ci_queue_id; 1348 __entry->completion_id = ctxt->rc_cid.ci_completion_id; 1349 __entry->hdrlen = hdrlen; 1350 ), 1351 1352 TP_printk("cq.id=%u cid=%d hdrlen=%u", 1353 __entry->cq_id, __entry->completion_id, 1354 __entry->hdrlen) 1355 ); 1356 1357 DECLARE_EVENT_CLASS(svcrdma_badreq_event, 1358 TP_PROTO( 1359 const struct svc_rdma_recv_ctxt *ctxt, 1360 __be32 *p 1361 ), 1362 1363 TP_ARGS(ctxt, p), 1364 1365 TP_STRUCT__entry( 1366 __field(u32, cq_id) 1367 __field(int, completion_id) 1368 __field(u32, xid) 1369 __field(u32, vers) 1370 __field(u32, proc) 1371 __field(u32, credits) 1372 ), 1373 1374 TP_fast_assign( 1375 __entry->cq_id = 
ctxt->rc_cid.ci_queue_id; 1376 __entry->completion_id = ctxt->rc_cid.ci_completion_id; 1377 __entry->xid = be32_to_cpup(p++); 1378 __entry->vers = be32_to_cpup(p++); 1379 __entry->credits = be32_to_cpup(p++); 1380 __entry->proc = be32_to_cpup(p); 1381 ), 1382 1383 TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u", 1384 __entry->cq_id, __entry->completion_id, 1385 __entry->xid, __entry->vers, __entry->credits, __entry->proc) 1386 ); 1387 1388 #define DEFINE_BADREQ_EVENT(name) \ 1389 DEFINE_EVENT(svcrdma_badreq_event, \ 1390 svcrdma_decode_##name##_err, \ 1391 TP_PROTO( \ 1392 const struct svc_rdma_recv_ctxt *ctxt, \ 1393 __be32 *p \ 1394 ), \ 1395 TP_ARGS(ctxt, p)) 1396 1397 DEFINE_BADREQ_EVENT(badvers); 1398 DEFINE_BADREQ_EVENT(drop); 1399 DEFINE_BADREQ_EVENT(badproc); 1400 DEFINE_BADREQ_EVENT(parse); 1401 1402 DECLARE_EVENT_CLASS(svcrdma_segment_event, 1403 TP_PROTO( 1404 u32 handle, 1405 u32 length, 1406 u64 offset 1407 ), 1408 1409 TP_ARGS(handle, length, offset), 1410 1411 TP_STRUCT__entry( 1412 __field(u32, handle) 1413 __field(u32, length) 1414 __field(u64, offset) 1415 ), 1416 1417 TP_fast_assign( 1418 __entry->handle = handle; 1419 __entry->length = length; 1420 __entry->offset = offset; 1421 ), 1422 1423 TP_printk("%u@0x%016llx:0x%08x", 1424 __entry->length, (unsigned long long)__entry->offset, 1425 __entry->handle 1426 ) 1427 ); 1428 1429 #define DEFINE_SEGMENT_EVENT(name) \ 1430 DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,\ 1431 TP_PROTO( \ 1432 u32 handle, \ 1433 u32 length, \ 1434 u64 offset \ 1435 ), \ 1436 TP_ARGS(handle, length, offset)) 1437 1438 DEFINE_SEGMENT_EVENT(decode_wseg); 1439 DEFINE_SEGMENT_EVENT(encode_rseg); 1440 DEFINE_SEGMENT_EVENT(send_rseg); 1441 DEFINE_SEGMENT_EVENT(encode_wseg); 1442 DEFINE_SEGMENT_EVENT(send_wseg); 1443 1444 DECLARE_EVENT_CLASS(svcrdma_chunk_event, 1445 TP_PROTO( 1446 u32 length 1447 ), 1448 1449 TP_ARGS(length), 1450 1451 TP_STRUCT__entry( 1452 __field(u32, length) 1453 ), 1454 1455 
TP_fast_assign( 1456 __entry->length = length; 1457 ), 1458 1459 TP_printk("length=%u", 1460 __entry->length 1461 ) 1462 ); 1463 1464 #define DEFINE_CHUNK_EVENT(name) \ 1465 DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name, \ 1466 TP_PROTO( \ 1467 u32 length \ 1468 ), \ 1469 TP_ARGS(length)) 1470 1471 DEFINE_CHUNK_EVENT(send_pzr); 1472 DEFINE_CHUNK_EVENT(encode_write_chunk); 1473 DEFINE_CHUNK_EVENT(send_write_chunk); 1474 DEFINE_CHUNK_EVENT(encode_read_chunk); 1475 DEFINE_CHUNK_EVENT(send_reply_chunk); 1476 1477 TRACE_EVENT(svcrdma_send_read_chunk, 1478 TP_PROTO( 1479 u32 length, 1480 u32 position 1481 ), 1482 1483 TP_ARGS(length, position), 1484 1485 TP_STRUCT__entry( 1486 __field(u32, length) 1487 __field(u32, position) 1488 ), 1489 1490 TP_fast_assign( 1491 __entry->length = length; 1492 __entry->position = position; 1493 ), 1494 1495 TP_printk("length=%u position=%u", 1496 __entry->length, __entry->position 1497 ) 1498 ); 1499 1500 DECLARE_EVENT_CLASS(svcrdma_error_event, 1501 TP_PROTO( 1502 __be32 xid 1503 ), 1504 1505 TP_ARGS(xid), 1506 1507 TP_STRUCT__entry( 1508 __field(u32, xid) 1509 ), 1510 1511 TP_fast_assign( 1512 __entry->xid = be32_to_cpu(xid); 1513 ), 1514 1515 TP_printk("xid=0x%08x", 1516 __entry->xid 1517 ) 1518 ); 1519 1520 #define DEFINE_ERROR_EVENT(name) \ 1521 DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name, \ 1522 TP_PROTO( \ 1523 __be32 xid \ 1524 ), \ 1525 TP_ARGS(xid)) 1526 1527 DEFINE_ERROR_EVENT(vers); 1528 DEFINE_ERROR_EVENT(chunk); 1529 1530 /** 1531 ** Server-side RDMA API events 1532 **/ 1533 1534 DECLARE_EVENT_CLASS(svcrdma_dma_map_class, 1535 TP_PROTO( 1536 const struct svcxprt_rdma *rdma, 1537 u64 dma_addr, 1538 u32 length 1539 ), 1540 1541 TP_ARGS(rdma, dma_addr, length), 1542 1543 TP_STRUCT__entry( 1544 __field(u64, dma_addr) 1545 __field(u32, length) 1546 __string(device, rdma->sc_cm_id->device->name) 1547 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1548 ), 1549 1550 TP_fast_assign( 1551 __entry->dma_addr = dma_addr; 1552 
__entry->length = length; 1553 __assign_str(device, rdma->sc_cm_id->device->name); 1554 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1555 ), 1556 1557 TP_printk("addr=%s device=%s dma_addr=%llu length=%u", 1558 __get_str(addr), __get_str(device), 1559 __entry->dma_addr, __entry->length 1560 ) 1561 ); 1562 1563 #define DEFINE_SVC_DMA_EVENT(name) \ 1564 DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \ 1565 TP_PROTO( \ 1566 const struct svcxprt_rdma *rdma,\ 1567 u64 dma_addr, \ 1568 u32 length \ 1569 ), \ 1570 TP_ARGS(rdma, dma_addr, length)) 1571 1572 DEFINE_SVC_DMA_EVENT(dma_map_page); 1573 DEFINE_SVC_DMA_EVENT(dma_unmap_page); 1574 1575 TRACE_EVENT(svcrdma_dma_map_rw_err, 1576 TP_PROTO( 1577 const struct svcxprt_rdma *rdma, 1578 unsigned int nents, 1579 int status 1580 ), 1581 1582 TP_ARGS(rdma, nents, status), 1583 1584 TP_STRUCT__entry( 1585 __field(int, status) 1586 __field(unsigned int, nents) 1587 __string(device, rdma->sc_cm_id->device->name) 1588 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1589 ), 1590 1591 TP_fast_assign( 1592 __entry->status = status; 1593 __entry->nents = nents; 1594 __assign_str(device, rdma->sc_cm_id->device->name); 1595 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1596 ), 1597 1598 TP_printk("addr=%s device=%s nents=%u status=%d", 1599 __get_str(addr), __get_str(device), __entry->nents, 1600 __entry->status 1601 ) 1602 ); 1603 1604 TRACE_EVENT(svcrdma_no_rwctx_err, 1605 TP_PROTO( 1606 const struct svcxprt_rdma *rdma, 1607 unsigned int num_sges 1608 ), 1609 1610 TP_ARGS(rdma, num_sges), 1611 1612 TP_STRUCT__entry( 1613 __field(unsigned int, num_sges) 1614 __string(device, rdma->sc_cm_id->device->name) 1615 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1616 ), 1617 1618 TP_fast_assign( 1619 __entry->num_sges = num_sges; 1620 __assign_str(device, rdma->sc_cm_id->device->name); 1621 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1622 ), 1623 1624 TP_printk("addr=%s device=%s num_sges=%d", 1625 __get_str(addr), 
__get_str(device), __entry->num_sges 1626 ) 1627 ); 1628 1629 TRACE_EVENT(svcrdma_page_overrun_err, 1630 TP_PROTO( 1631 const struct svcxprt_rdma *rdma, 1632 const struct svc_rqst *rqst, 1633 unsigned int pageno 1634 ), 1635 1636 TP_ARGS(rdma, rqst, pageno), 1637 1638 TP_STRUCT__entry( 1639 __field(unsigned int, pageno) 1640 __field(u32, xid) 1641 __string(device, rdma->sc_cm_id->device->name) 1642 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1643 ), 1644 1645 TP_fast_assign( 1646 __entry->pageno = pageno; 1647 __entry->xid = __be32_to_cpu(rqst->rq_xid); 1648 __assign_str(device, rdma->sc_cm_id->device->name); 1649 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1650 ), 1651 1652 TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr), 1653 __get_str(device), __entry->xid, __entry->pageno 1654 ) 1655 ); 1656 1657 TRACE_EVENT(svcrdma_small_wrch_err, 1658 TP_PROTO( 1659 const struct svcxprt_rdma *rdma, 1660 unsigned int remaining, 1661 unsigned int seg_no, 1662 unsigned int num_segs 1663 ), 1664 1665 TP_ARGS(rdma, remaining, seg_no, num_segs), 1666 1667 TP_STRUCT__entry( 1668 __field(unsigned int, remaining) 1669 __field(unsigned int, seg_no) 1670 __field(unsigned int, num_segs) 1671 __string(device, rdma->sc_cm_id->device->name) 1672 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1673 ), 1674 1675 TP_fast_assign( 1676 __entry->remaining = remaining; 1677 __entry->seg_no = seg_no; 1678 __entry->num_segs = num_segs; 1679 __assign_str(device, rdma->sc_cm_id->device->name); 1680 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1681 ), 1682 1683 TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u", 1684 __get_str(addr), __get_str(device), __entry->remaining, 1685 __entry->seg_no, __entry->num_segs 1686 ) 1687 ); 1688 1689 TRACE_EVENT(svcrdma_send_pullup, 1690 TP_PROTO( 1691 unsigned int len 1692 ), 1693 1694 TP_ARGS(len), 1695 1696 TP_STRUCT__entry( 1697 __field(unsigned int, len) 1698 ), 1699 1700 TP_fast_assign( 1701 __entry->len = len; 1702 ), 
1703 1704 TP_printk("len=%u", __entry->len) 1705 ); 1706 1707 TRACE_EVENT(svcrdma_send_err, 1708 TP_PROTO( 1709 const struct svc_rqst *rqst, 1710 int status 1711 ), 1712 1713 TP_ARGS(rqst, status), 1714 1715 TP_STRUCT__entry( 1716 __field(int, status) 1717 __field(u32, xid) 1718 __string(addr, rqst->rq_xprt->xpt_remotebuf) 1719 ), 1720 1721 TP_fast_assign( 1722 __entry->status = status; 1723 __entry->xid = __be32_to_cpu(rqst->rq_xid); 1724 __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); 1725 ), 1726 1727 TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr), 1728 __entry->xid, __entry->status 1729 ) 1730 ); 1731 1732 TRACE_EVENT(svcrdma_post_send, 1733 TP_PROTO( 1734 const struct svc_rdma_send_ctxt *ctxt 1735 ), 1736 1737 TP_ARGS(ctxt), 1738 1739 TP_STRUCT__entry( 1740 __field(u32, cq_id) 1741 __field(int, completion_id) 1742 __field(unsigned int, num_sge) 1743 __field(u32, inv_rkey) 1744 ), 1745 1746 TP_fast_assign( 1747 const struct ib_send_wr *wr = &ctxt->sc_send_wr; 1748 1749 __entry->cq_id = ctxt->sc_cid.ci_queue_id; 1750 __entry->completion_id = ctxt->sc_cid.ci_completion_id; 1751 __entry->num_sge = wr->num_sge; 1752 __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ? 
1753 wr->ex.invalidate_rkey : 0; 1754 ), 1755 1756 TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x", 1757 __entry->cq_id, __entry->completion_id, 1758 __entry->num_sge, __entry->inv_rkey 1759 ) 1760 ); 1761 1762 DEFINE_COMPLETION_EVENT(svcrdma_wc_send); 1763 1764 TRACE_EVENT(svcrdma_post_recv, 1765 TP_PROTO( 1766 const struct svc_rdma_recv_ctxt *ctxt 1767 ), 1768 1769 TP_ARGS(ctxt), 1770 1771 TP_STRUCT__entry( 1772 __field(u32, cq_id) 1773 __field(int, completion_id) 1774 ), 1775 1776 TP_fast_assign( 1777 __entry->cq_id = ctxt->rc_cid.ci_queue_id; 1778 __entry->completion_id = ctxt->rc_cid.ci_completion_id; 1779 ), 1780 1781 TP_printk("cq.id=%d cid=%d", 1782 __entry->cq_id, __entry->completion_id 1783 ) 1784 ); 1785 1786 DEFINE_COMPLETION_EVENT(svcrdma_wc_receive); 1787 1788 TRACE_EVENT(svcrdma_rq_post_err, 1789 TP_PROTO( 1790 const struct svcxprt_rdma *rdma, 1791 int status 1792 ), 1793 1794 TP_ARGS(rdma, status), 1795 1796 TP_STRUCT__entry( 1797 __field(int, status) 1798 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1799 ), 1800 1801 TP_fast_assign( 1802 __entry->status = status; 1803 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1804 ), 1805 1806 TP_printk("addr=%s status=%d", 1807 __get_str(addr), __entry->status 1808 ) 1809 ); 1810 1811 TRACE_EVENT(svcrdma_post_chunk, 1812 TP_PROTO( 1813 const struct rpc_rdma_cid *cid, 1814 int sqecount 1815 ), 1816 1817 TP_ARGS(cid, sqecount), 1818 1819 TP_STRUCT__entry( 1820 __field(u32, cq_id) 1821 __field(int, completion_id) 1822 __field(int, sqecount) 1823 ), 1824 1825 TP_fast_assign( 1826 __entry->cq_id = cid->ci_queue_id; 1827 __entry->completion_id = cid->ci_completion_id; 1828 __entry->sqecount = sqecount; 1829 ), 1830 1831 TP_printk("cq.id=%u cid=%d sqecount=%d", 1832 __entry->cq_id, __entry->completion_id, 1833 __entry->sqecount 1834 ) 1835 ); 1836 1837 DEFINE_COMPLETION_EVENT(svcrdma_wc_read); 1838 DEFINE_COMPLETION_EVENT(svcrdma_wc_write); 1839 1840 TRACE_EVENT(svcrdma_qp_error, 1841 TP_PROTO( 1842 const 
struct ib_event *event, 1843 const struct sockaddr *sap 1844 ), 1845 1846 TP_ARGS(event, sap), 1847 1848 TP_STRUCT__entry( 1849 __field(unsigned int, event) 1850 __string(device, event->device->name) 1851 __array(__u8, addr, INET6_ADDRSTRLEN + 10) 1852 ), 1853 1854 TP_fast_assign( 1855 __entry->event = event->event; 1856 __assign_str(device, event->device->name); 1857 snprintf(__entry->addr, sizeof(__entry->addr) - 1, 1858 "%pISpc", sap); 1859 ), 1860 1861 TP_printk("addr=%s dev=%s event=%s (%u)", 1862 __entry->addr, __get_str(device), 1863 rdma_show_ib_event(__entry->event), __entry->event 1864 ) 1865 ); 1866 1867 DECLARE_EVENT_CLASS(svcrdma_sendqueue_event, 1868 TP_PROTO( 1869 const struct svcxprt_rdma *rdma 1870 ), 1871 1872 TP_ARGS(rdma), 1873 1874 TP_STRUCT__entry( 1875 __field(int, avail) 1876 __field(int, depth) 1877 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1878 ), 1879 1880 TP_fast_assign( 1881 __entry->avail = atomic_read(&rdma->sc_sq_avail); 1882 __entry->depth = rdma->sc_sq_depth; 1883 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1884 ), 1885 1886 TP_printk("addr=%s sc_sq_avail=%d/%d", 1887 __get_str(addr), __entry->avail, __entry->depth 1888 ) 1889 ); 1890 1891 #define DEFINE_SQ_EVENT(name) \ 1892 DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\ 1893 TP_PROTO( \ 1894 const struct svcxprt_rdma *rdma \ 1895 ), \ 1896 TP_ARGS(rdma)) 1897 1898 DEFINE_SQ_EVENT(full); 1899 DEFINE_SQ_EVENT(retry); 1900 1901 TRACE_EVENT(svcrdma_sq_post_err, 1902 TP_PROTO( 1903 const struct svcxprt_rdma *rdma, 1904 int status 1905 ), 1906 1907 TP_ARGS(rdma, status), 1908 1909 TP_STRUCT__entry( 1910 __field(int, avail) 1911 __field(int, depth) 1912 __field(int, status) 1913 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1914 ), 1915 1916 TP_fast_assign( 1917 __entry->avail = atomic_read(&rdma->sc_sq_avail); 1918 __entry->depth = rdma->sc_sq_depth; 1919 __entry->status = status; 1920 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1921 ), 1922 1923 TP_printk("addr=%s 
sc_sq_avail=%d/%d status=%d", 1924 __get_str(addr), __entry->avail, __entry->depth, 1925 __entry->status 1926 ) 1927 ); 1928 1929 #endif /* _TRACE_RPCRDMA_H */ 1930 1931 #include <trace/define_trace.h> 1932