1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Copyright (c) 2017, 2018 Oracle. All rights reserved. 4 * 5 * Trace point definitions for the "rpcrdma" subsystem. 6 */ 7 #undef TRACE_SYSTEM 8 #define TRACE_SYSTEM rpcrdma 9 10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ) 11 #define _TRACE_RPCRDMA_H 12 13 #include <linux/scatterlist.h> 14 #include <linux/sunrpc/rpc_rdma_cid.h> 15 #include <linux/tracepoint.h> 16 #include <rdma/ib_cm.h> 17 #include <trace/events/rdma.h> 18 19 /** 20 ** Event classes 21 **/ 22 23 DECLARE_EVENT_CLASS(rpcrdma_completion_class, 24 TP_PROTO( 25 const struct ib_wc *wc, 26 const struct rpc_rdma_cid *cid 27 ), 28 29 TP_ARGS(wc, cid), 30 31 TP_STRUCT__entry( 32 __field(u32, cq_id) 33 __field(int, completion_id) 34 __field(unsigned long, status) 35 __field(unsigned int, vendor_err) 36 ), 37 38 TP_fast_assign( 39 __entry->cq_id = cid->ci_queue_id; 40 __entry->completion_id = cid->ci_completion_id; 41 __entry->status = wc->status; 42 if (wc->status) 43 __entry->vendor_err = wc->vendor_err; 44 else 45 __entry->vendor_err = 0; 46 ), 47 48 TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)", 49 __entry->cq_id, __entry->completion_id, 50 rdma_show_wc_status(__entry->status), 51 __entry->status, __entry->vendor_err 52 ) 53 ); 54 55 #define DEFINE_COMPLETION_EVENT(name) \ 56 DEFINE_EVENT(rpcrdma_completion_class, name, \ 57 TP_PROTO( \ 58 const struct ib_wc *wc, \ 59 const struct rpc_rdma_cid *cid \ 60 ), \ 61 TP_ARGS(wc, cid)) 62 63 DECLARE_EVENT_CLASS(xprtrdma_reply_class, 64 TP_PROTO( 65 const struct rpcrdma_rep *rep 66 ), 67 68 TP_ARGS(rep), 69 70 TP_STRUCT__entry( 71 __field(u32, xid) 72 __field(u32, version) 73 __field(u32, proc) 74 __string(addr, rpcrdma_addrstr(rep->rr_rxprt)) 75 __string(port, rpcrdma_portstr(rep->rr_rxprt)) 76 ), 77 78 TP_fast_assign( 79 __entry->xid = be32_to_cpu(rep->rr_xid); 80 __entry->version = be32_to_cpu(rep->rr_vers); 81 __entry->proc = be32_to_cpu(rep->rr_proc); 82 __assign_str(addr, 
rpcrdma_addrstr(rep->rr_rxprt)); 83 __assign_str(port, rpcrdma_portstr(rep->rr_rxprt)); 84 ), 85 86 TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u", 87 __get_str(addr), __get_str(port), 88 __entry->xid, __entry->version, __entry->proc 89 ) 90 ); 91 92 #define DEFINE_REPLY_EVENT(name) \ 93 DEFINE_EVENT(xprtrdma_reply_class, \ 94 xprtrdma_reply_##name##_err, \ 95 TP_PROTO( \ 96 const struct rpcrdma_rep *rep \ 97 ), \ 98 TP_ARGS(rep)) 99 100 DECLARE_EVENT_CLASS(xprtrdma_rxprt, 101 TP_PROTO( 102 const struct rpcrdma_xprt *r_xprt 103 ), 104 105 TP_ARGS(r_xprt), 106 107 TP_STRUCT__entry( 108 __field(const void *, r_xprt) 109 __string(addr, rpcrdma_addrstr(r_xprt)) 110 __string(port, rpcrdma_portstr(r_xprt)) 111 ), 112 113 TP_fast_assign( 114 __entry->r_xprt = r_xprt; 115 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 116 __assign_str(port, rpcrdma_portstr(r_xprt)); 117 ), 118 119 TP_printk("peer=[%s]:%s r_xprt=%p", 120 __get_str(addr), __get_str(port), __entry->r_xprt 121 ) 122 ); 123 124 #define DEFINE_RXPRT_EVENT(name) \ 125 DEFINE_EVENT(xprtrdma_rxprt, name, \ 126 TP_PROTO( \ 127 const struct rpcrdma_xprt *r_xprt \ 128 ), \ 129 TP_ARGS(r_xprt)) 130 131 DECLARE_EVENT_CLASS(xprtrdma_connect_class, 132 TP_PROTO( 133 const struct rpcrdma_xprt *r_xprt, 134 int rc 135 ), 136 137 TP_ARGS(r_xprt, rc), 138 139 TP_STRUCT__entry( 140 __field(const void *, r_xprt) 141 __field(int, rc) 142 __field(int, connect_status) 143 __string(addr, rpcrdma_addrstr(r_xprt)) 144 __string(port, rpcrdma_portstr(r_xprt)) 145 ), 146 147 TP_fast_assign( 148 __entry->r_xprt = r_xprt; 149 __entry->rc = rc; 150 __entry->connect_status = r_xprt->rx_ep->re_connect_status; 151 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 152 __assign_str(port, rpcrdma_portstr(r_xprt)); 153 ), 154 155 TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d", 156 __get_str(addr), __get_str(port), __entry->r_xprt, 157 __entry->rc, __entry->connect_status 158 ) 159 ); 160 161 #define DEFINE_CONN_EVENT(name) \ 162 
DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name, \ 163 TP_PROTO( \ 164 const struct rpcrdma_xprt *r_xprt, \ 165 int rc \ 166 ), \ 167 TP_ARGS(r_xprt, rc)) 168 169 DECLARE_EVENT_CLASS(xprtrdma_rdch_event, 170 TP_PROTO( 171 const struct rpc_task *task, 172 unsigned int pos, 173 struct rpcrdma_mr *mr, 174 int nsegs 175 ), 176 177 TP_ARGS(task, pos, mr, nsegs), 178 179 TP_STRUCT__entry( 180 __field(unsigned int, task_id) 181 __field(unsigned int, client_id) 182 __field(unsigned int, pos) 183 __field(int, nents) 184 __field(u32, handle) 185 __field(u32, length) 186 __field(u64, offset) 187 __field(int, nsegs) 188 ), 189 190 TP_fast_assign( 191 __entry->task_id = task->tk_pid; 192 __entry->client_id = task->tk_client->cl_clid; 193 __entry->pos = pos; 194 __entry->nents = mr->mr_nents; 195 __entry->handle = mr->mr_handle; 196 __entry->length = mr->mr_length; 197 __entry->offset = mr->mr_offset; 198 __entry->nsegs = nsegs; 199 ), 200 201 TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)", 202 __entry->task_id, __entry->client_id, 203 __entry->pos, __entry->length, 204 (unsigned long long)__entry->offset, __entry->handle, 205 __entry->nents < __entry->nsegs ? 
"more" : "last" 206 ) 207 ); 208 209 #define DEFINE_RDCH_EVENT(name) \ 210 DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\ 211 TP_PROTO( \ 212 const struct rpc_task *task, \ 213 unsigned int pos, \ 214 struct rpcrdma_mr *mr, \ 215 int nsegs \ 216 ), \ 217 TP_ARGS(task, pos, mr, nsegs)) 218 219 DECLARE_EVENT_CLASS(xprtrdma_wrch_event, 220 TP_PROTO( 221 const struct rpc_task *task, 222 struct rpcrdma_mr *mr, 223 int nsegs 224 ), 225 226 TP_ARGS(task, mr, nsegs), 227 228 TP_STRUCT__entry( 229 __field(unsigned int, task_id) 230 __field(unsigned int, client_id) 231 __field(int, nents) 232 __field(u32, handle) 233 __field(u32, length) 234 __field(u64, offset) 235 __field(int, nsegs) 236 ), 237 238 TP_fast_assign( 239 __entry->task_id = task->tk_pid; 240 __entry->client_id = task->tk_client->cl_clid; 241 __entry->nents = mr->mr_nents; 242 __entry->handle = mr->mr_handle; 243 __entry->length = mr->mr_length; 244 __entry->offset = mr->mr_offset; 245 __entry->nsegs = nsegs; 246 ), 247 248 TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)", 249 __entry->task_id, __entry->client_id, 250 __entry->length, (unsigned long long)__entry->offset, 251 __entry->handle, 252 __entry->nents < __entry->nsegs ? 
"more" : "last" 253 ) 254 ); 255 256 #define DEFINE_WRCH_EVENT(name) \ 257 DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\ 258 TP_PROTO( \ 259 const struct rpc_task *task, \ 260 struct rpcrdma_mr *mr, \ 261 int nsegs \ 262 ), \ 263 TP_ARGS(task, mr, nsegs)) 264 265 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL); 266 TRACE_DEFINE_ENUM(DMA_TO_DEVICE); 267 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE); 268 TRACE_DEFINE_ENUM(DMA_NONE); 269 270 #define xprtrdma_show_direction(x) \ 271 __print_symbolic(x, \ 272 { DMA_BIDIRECTIONAL, "BIDIR" }, \ 273 { DMA_TO_DEVICE, "TO_DEVICE" }, \ 274 { DMA_FROM_DEVICE, "FROM_DEVICE" }, \ 275 { DMA_NONE, "NONE" }) 276 277 DECLARE_EVENT_CLASS(xprtrdma_mr, 278 TP_PROTO( 279 const struct rpcrdma_mr *mr 280 ), 281 282 TP_ARGS(mr), 283 284 TP_STRUCT__entry( 285 __field(u32, mr_id) 286 __field(int, nents) 287 __field(u32, handle) 288 __field(u32, length) 289 __field(u64, offset) 290 __field(u32, dir) 291 ), 292 293 TP_fast_assign( 294 __entry->mr_id = mr->frwr.fr_mr->res.id; 295 __entry->nents = mr->mr_nents; 296 __entry->handle = mr->mr_handle; 297 __entry->length = mr->mr_length; 298 __entry->offset = mr->mr_offset; 299 __entry->dir = mr->mr_dir; 300 ), 301 302 TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)", 303 __entry->mr_id, __entry->nents, __entry->length, 304 (unsigned long long)__entry->offset, __entry->handle, 305 xprtrdma_show_direction(__entry->dir) 306 ) 307 ); 308 309 #define DEFINE_MR_EVENT(name) \ 310 DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \ 311 TP_PROTO( \ 312 const struct rpcrdma_mr *mr \ 313 ), \ 314 TP_ARGS(mr)) 315 316 DECLARE_EVENT_CLASS(xprtrdma_cb_event, 317 TP_PROTO( 318 const struct rpc_rqst *rqst 319 ), 320 321 TP_ARGS(rqst), 322 323 TP_STRUCT__entry( 324 __field(const void *, rqst) 325 __field(const void *, rep) 326 __field(const void *, req) 327 __field(u32, xid) 328 ), 329 330 TP_fast_assign( 331 __entry->rqst = rqst; 332 __entry->req = rpcr_to_rdmar(rqst); 333 __entry->rep = rpcr_to_rdmar(rqst)->rl_reply; 334 
__entry->xid = be32_to_cpu(rqst->rq_xid); 335 ), 336 337 TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p", 338 __entry->xid, __entry->rqst, __entry->req, __entry->rep 339 ) 340 ); 341 342 #define DEFINE_CB_EVENT(name) \ 343 DEFINE_EVENT(xprtrdma_cb_event, name, \ 344 TP_PROTO( \ 345 const struct rpc_rqst *rqst \ 346 ), \ 347 TP_ARGS(rqst)) 348 349 /** 350 ** Connection events 351 **/ 352 353 TRACE_EVENT(xprtrdma_inline_thresh, 354 TP_PROTO( 355 const struct rpcrdma_ep *ep 356 ), 357 358 TP_ARGS(ep), 359 360 TP_STRUCT__entry( 361 __field(unsigned int, inline_send) 362 __field(unsigned int, inline_recv) 363 __field(unsigned int, max_send) 364 __field(unsigned int, max_recv) 365 __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 366 __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) 367 ), 368 369 TP_fast_assign( 370 const struct rdma_cm_id *id = ep->re_id; 371 372 __entry->inline_send = ep->re_inline_send; 373 __entry->inline_recv = ep->re_inline_recv; 374 __entry->max_send = ep->re_max_inline_send; 375 __entry->max_recv = ep->re_max_inline_recv; 376 memcpy(__entry->srcaddr, &id->route.addr.src_addr, 377 sizeof(struct sockaddr_in6)); 378 memcpy(__entry->dstaddr, &id->route.addr.dst_addr, 379 sizeof(struct sockaddr_in6)); 380 ), 381 382 TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u", 383 __entry->srcaddr, __entry->dstaddr, 384 __entry->inline_send, __entry->inline_recv, 385 __entry->max_send, __entry->max_recv 386 ) 387 ); 388 389 DEFINE_CONN_EVENT(connect); 390 DEFINE_CONN_EVENT(disconnect); 391 392 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc); 393 394 TRACE_EVENT(xprtrdma_op_connect, 395 TP_PROTO( 396 const struct rpcrdma_xprt *r_xprt, 397 unsigned long delay 398 ), 399 400 TP_ARGS(r_xprt, delay), 401 402 TP_STRUCT__entry( 403 __field(const void *, r_xprt) 404 __field(unsigned long, delay) 405 __string(addr, rpcrdma_addrstr(r_xprt)) 406 __string(port, rpcrdma_portstr(r_xprt)) 407 ), 408 409 TP_fast_assign( 410 __entry->r_xprt = 
r_xprt; 411 __entry->delay = delay; 412 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 413 __assign_str(port, rpcrdma_portstr(r_xprt)); 414 ), 415 416 TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu", 417 __get_str(addr), __get_str(port), __entry->r_xprt, 418 __entry->delay 419 ) 420 ); 421 422 423 TRACE_EVENT(xprtrdma_op_set_cto, 424 TP_PROTO( 425 const struct rpcrdma_xprt *r_xprt, 426 unsigned long connect, 427 unsigned long reconnect 428 ), 429 430 TP_ARGS(r_xprt, connect, reconnect), 431 432 TP_STRUCT__entry( 433 __field(const void *, r_xprt) 434 __field(unsigned long, connect) 435 __field(unsigned long, reconnect) 436 __string(addr, rpcrdma_addrstr(r_xprt)) 437 __string(port, rpcrdma_portstr(r_xprt)) 438 ), 439 440 TP_fast_assign( 441 __entry->r_xprt = r_xprt; 442 __entry->connect = connect; 443 __entry->reconnect = reconnect; 444 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 445 __assign_str(port, rpcrdma_portstr(r_xprt)); 446 ), 447 448 TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu", 449 __get_str(addr), __get_str(port), __entry->r_xprt, 450 __entry->connect / HZ, __entry->reconnect / HZ 451 ) 452 ); 453 454 TRACE_EVENT(xprtrdma_qp_event, 455 TP_PROTO( 456 const struct rpcrdma_ep *ep, 457 const struct ib_event *event 458 ), 459 460 TP_ARGS(ep, event), 461 462 TP_STRUCT__entry( 463 __field(unsigned long, event) 464 __string(name, event->device->name) 465 __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 466 __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) 467 ), 468 469 TP_fast_assign( 470 const struct rdma_cm_id *id = ep->re_id; 471 472 __entry->event = event->event; 473 __assign_str(name, event->device->name); 474 memcpy(__entry->srcaddr, &id->route.addr.src_addr, 475 sizeof(struct sockaddr_in6)); 476 memcpy(__entry->dstaddr, &id->route.addr.dst_addr, 477 sizeof(struct sockaddr_in6)); 478 ), 479 480 TP_printk("%pISpc -> %pISpc device=%s %s (%lu)", 481 __entry->srcaddr, __entry->dstaddr, __get_str(name), 482 
rdma_show_ib_event(__entry->event), __entry->event 483 ) 484 ); 485 486 /** 487 ** Call events 488 **/ 489 490 TRACE_EVENT(xprtrdma_createmrs, 491 TP_PROTO( 492 const struct rpcrdma_xprt *r_xprt, 493 unsigned int count 494 ), 495 496 TP_ARGS(r_xprt, count), 497 498 TP_STRUCT__entry( 499 __field(const void *, r_xprt) 500 __string(addr, rpcrdma_addrstr(r_xprt)) 501 __string(port, rpcrdma_portstr(r_xprt)) 502 __field(unsigned int, count) 503 ), 504 505 TP_fast_assign( 506 __entry->r_xprt = r_xprt; 507 __entry->count = count; 508 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 509 __assign_str(port, rpcrdma_portstr(r_xprt)); 510 ), 511 512 TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs", 513 __get_str(addr), __get_str(port), __entry->r_xprt, 514 __entry->count 515 ) 516 ); 517 518 TRACE_EVENT(xprtrdma_mr_get, 519 TP_PROTO( 520 const struct rpcrdma_req *req 521 ), 522 523 TP_ARGS(req), 524 525 TP_STRUCT__entry( 526 __field(const void *, req) 527 __field(unsigned int, task_id) 528 __field(unsigned int, client_id) 529 __field(u32, xid) 530 ), 531 532 TP_fast_assign( 533 const struct rpc_rqst *rqst = &req->rl_slot; 534 535 __entry->req = req; 536 __entry->task_id = rqst->rq_task->tk_pid; 537 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 538 __entry->xid = be32_to_cpu(rqst->rq_xid); 539 ), 540 541 TP_printk("task:%u@%u xid=0x%08x req=%p", 542 __entry->task_id, __entry->client_id, __entry->xid, 543 __entry->req 544 ) 545 ); 546 547 TRACE_EVENT(xprtrdma_nomrs, 548 TP_PROTO( 549 const struct rpcrdma_req *req 550 ), 551 552 TP_ARGS(req), 553 554 TP_STRUCT__entry( 555 __field(const void *, req) 556 __field(unsigned int, task_id) 557 __field(unsigned int, client_id) 558 __field(u32, xid) 559 ), 560 561 TP_fast_assign( 562 const struct rpc_rqst *rqst = &req->rl_slot; 563 564 __entry->req = req; 565 __entry->task_id = rqst->rq_task->tk_pid; 566 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 567 __entry->xid = be32_to_cpu(rqst->rq_xid); 568 ), 569 570 
TP_printk("task:%u@%u xid=0x%08x req=%p", 571 __entry->task_id, __entry->client_id, __entry->xid, 572 __entry->req 573 ) 574 ); 575 576 DEFINE_RDCH_EVENT(read); 577 DEFINE_WRCH_EVENT(write); 578 DEFINE_WRCH_EVENT(reply); 579 580 TRACE_DEFINE_ENUM(rpcrdma_noch); 581 TRACE_DEFINE_ENUM(rpcrdma_noch_pullup); 582 TRACE_DEFINE_ENUM(rpcrdma_noch_mapped); 583 TRACE_DEFINE_ENUM(rpcrdma_readch); 584 TRACE_DEFINE_ENUM(rpcrdma_areadch); 585 TRACE_DEFINE_ENUM(rpcrdma_writech); 586 TRACE_DEFINE_ENUM(rpcrdma_replych); 587 588 #define xprtrdma_show_chunktype(x) \ 589 __print_symbolic(x, \ 590 { rpcrdma_noch, "inline" }, \ 591 { rpcrdma_noch_pullup, "pullup" }, \ 592 { rpcrdma_noch_mapped, "mapped" }, \ 593 { rpcrdma_readch, "read list" }, \ 594 { rpcrdma_areadch, "*read list" }, \ 595 { rpcrdma_writech, "write list" }, \ 596 { rpcrdma_replych, "reply chunk" }) 597 598 TRACE_EVENT(xprtrdma_marshal, 599 TP_PROTO( 600 const struct rpcrdma_req *req, 601 unsigned int rtype, 602 unsigned int wtype 603 ), 604 605 TP_ARGS(req, rtype, wtype), 606 607 TP_STRUCT__entry( 608 __field(unsigned int, task_id) 609 __field(unsigned int, client_id) 610 __field(u32, xid) 611 __field(unsigned int, hdrlen) 612 __field(unsigned int, headlen) 613 __field(unsigned int, pagelen) 614 __field(unsigned int, taillen) 615 __field(unsigned int, rtype) 616 __field(unsigned int, wtype) 617 ), 618 619 TP_fast_assign( 620 const struct rpc_rqst *rqst = &req->rl_slot; 621 622 __entry->task_id = rqst->rq_task->tk_pid; 623 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 624 __entry->xid = be32_to_cpu(rqst->rq_xid); 625 __entry->hdrlen = req->rl_hdrbuf.len; 626 __entry->headlen = rqst->rq_snd_buf.head[0].iov_len; 627 __entry->pagelen = rqst->rq_snd_buf.page_len; 628 __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len; 629 __entry->rtype = rtype; 630 __entry->wtype = wtype; 631 ), 632 633 TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s", 634 __entry->task_id, __entry->client_id, __entry->xid, 635 
__entry->hdrlen, 636 __entry->headlen, __entry->pagelen, __entry->taillen, 637 xprtrdma_show_chunktype(__entry->rtype), 638 xprtrdma_show_chunktype(__entry->wtype) 639 ) 640 ); 641 642 TRACE_EVENT(xprtrdma_marshal_failed, 643 TP_PROTO(const struct rpc_rqst *rqst, 644 int ret 645 ), 646 647 TP_ARGS(rqst, ret), 648 649 TP_STRUCT__entry( 650 __field(unsigned int, task_id) 651 __field(unsigned int, client_id) 652 __field(u32, xid) 653 __field(int, ret) 654 ), 655 656 TP_fast_assign( 657 __entry->task_id = rqst->rq_task->tk_pid; 658 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 659 __entry->xid = be32_to_cpu(rqst->rq_xid); 660 __entry->ret = ret; 661 ), 662 663 TP_printk("task:%u@%u xid=0x%08x: ret=%d", 664 __entry->task_id, __entry->client_id, __entry->xid, 665 __entry->ret 666 ) 667 ); 668 669 TRACE_EVENT(xprtrdma_prepsend_failed, 670 TP_PROTO(const struct rpc_rqst *rqst, 671 int ret 672 ), 673 674 TP_ARGS(rqst, ret), 675 676 TP_STRUCT__entry( 677 __field(unsigned int, task_id) 678 __field(unsigned int, client_id) 679 __field(u32, xid) 680 __field(int, ret) 681 ), 682 683 TP_fast_assign( 684 __entry->task_id = rqst->rq_task->tk_pid; 685 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 686 __entry->xid = be32_to_cpu(rqst->rq_xid); 687 __entry->ret = ret; 688 ), 689 690 TP_printk("task:%u@%u xid=0x%08x: ret=%d", 691 __entry->task_id, __entry->client_id, __entry->xid, 692 __entry->ret 693 ) 694 ); 695 696 TRACE_EVENT(xprtrdma_post_send, 697 TP_PROTO( 698 const struct rpcrdma_req *req 699 ), 700 701 TP_ARGS(req), 702 703 TP_STRUCT__entry( 704 __field(u32, cq_id) 705 __field(int, completion_id) 706 __field(unsigned int, task_id) 707 __field(unsigned int, client_id) 708 __field(int, num_sge) 709 __field(int, signaled) 710 ), 711 712 TP_fast_assign( 713 const struct rpc_rqst *rqst = &req->rl_slot; 714 const struct rpcrdma_sendctx *sc = req->rl_sendctx; 715 716 __entry->cq_id = sc->sc_cid.ci_queue_id; 717 __entry->completion_id = sc->sc_cid.ci_completion_id; 
718 __entry->task_id = rqst->rq_task->tk_pid; 719 __entry->client_id = rqst->rq_task->tk_client ? 720 rqst->rq_task->tk_client->cl_clid : -1; 721 __entry->num_sge = req->rl_wr.num_sge; 722 __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED; 723 ), 724 725 TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s", 726 __entry->task_id, __entry->client_id, 727 __entry->cq_id, __entry->completion_id, 728 __entry->num_sge, (__entry->num_sge == 1 ? "" : "s"), 729 (__entry->signaled ? "signaled" : "") 730 ) 731 ); 732 733 TRACE_EVENT(xprtrdma_post_recv, 734 TP_PROTO( 735 const struct rpcrdma_rep *rep 736 ), 737 738 TP_ARGS(rep), 739 740 TP_STRUCT__entry( 741 __field(u32, cq_id) 742 __field(int, completion_id) 743 ), 744 745 TP_fast_assign( 746 __entry->cq_id = rep->rr_cid.ci_queue_id; 747 __entry->completion_id = rep->rr_cid.ci_completion_id; 748 ), 749 750 TP_printk("cq.id=%d cid=%d", 751 __entry->cq_id, __entry->completion_id 752 ) 753 ); 754 755 TRACE_EVENT(xprtrdma_post_recvs, 756 TP_PROTO( 757 const struct rpcrdma_xprt *r_xprt, 758 unsigned int count, 759 int status 760 ), 761 762 TP_ARGS(r_xprt, count, status), 763 764 TP_STRUCT__entry( 765 __field(const void *, r_xprt) 766 __field(unsigned int, count) 767 __field(int, status) 768 __field(int, posted) 769 __string(addr, rpcrdma_addrstr(r_xprt)) 770 __string(port, rpcrdma_portstr(r_xprt)) 771 ), 772 773 TP_fast_assign( 774 __entry->r_xprt = r_xprt; 775 __entry->count = count; 776 __entry->status = status; 777 __entry->posted = r_xprt->rx_ep->re_receive_count; 778 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 779 __assign_str(port, rpcrdma_portstr(r_xprt)); 780 ), 781 782 TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)", 783 __get_str(addr), __get_str(port), __entry->r_xprt, 784 __entry->count, __entry->posted, __entry->status 785 ) 786 ); 787 788 TRACE_EVENT(xprtrdma_post_linv_err, 789 TP_PROTO( 790 const struct rpcrdma_req *req, 791 int status 792 ), 793 794 TP_ARGS(req, status), 795 796 
TP_STRUCT__entry( 797 __field(unsigned int, task_id) 798 __field(unsigned int, client_id) 799 __field(int, status) 800 ), 801 802 TP_fast_assign( 803 const struct rpc_task *task = req->rl_slot.rq_task; 804 805 __entry->task_id = task->tk_pid; 806 __entry->client_id = task->tk_client->cl_clid; 807 __entry->status = status; 808 ), 809 810 TP_printk("task:%u@%u status=%d", 811 __entry->task_id, __entry->client_id, __entry->status 812 ) 813 ); 814 815 /** 816 ** Completion events 817 **/ 818 819 DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive); 820 DEFINE_COMPLETION_EVENT(xprtrdma_wc_send); 821 DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg); 822 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li); 823 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_wake); 824 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_done); 825 826 TRACE_EVENT(xprtrdma_frwr_alloc, 827 TP_PROTO( 828 const struct rpcrdma_mr *mr, 829 int rc 830 ), 831 832 TP_ARGS(mr, rc), 833 834 TP_STRUCT__entry( 835 __field(u32, mr_id) 836 __field(int, rc) 837 ), 838 839 TP_fast_assign( 840 __entry->mr_id = mr->frwr.fr_mr->res.id; 841 __entry->rc = rc; 842 ), 843 844 TP_printk("mr.id=%u: rc=%d", 845 __entry->mr_id, __entry->rc 846 ) 847 ); 848 849 TRACE_EVENT(xprtrdma_frwr_dereg, 850 TP_PROTO( 851 const struct rpcrdma_mr *mr, 852 int rc 853 ), 854 855 TP_ARGS(mr, rc), 856 857 TP_STRUCT__entry( 858 __field(u32, mr_id) 859 __field(int, nents) 860 __field(u32, handle) 861 __field(u32, length) 862 __field(u64, offset) 863 __field(u32, dir) 864 __field(int, rc) 865 ), 866 867 TP_fast_assign( 868 __entry->mr_id = mr->frwr.fr_mr->res.id; 869 __entry->nents = mr->mr_nents; 870 __entry->handle = mr->mr_handle; 871 __entry->length = mr->mr_length; 872 __entry->offset = mr->mr_offset; 873 __entry->dir = mr->mr_dir; 874 __entry->rc = rc; 875 ), 876 877 TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d", 878 __entry->mr_id, __entry->nents, __entry->length, 879 (unsigned long long)__entry->offset, __entry->handle, 880 
xprtrdma_show_direction(__entry->dir), 881 __entry->rc 882 ) 883 ); 884 885 TRACE_EVENT(xprtrdma_frwr_sgerr, 886 TP_PROTO( 887 const struct rpcrdma_mr *mr, 888 int sg_nents 889 ), 890 891 TP_ARGS(mr, sg_nents), 892 893 TP_STRUCT__entry( 894 __field(u32, mr_id) 895 __field(u64, addr) 896 __field(u32, dir) 897 __field(int, nents) 898 ), 899 900 TP_fast_assign( 901 __entry->mr_id = mr->frwr.fr_mr->res.id; 902 __entry->addr = mr->mr_sg->dma_address; 903 __entry->dir = mr->mr_dir; 904 __entry->nents = sg_nents; 905 ), 906 907 TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d", 908 __entry->mr_id, __entry->addr, 909 xprtrdma_show_direction(__entry->dir), 910 __entry->nents 911 ) 912 ); 913 914 TRACE_EVENT(xprtrdma_frwr_maperr, 915 TP_PROTO( 916 const struct rpcrdma_mr *mr, 917 int num_mapped 918 ), 919 920 TP_ARGS(mr, num_mapped), 921 922 TP_STRUCT__entry( 923 __field(u32, mr_id) 924 __field(u64, addr) 925 __field(u32, dir) 926 __field(int, num_mapped) 927 __field(int, nents) 928 ), 929 930 TP_fast_assign( 931 __entry->mr_id = mr->frwr.fr_mr->res.id; 932 __entry->addr = mr->mr_sg->dma_address; 933 __entry->dir = mr->mr_dir; 934 __entry->num_mapped = num_mapped; 935 __entry->nents = mr->mr_nents; 936 ), 937 938 TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d", 939 __entry->mr_id, __entry->addr, 940 xprtrdma_show_direction(__entry->dir), 941 __entry->num_mapped, __entry->nents 942 ) 943 ); 944 945 DEFINE_MR_EVENT(localinv); 946 DEFINE_MR_EVENT(map); 947 DEFINE_MR_EVENT(unmap); 948 DEFINE_MR_EVENT(reminv); 949 DEFINE_MR_EVENT(recycle); 950 951 TRACE_EVENT(xprtrdma_dma_maperr, 952 TP_PROTO( 953 u64 addr 954 ), 955 956 TP_ARGS(addr), 957 958 TP_STRUCT__entry( 959 __field(u64, addr) 960 ), 961 962 TP_fast_assign( 963 __entry->addr = addr; 964 ), 965 966 TP_printk("dma addr=0x%llx\n", __entry->addr) 967 ); 968 969 /** 970 ** Reply events 971 **/ 972 973 TRACE_EVENT(xprtrdma_reply, 974 TP_PROTO( 975 const struct rpc_task *task, 976 const struct rpcrdma_rep *rep, 977 
unsigned int credits 978 ), 979 980 TP_ARGS(task, rep, credits), 981 982 TP_STRUCT__entry( 983 __field(unsigned int, task_id) 984 __field(unsigned int, client_id) 985 __field(u32, xid) 986 __field(unsigned int, credits) 987 ), 988 989 TP_fast_assign( 990 __entry->task_id = task->tk_pid; 991 __entry->client_id = task->tk_client->cl_clid; 992 __entry->xid = be32_to_cpu(rep->rr_xid); 993 __entry->credits = credits; 994 ), 995 996 TP_printk("task:%u@%u xid=0x%08x credits=%u", 997 __entry->task_id, __entry->client_id, __entry->xid, 998 __entry->credits 999 ) 1000 ); 1001 1002 DEFINE_REPLY_EVENT(vers); 1003 DEFINE_REPLY_EVENT(rqst); 1004 DEFINE_REPLY_EVENT(short); 1005 DEFINE_REPLY_EVENT(hdr); 1006 1007 TRACE_EVENT(xprtrdma_err_vers, 1008 TP_PROTO( 1009 const struct rpc_rqst *rqst, 1010 __be32 *min, 1011 __be32 *max 1012 ), 1013 1014 TP_ARGS(rqst, min, max), 1015 1016 TP_STRUCT__entry( 1017 __field(unsigned int, task_id) 1018 __field(unsigned int, client_id) 1019 __field(u32, xid) 1020 __field(u32, min) 1021 __field(u32, max) 1022 ), 1023 1024 TP_fast_assign( 1025 __entry->task_id = rqst->rq_task->tk_pid; 1026 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1027 __entry->xid = be32_to_cpu(rqst->rq_xid); 1028 __entry->min = be32_to_cpup(min); 1029 __entry->max = be32_to_cpup(max); 1030 ), 1031 1032 TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]", 1033 __entry->task_id, __entry->client_id, __entry->xid, 1034 __entry->min, __entry->max 1035 ) 1036 ); 1037 1038 TRACE_EVENT(xprtrdma_err_chunk, 1039 TP_PROTO( 1040 const struct rpc_rqst *rqst 1041 ), 1042 1043 TP_ARGS(rqst), 1044 1045 TP_STRUCT__entry( 1046 __field(unsigned int, task_id) 1047 __field(unsigned int, client_id) 1048 __field(u32, xid) 1049 ), 1050 1051 TP_fast_assign( 1052 __entry->task_id = rqst->rq_task->tk_pid; 1053 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1054 __entry->xid = be32_to_cpu(rqst->rq_xid); 1055 ), 1056 1057 TP_printk("task:%u@%u xid=0x%08x", 1058 __entry->task_id, 
__entry->client_id, __entry->xid 1059 ) 1060 ); 1061 1062 TRACE_EVENT(xprtrdma_err_unrecognized, 1063 TP_PROTO( 1064 const struct rpc_rqst *rqst, 1065 __be32 *procedure 1066 ), 1067 1068 TP_ARGS(rqst, procedure), 1069 1070 TP_STRUCT__entry( 1071 __field(unsigned int, task_id) 1072 __field(unsigned int, client_id) 1073 __field(u32, xid) 1074 __field(u32, procedure) 1075 ), 1076 1077 TP_fast_assign( 1078 __entry->task_id = rqst->rq_task->tk_pid; 1079 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1080 __entry->procedure = be32_to_cpup(procedure); 1081 ), 1082 1083 TP_printk("task:%u@%u xid=0x%08x procedure=%u", 1084 __entry->task_id, __entry->client_id, __entry->xid, 1085 __entry->procedure 1086 ) 1087 ); 1088 1089 TRACE_EVENT(xprtrdma_fixup, 1090 TP_PROTO( 1091 const struct rpc_rqst *rqst, 1092 unsigned long fixup 1093 ), 1094 1095 TP_ARGS(rqst, fixup), 1096 1097 TP_STRUCT__entry( 1098 __field(unsigned int, task_id) 1099 __field(unsigned int, client_id) 1100 __field(unsigned long, fixup) 1101 __field(size_t, headlen) 1102 __field(unsigned int, pagelen) 1103 __field(size_t, taillen) 1104 ), 1105 1106 TP_fast_assign( 1107 __entry->task_id = rqst->rq_task->tk_pid; 1108 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1109 __entry->fixup = fixup; 1110 __entry->headlen = rqst->rq_rcv_buf.head[0].iov_len; 1111 __entry->pagelen = rqst->rq_rcv_buf.page_len; 1112 __entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len; 1113 ), 1114 1115 TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu", 1116 __entry->task_id, __entry->client_id, __entry->fixup, 1117 __entry->headlen, __entry->pagelen, __entry->taillen 1118 ) 1119 ); 1120 1121 TRACE_EVENT(xprtrdma_decode_seg, 1122 TP_PROTO( 1123 u32 handle, 1124 u32 length, 1125 u64 offset 1126 ), 1127 1128 TP_ARGS(handle, length, offset), 1129 1130 TP_STRUCT__entry( 1131 __field(u32, handle) 1132 __field(u32, length) 1133 __field(u64, offset) 1134 ), 1135 1136 TP_fast_assign( 1137 __entry->handle = handle; 1138 __entry->length = 
length; 1139 __entry->offset = offset; 1140 ), 1141 1142 TP_printk("%u@0x%016llx:0x%08x", 1143 __entry->length, (unsigned long long)__entry->offset, 1144 __entry->handle 1145 ) 1146 ); 1147 1148 /** 1149 ** Callback events 1150 **/ 1151 1152 TRACE_EVENT(xprtrdma_cb_setup, 1153 TP_PROTO( 1154 const struct rpcrdma_xprt *r_xprt, 1155 unsigned int reqs 1156 ), 1157 1158 TP_ARGS(r_xprt, reqs), 1159 1160 TP_STRUCT__entry( 1161 __field(const void *, r_xprt) 1162 __field(unsigned int, reqs) 1163 __string(addr, rpcrdma_addrstr(r_xprt)) 1164 __string(port, rpcrdma_portstr(r_xprt)) 1165 ), 1166 1167 TP_fast_assign( 1168 __entry->r_xprt = r_xprt; 1169 __entry->reqs = reqs; 1170 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 1171 __assign_str(port, rpcrdma_portstr(r_xprt)); 1172 ), 1173 1174 TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs", 1175 __get_str(addr), __get_str(port), 1176 __entry->r_xprt, __entry->reqs 1177 ) 1178 ); 1179 1180 DEFINE_CB_EVENT(xprtrdma_cb_call); 1181 DEFINE_CB_EVENT(xprtrdma_cb_reply); 1182 1183 /** 1184 ** Server-side RPC/RDMA events 1185 **/ 1186 1187 DECLARE_EVENT_CLASS(svcrdma_accept_class, 1188 TP_PROTO( 1189 const struct svcxprt_rdma *rdma, 1190 long status 1191 ), 1192 1193 TP_ARGS(rdma, status), 1194 1195 TP_STRUCT__entry( 1196 __field(long, status) 1197 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1198 ), 1199 1200 TP_fast_assign( 1201 __entry->status = status; 1202 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1203 ), 1204 1205 TP_printk("addr=%s status=%ld", 1206 __get_str(addr), __entry->status 1207 ) 1208 ); 1209 1210 #define DEFINE_ACCEPT_EVENT(name) \ 1211 DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \ 1212 TP_PROTO( \ 1213 const struct svcxprt_rdma *rdma, \ 1214 long status \ 1215 ), \ 1216 TP_ARGS(rdma, status)) 1217 1218 DEFINE_ACCEPT_EVENT(pd); 1219 DEFINE_ACCEPT_EVENT(qp); 1220 DEFINE_ACCEPT_EVENT(fabric); 1221 DEFINE_ACCEPT_EVENT(initdepth); 1222 DEFINE_ACCEPT_EVENT(accept); 1223 1224 TRACE_DEFINE_ENUM(RDMA_MSG); 1225 
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

/* Display an RPC/RDMA procedure number symbolically in trace output */
#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })

/* Records a successfully decoded ingress RPC/RDMA transport header.
 * @p points at the header; the words are consumed in on-the-wire
 * order: xid, version, credits, then procedure.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);

/* Records receipt of a message too short to contain a transport header */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);
/* Common shape for "bad request" decode failures. @p points at the
 * four 32-bit big-endian transport header words (xid, version,
 * credits, procedure). The procedure is printed numerically here,
 * since a bad request may carry a value with no symbolic name.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

/* Each instance is named svcrdma_decode_<name>_err. */
#define DEFINE_BADREQ_EVENT(name) \
		DEFINE_EVENT(svcrdma_badreq_event, \
				svcrdma_decode_##name##_err, \
				TP_PROTO( \
					const struct svc_rdma_recv_ctxt *ctxt, \
					__be32 *p \
				), \
				TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);

/* Common shape for events describing one RDMA segment:
 * an R_key (handle), a byte length, and a 64-bit offset.
 */
DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	/* Printed as length@offset:handle */
	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

#define DEFINE_SEGMENT_EVENT(name) \
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,\
				TP_PROTO( \
					u32 handle, \
					u32 length, \
					u64 offset \
				), \
				TP_ARGS(handle, length, offset))

DEFINE_SEGMENT_EVENT(decode_wseg);
DEFINE_SEGMENT_EVENT(encode_rseg);
DEFINE_SEGMENT_EVENT(send_rseg);
DEFINE_SEGMENT_EVENT(encode_wseg);
DEFINE_SEGMENT_EVENT(send_wseg);

/* Common shape for events that record only a chunk's byte length. */
DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);

#define DEFINE_CHUNK_EVENT(name) \
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name, \
				TP_PROTO( \
					u32 length \
				), \
				TP_ARGS(length))

DEFINE_CHUNK_EVENT(send_pzr);
DEFINE_CHUNK_EVENT(encode_write_chunk);
DEFINE_CHUNK_EVENT(send_write_chunk);
DEFINE_CHUNK_EVENT(encode_read_chunk);
DEFINE_CHUNK_EVENT(send_reply_chunk);

/* Like the chunk events above, but a Read chunk also carries the
 * XDR stream position it was decoded at.
 */
TRACE_EVENT(svcrdma_send_read_chunk,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);

/* Common shape for error events that record only the RPC XID
 * (converted from wire byte order at assign time).
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

#define DEFINE_ERROR_EVENT(name) \
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name, \
				TP_PROTO( \
					__be32 xid \
				), \
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);

/**
 ** Server-side RDMA API events
 **/

DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	/* Records one DMA (un)mapping: bus address and length, plus the
	 * device name and peer address for correlation.
	 */
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);

#define DEFINE_SVC_DMA_EVENT(name) \
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr, \
					u32 length \
				), \
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);

/* DMA-mapping a scatterlist for an RDMA Read/Write failed;
 * records the number of sg entries and the error status.
 */
TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);

/* No R/W context was available for the requested number of SGEs. */
TRACE_EVENT(svcrdma_no_rwctx_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int num_sges
	),

	TP_ARGS(rdma, num_sges),

	TP_STRUCT__entry(
		__field(unsigned int,
num_sges) 1555 __string(device, rdma->sc_cm_id->device->name) 1556 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1557 ), 1558 1559 TP_fast_assign( 1560 __entry->num_sges = num_sges; 1561 __assign_str(device, rdma->sc_cm_id->device->name); 1562 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1563 ), 1564 1565 TP_printk("addr=%s device=%s num_sges=%d", 1566 __get_str(addr), __get_str(device), __entry->num_sges 1567 ) 1568 ); 1569 1570 TRACE_EVENT(svcrdma_page_overrun_err, 1571 TP_PROTO( 1572 const struct svcxprt_rdma *rdma, 1573 const struct svc_rqst *rqst, 1574 unsigned int pageno 1575 ), 1576 1577 TP_ARGS(rdma, rqst, pageno), 1578 1579 TP_STRUCT__entry( 1580 __field(unsigned int, pageno) 1581 __field(u32, xid) 1582 __string(device, rdma->sc_cm_id->device->name) 1583 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1584 ), 1585 1586 TP_fast_assign( 1587 __entry->pageno = pageno; 1588 __entry->xid = __be32_to_cpu(rqst->rq_xid); 1589 __assign_str(device, rdma->sc_cm_id->device->name); 1590 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1591 ), 1592 1593 TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr), 1594 __get_str(device), __entry->xid, __entry->pageno 1595 ) 1596 ); 1597 1598 TRACE_EVENT(svcrdma_small_wrch_err, 1599 TP_PROTO( 1600 const struct svcxprt_rdma *rdma, 1601 unsigned int remaining, 1602 unsigned int seg_no, 1603 unsigned int num_segs 1604 ), 1605 1606 TP_ARGS(rdma, remaining, seg_no, num_segs), 1607 1608 TP_STRUCT__entry( 1609 __field(unsigned int, remaining) 1610 __field(unsigned int, seg_no) 1611 __field(unsigned int, num_segs) 1612 __string(device, rdma->sc_cm_id->device->name) 1613 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1614 ), 1615 1616 TP_fast_assign( 1617 __entry->remaining = remaining; 1618 __entry->seg_no = seg_no; 1619 __entry->num_segs = num_segs; 1620 __assign_str(device, rdma->sc_cm_id->device->name); 1621 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1622 ), 1623 1624 TP_printk("addr=%s device=%s remaining=%u 
seg_no=%u num_segs=%u", 1625 __get_str(addr), __get_str(device), __entry->remaining, 1626 __entry->seg_no, __entry->num_segs 1627 ) 1628 ); 1629 1630 TRACE_EVENT(svcrdma_send_pullup, 1631 TP_PROTO( 1632 unsigned int len 1633 ), 1634 1635 TP_ARGS(len), 1636 1637 TP_STRUCT__entry( 1638 __field(unsigned int, len) 1639 ), 1640 1641 TP_fast_assign( 1642 __entry->len = len; 1643 ), 1644 1645 TP_printk("len=%u", __entry->len) 1646 ); 1647 1648 TRACE_EVENT(svcrdma_send_err, 1649 TP_PROTO( 1650 const struct svc_rqst *rqst, 1651 int status 1652 ), 1653 1654 TP_ARGS(rqst, status), 1655 1656 TP_STRUCT__entry( 1657 __field(int, status) 1658 __field(u32, xid) 1659 __string(addr, rqst->rq_xprt->xpt_remotebuf) 1660 ), 1661 1662 TP_fast_assign( 1663 __entry->status = status; 1664 __entry->xid = __be32_to_cpu(rqst->rq_xid); 1665 __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); 1666 ), 1667 1668 TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr), 1669 __entry->xid, __entry->status 1670 ) 1671 ); 1672 1673 TRACE_EVENT(svcrdma_post_send, 1674 TP_PROTO( 1675 const struct svc_rdma_send_ctxt *ctxt 1676 ), 1677 1678 TP_ARGS(ctxt), 1679 1680 TP_STRUCT__entry( 1681 __field(u32, cq_id) 1682 __field(int, completion_id) 1683 __field(unsigned int, num_sge) 1684 __field(u32, inv_rkey) 1685 ), 1686 1687 TP_fast_assign( 1688 const struct ib_send_wr *wr = &ctxt->sc_send_wr; 1689 1690 __entry->cq_id = ctxt->sc_cid.ci_queue_id; 1691 __entry->completion_id = ctxt->sc_cid.ci_completion_id; 1692 __entry->num_sge = wr->num_sge; 1693 __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ? 
1694 wr->ex.invalidate_rkey : 0; 1695 ), 1696 1697 TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x", 1698 __entry->cq_id, __entry->completion_id, 1699 __entry->num_sge, __entry->inv_rkey 1700 ) 1701 ); 1702 1703 DEFINE_COMPLETION_EVENT(svcrdma_wc_send); 1704 1705 TRACE_EVENT(svcrdma_post_recv, 1706 TP_PROTO( 1707 const struct svc_rdma_recv_ctxt *ctxt 1708 ), 1709 1710 TP_ARGS(ctxt), 1711 1712 TP_STRUCT__entry( 1713 __field(u32, cq_id) 1714 __field(int, completion_id) 1715 ), 1716 1717 TP_fast_assign( 1718 __entry->cq_id = ctxt->rc_cid.ci_queue_id; 1719 __entry->completion_id = ctxt->rc_cid.ci_completion_id; 1720 ), 1721 1722 TP_printk("cq.id=%d cid=%d", 1723 __entry->cq_id, __entry->completion_id 1724 ) 1725 ); 1726 1727 DEFINE_COMPLETION_EVENT(svcrdma_wc_receive); 1728 1729 TRACE_EVENT(svcrdma_rq_post_err, 1730 TP_PROTO( 1731 const struct svcxprt_rdma *rdma, 1732 int status 1733 ), 1734 1735 TP_ARGS(rdma, status), 1736 1737 TP_STRUCT__entry( 1738 __field(int, status) 1739 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1740 ), 1741 1742 TP_fast_assign( 1743 __entry->status = status; 1744 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1745 ), 1746 1747 TP_printk("addr=%s status=%d", 1748 __get_str(addr), __entry->status 1749 ) 1750 ); 1751 1752 TRACE_EVENT(svcrdma_post_chunk, 1753 TP_PROTO( 1754 const struct rpc_rdma_cid *cid, 1755 int sqecount 1756 ), 1757 1758 TP_ARGS(cid, sqecount), 1759 1760 TP_STRUCT__entry( 1761 __field(u32, cq_id) 1762 __field(int, completion_id) 1763 __field(int, sqecount) 1764 ), 1765 1766 TP_fast_assign( 1767 __entry->cq_id = cid->ci_queue_id; 1768 __entry->completion_id = cid->ci_completion_id; 1769 __entry->sqecount = sqecount; 1770 ), 1771 1772 TP_printk("cq.id=%u cid=%d sqecount=%d", 1773 __entry->cq_id, __entry->completion_id, 1774 __entry->sqecount 1775 ) 1776 ); 1777 1778 DEFINE_COMPLETION_EVENT(svcrdma_wc_read); 1779 DEFINE_COMPLETION_EVENT(svcrdma_wc_write); 1780 1781 TRACE_EVENT(svcrdma_qp_error, 1782 TP_PROTO( 1783 const 
struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		/* NOTE(review): snprintf() already NUL-terminates within the
		 * given size; the "- 1" is redundant belt-and-braces, kept
		 * as-is.
		 */
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			"%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);

/* Common shape for Send Queue accounting events: snapshots the
 * available SQ entries (sc_sq_avail) against the queue depth.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);

#define DEFINE_SQ_EVENT(name) \
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO( \
					const struct svcxprt_rdma *rdma \
				), \
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);

/* Posting to the Send Queue failed; records the SQ occupancy at the
 * time of failure along with the status code.
 */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s 
sc_sq_avail=%d/%d status=%d", 1865 __get_str(addr), __entry->avail, __entry->depth, 1866 __entry->status 1867 ) 1868 ); 1869 1870 #endif /* _TRACE_RPCRDMA_H */ 1871 1872 #include <trace/define_trace.h> 1873