/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017, 2018 Oracle. All rights reserved.
 *
 * Trace point definitions for the "rpcrdma" subsystem.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcrdma

#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCRDMA_H

#include <linux/scatterlist.h>
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/tracepoint.h>
#include <rdma/ib_cm.h>
#include <trace/events/rdma.h>

/**
 ** Event classes
 **/

/*
 * Records a Send/LocalInv-style completion: the completion ID pair
 * plus the wc status. vendor_err is captured only when the WR failed,
 * since it is undefined on success.
 */
DECLARE_EVENT_CLASS(rpcrdma_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_COMPLETION_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

/*
 * Like rpcrdma_completion_class, but also records the number of
 * bytes received (wc->byte_len), which is valid only on success.
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->received = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->received = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err,
		__entry->received
	)
);

#define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))

/*
 * Records fields decoded from an incoming RPC/RDMA transport header,
 * along with the peer's presentation address.
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_class,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
		__string(port, rpcrdma_portstr(rep->rr_rxprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
		__get_str(addr), __get_str(port),
		__entry->xid, __entry->version, __entry->proc
	)
);

/* Events defined via this macro are named xprtrdma_reply_<name>_err */
#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_class,			\
				xprtrdma_reply_##name##_err,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))

/* Records only the transport pointer and the peer's address. */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p",
		__get_str(addr), __get_str(port), __entry->r_xprt
	)
);

#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))

/*
 * Records the result code of a connect/disconnect attempt together
 * with the endpoint's current re_connect_status.
 */
DECLARE_EVENT_CLASS(xprtrdma_connect_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int rc
	),

	TP_ARGS(r_xprt, rc),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(int, rc)
		__field(int, connect_status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->rc = rc;
		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->rc, __entry->connect_status
	)
);

#define DEFINE_CONN_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					int rc				\
				),					\
				TP_ARGS(r_xprt, rc))

/*
 * Records one MR-backed segment of a Read chunk. "more"/"last" in the
 * output reflects whether this MR maps fewer segments than remain.
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))

/*
 * Records one MR-backed segment of a Write or Reply chunk. Same as
 * xprtrdma_rdch_event but without the XDR "pos" field.
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))

/* Export the DMA direction values so user space can decode them */
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })

/*
 * Records the state of an MR that is attached to an rpcrdma_req,
 * including the owning task (via mr->mr_req->rl_slot.rq_task).
 */
DECLARE_EVENT_CLASS(xprtrdma_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		const struct rpcrdma_req *req = mr->mr_req;
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr_id  = mr->frwr.fr_mr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr_class,				\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))

/*
 * Same as xprtrdma_mr_class, but for MRs that are not (or no longer)
 * associated with a request, so no task information is available.
 */
DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->frwr.fr_mr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

#define DEFINE_ANON_MR_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))

/* Records the XID and peer address of a backchannel RPC. */
DECLARE_EVENT_CLASS(xprtrdma_callback_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpc_rqst *rqst
	),

	TP_ARGS(r_xprt, rqst),

	TP_STRUCT__entry(
		__field(u32, xid)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x",
		__get_str(addr), __get_str(port), __entry->xid
	)
);

#define DEFINE_CALLBACK_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_callback_class,			\
				xprtrdma_cb_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(r_xprt, rqst))

/**
 ** Connection events
 **/

/*
 * Fires after connection establishment; records the negotiated and
 * calculated inline thresholds for this endpoint.
 */
TRACE_EVENT(xprtrdma_inline_thresh,
	TP_PROTO(
		const struct rpcrdma_ep *ep
	),

	TP_ARGS(ep),

	TP_STRUCT__entry(
		__field(unsigned int, inline_send)
		__field(unsigned int, inline_recv)
		__field(unsigned int, max_send)
		__field(unsigned int, max_recv)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->inline_send = ep->re_inline_send;
		__entry->inline_recv = ep->re_inline_recv;
		__entry->max_send = ep->re_max_inline_send;
		__entry->max_recv = ep->re_max_inline_recv;
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
		__entry->srcaddr, __entry->dstaddr,
		__entry->inline_send, __entry->inline_recv,
		__entry->max_send, __entry->max_recv
	)
);

DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);

DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);

/* Records a scheduled connection attempt and its backoff delay. */
TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),

	TP_ARGS(r_xprt, delay),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->delay = delay;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->delay
	)
);

/*
 * Records the connect and reconnect timeouts being set on a transport.
 * Values are captured in jiffies but displayed in seconds (/ HZ).
 */
TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);

/* Records an asynchronous QP event reported by the RDMA device. */
TRACE_EVENT(xprtrdma_qp_event,
	TP_PROTO(
		const struct rpcrdma_ep *ep,
		const struct ib_event *event
	),

	TP_ARGS(ep, event),

	TP_STRUCT__entry(
		__field(unsigned long, event)
		__string(name, event->device->name)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->event = event->event;
		__assign_str(name, event->device->name);
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
		__entry->srcaddr, __entry->dstaddr, __get_str(name),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);

/**
 ** Call events
 **/

/* Records how many MRs were created for a transport in one batch. */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count
	)
);

/* Fires when a request could not obtain the MRs it needs. */
TRACE_EVENT(xprtrdma_nomrs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req
	),

	TP_ARGS(r_xprt, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s task:%u@%u",
		__get_str(addr), __get_str(port),
		__entry->task_id, __entry->client_id
	)
);

DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);

/* Export the chunk-type values so user space can decode them */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_noch_pullup, "pullup" },	\
				{ rpcrdma_noch_mapped, "mapped" },	\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })

/*
 * Records the marshaling result for an RPC Call: header length, the
 * three xdr_buf section lengths, and the chosen read/write chunk types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);

/* Fires when marshaling an RPC Call fails; "ret" is the errno. */
TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

/* Fires when preparing the Send WR for an RPC Call fails. */
TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

/*
 * Records a posted Send WR. client_id is -1 for requests that have no
 * rpc_client (e.g. backchannel replies).
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_sendctx *sc = req->rl_sendctx;

		__entry->cq_id = sc->sc_cid.ci_queue_id;
		__entry->completion_id = sc->sc_cid.ci_completion_id;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);

/* Records a posted Receive WR by its completion ID. */
TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
	),

	TP_fast_assign(
		__entry->cq_id = rep->rr_cid.ci_queue_id;
		__entry->completion_id = rep->rr_cid.ci_completion_id;
	),

	TP_printk("cq.id=%d cid=%d",
		__entry->cq_id, __entry->completion_id
	)
);

/* Records a batch Receive post: how many new, how many now active. */
TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_ep->re_receive_count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);

/* Fires when posting LocalInv WRs fails; "status" is the errno. */
TRACE_EVENT(xprtrdma_post_linv_err,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->status = status;
	),

	TP_printk("task:%u@%u status=%d",
		__entry->task_id, __entry->client_id, __entry->status
	)
);

/**
 ** Completion events
 **/
886 DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive); 887 888 DEFINE_COMPLETION_EVENT(xprtrdma_wc_send); 889 DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg); 890 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li); 891 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_wake); 892 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_done); 893 894 TRACE_EVENT(xprtrdma_frwr_alloc, 895 TP_PROTO( 896 const struct rpcrdma_mr *mr, 897 int rc 898 ), 899 900 TP_ARGS(mr, rc), 901 902 TP_STRUCT__entry( 903 __field(u32, mr_id) 904 __field(int, rc) 905 ), 906 907 TP_fast_assign( 908 __entry->mr_id = mr->frwr.fr_mr->res.id; 909 __entry->rc = rc; 910 ), 911 912 TP_printk("mr.id=%u: rc=%d", 913 __entry->mr_id, __entry->rc 914 ) 915 ); 916 917 TRACE_EVENT(xprtrdma_frwr_dereg, 918 TP_PROTO( 919 const struct rpcrdma_mr *mr, 920 int rc 921 ), 922 923 TP_ARGS(mr, rc), 924 925 TP_STRUCT__entry( 926 __field(u32, mr_id) 927 __field(int, nents) 928 __field(u32, handle) 929 __field(u32, length) 930 __field(u64, offset) 931 __field(u32, dir) 932 __field(int, rc) 933 ), 934 935 TP_fast_assign( 936 __entry->mr_id = mr->frwr.fr_mr->res.id; 937 __entry->nents = mr->mr_nents; 938 __entry->handle = mr->mr_handle; 939 __entry->length = mr->mr_length; 940 __entry->offset = mr->mr_offset; 941 __entry->dir = mr->mr_dir; 942 __entry->rc = rc; 943 ), 944 945 TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d", 946 __entry->mr_id, __entry->nents, __entry->length, 947 (unsigned long long)__entry->offset, __entry->handle, 948 xprtrdma_show_direction(__entry->dir), 949 __entry->rc 950 ) 951 ); 952 953 TRACE_EVENT(xprtrdma_frwr_sgerr, 954 TP_PROTO( 955 const struct rpcrdma_mr *mr, 956 int sg_nents 957 ), 958 959 TP_ARGS(mr, sg_nents), 960 961 TP_STRUCT__entry( 962 __field(u32, mr_id) 963 __field(u64, addr) 964 __field(u32, dir) 965 __field(int, nents) 966 ), 967 968 TP_fast_assign( 969 __entry->mr_id = mr->frwr.fr_mr->res.id; 970 __entry->addr = mr->mr_sg->dma_address; 971 __entry->dir = mr->mr_dir; 972 __entry->nents = sg_nents; 973 
), 974 975 TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d", 976 __entry->mr_id, __entry->addr, 977 xprtrdma_show_direction(__entry->dir), 978 __entry->nents 979 ) 980 ); 981 982 TRACE_EVENT(xprtrdma_frwr_maperr, 983 TP_PROTO( 984 const struct rpcrdma_mr *mr, 985 int num_mapped 986 ), 987 988 TP_ARGS(mr, num_mapped), 989 990 TP_STRUCT__entry( 991 __field(u32, mr_id) 992 __field(u64, addr) 993 __field(u32, dir) 994 __field(int, num_mapped) 995 __field(int, nents) 996 ), 997 998 TP_fast_assign( 999 __entry->mr_id = mr->frwr.fr_mr->res.id; 1000 __entry->addr = mr->mr_sg->dma_address; 1001 __entry->dir = mr->mr_dir; 1002 __entry->num_mapped = num_mapped; 1003 __entry->nents = mr->mr_nents; 1004 ), 1005 1006 TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d", 1007 __entry->mr_id, __entry->addr, 1008 xprtrdma_show_direction(__entry->dir), 1009 __entry->num_mapped, __entry->nents 1010 ) 1011 ); 1012 1013 DEFINE_MR_EVENT(localinv); 1014 DEFINE_MR_EVENT(map); 1015 1016 DEFINE_ANON_MR_EVENT(unmap); 1017 1018 TRACE_EVENT(xprtrdma_dma_maperr, 1019 TP_PROTO( 1020 u64 addr 1021 ), 1022 1023 TP_ARGS(addr), 1024 1025 TP_STRUCT__entry( 1026 __field(u64, addr) 1027 ), 1028 1029 TP_fast_assign( 1030 __entry->addr = addr; 1031 ), 1032 1033 TP_printk("dma addr=0x%llx\n", __entry->addr) 1034 ); 1035 1036 /** 1037 ** Reply events 1038 **/ 1039 1040 TRACE_EVENT(xprtrdma_reply, 1041 TP_PROTO( 1042 const struct rpc_task *task, 1043 const struct rpcrdma_rep *rep, 1044 unsigned int credits 1045 ), 1046 1047 TP_ARGS(task, rep, credits), 1048 1049 TP_STRUCT__entry( 1050 __field(unsigned int, task_id) 1051 __field(unsigned int, client_id) 1052 __field(u32, xid) 1053 __field(unsigned int, credits) 1054 ), 1055 1056 TP_fast_assign( 1057 __entry->task_id = task->tk_pid; 1058 __entry->client_id = task->tk_client->cl_clid; 1059 __entry->xid = be32_to_cpu(rep->rr_xid); 1060 __entry->credits = credits; 1061 ), 1062 1063 TP_printk("task:%u@%u xid=0x%08x credits=%u", 1064 __entry->task_id, 
__entry->client_id, __entry->xid, 1065 __entry->credits 1066 ) 1067 ); 1068 1069 DEFINE_REPLY_EVENT(vers); 1070 DEFINE_REPLY_EVENT(rqst); 1071 DEFINE_REPLY_EVENT(short); 1072 DEFINE_REPLY_EVENT(hdr); 1073 1074 TRACE_EVENT(xprtrdma_err_vers, 1075 TP_PROTO( 1076 const struct rpc_rqst *rqst, 1077 __be32 *min, 1078 __be32 *max 1079 ), 1080 1081 TP_ARGS(rqst, min, max), 1082 1083 TP_STRUCT__entry( 1084 __field(unsigned int, task_id) 1085 __field(unsigned int, client_id) 1086 __field(u32, xid) 1087 __field(u32, min) 1088 __field(u32, max) 1089 ), 1090 1091 TP_fast_assign( 1092 __entry->task_id = rqst->rq_task->tk_pid; 1093 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1094 __entry->xid = be32_to_cpu(rqst->rq_xid); 1095 __entry->min = be32_to_cpup(min); 1096 __entry->max = be32_to_cpup(max); 1097 ), 1098 1099 TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]", 1100 __entry->task_id, __entry->client_id, __entry->xid, 1101 __entry->min, __entry->max 1102 ) 1103 ); 1104 1105 TRACE_EVENT(xprtrdma_err_chunk, 1106 TP_PROTO( 1107 const struct rpc_rqst *rqst 1108 ), 1109 1110 TP_ARGS(rqst), 1111 1112 TP_STRUCT__entry( 1113 __field(unsigned int, task_id) 1114 __field(unsigned int, client_id) 1115 __field(u32, xid) 1116 ), 1117 1118 TP_fast_assign( 1119 __entry->task_id = rqst->rq_task->tk_pid; 1120 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1121 __entry->xid = be32_to_cpu(rqst->rq_xid); 1122 ), 1123 1124 TP_printk("task:%u@%u xid=0x%08x", 1125 __entry->task_id, __entry->client_id, __entry->xid 1126 ) 1127 ); 1128 1129 TRACE_EVENT(xprtrdma_err_unrecognized, 1130 TP_PROTO( 1131 const struct rpc_rqst *rqst, 1132 __be32 *procedure 1133 ), 1134 1135 TP_ARGS(rqst, procedure), 1136 1137 TP_STRUCT__entry( 1138 __field(unsigned int, task_id) 1139 __field(unsigned int, client_id) 1140 __field(u32, xid) 1141 __field(u32, procedure) 1142 ), 1143 1144 TP_fast_assign( 1145 __entry->task_id = rqst->rq_task->tk_pid; 1146 __entry->client_id = 
rqst->rq_task->tk_client->cl_clid; 1147 __entry->procedure = be32_to_cpup(procedure); 1148 ), 1149 1150 TP_printk("task:%u@%u xid=0x%08x procedure=%u", 1151 __entry->task_id, __entry->client_id, __entry->xid, 1152 __entry->procedure 1153 ) 1154 ); 1155 1156 TRACE_EVENT(xprtrdma_fixup, 1157 TP_PROTO( 1158 const struct rpc_rqst *rqst, 1159 unsigned long fixup 1160 ), 1161 1162 TP_ARGS(rqst, fixup), 1163 1164 TP_STRUCT__entry( 1165 __field(unsigned int, task_id) 1166 __field(unsigned int, client_id) 1167 __field(unsigned long, fixup) 1168 __field(size_t, headlen) 1169 __field(unsigned int, pagelen) 1170 __field(size_t, taillen) 1171 ), 1172 1173 TP_fast_assign( 1174 __entry->task_id = rqst->rq_task->tk_pid; 1175 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1176 __entry->fixup = fixup; 1177 __entry->headlen = rqst->rq_rcv_buf.head[0].iov_len; 1178 __entry->pagelen = rqst->rq_rcv_buf.page_len; 1179 __entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len; 1180 ), 1181 1182 TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu", 1183 __entry->task_id, __entry->client_id, __entry->fixup, 1184 __entry->headlen, __entry->pagelen, __entry->taillen 1185 ) 1186 ); 1187 1188 TRACE_EVENT(xprtrdma_decode_seg, 1189 TP_PROTO( 1190 u32 handle, 1191 u32 length, 1192 u64 offset 1193 ), 1194 1195 TP_ARGS(handle, length, offset), 1196 1197 TP_STRUCT__entry( 1198 __field(u32, handle) 1199 __field(u32, length) 1200 __field(u64, offset) 1201 ), 1202 1203 TP_fast_assign( 1204 __entry->handle = handle; 1205 __entry->length = length; 1206 __entry->offset = offset; 1207 ), 1208 1209 TP_printk("%u@0x%016llx:0x%08x", 1210 __entry->length, (unsigned long long)__entry->offset, 1211 __entry->handle 1212 ) 1213 ); 1214 1215 TRACE_EVENT(xprtrdma_mrs_zap, 1216 TP_PROTO( 1217 const struct rpc_task *task 1218 ), 1219 1220 TP_ARGS(task), 1221 1222 TP_STRUCT__entry( 1223 __field(unsigned int, task_id) 1224 __field(unsigned int, client_id) 1225 ), 1226 1227 TP_fast_assign( 1228 __entry->task_id = 
task->tk_pid; 1229 __entry->client_id = task->tk_client->cl_clid; 1230 ), 1231 1232 TP_printk("task:%u@%u", 1233 __entry->task_id, __entry->client_id 1234 ) 1235 ); 1236 1237 /** 1238 ** Callback events 1239 **/ 1240 1241 TRACE_EVENT(xprtrdma_cb_setup, 1242 TP_PROTO( 1243 const struct rpcrdma_xprt *r_xprt, 1244 unsigned int reqs 1245 ), 1246 1247 TP_ARGS(r_xprt, reqs), 1248 1249 TP_STRUCT__entry( 1250 __field(const void *, r_xprt) 1251 __field(unsigned int, reqs) 1252 __string(addr, rpcrdma_addrstr(r_xprt)) 1253 __string(port, rpcrdma_portstr(r_xprt)) 1254 ), 1255 1256 TP_fast_assign( 1257 __entry->r_xprt = r_xprt; 1258 __entry->reqs = reqs; 1259 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 1260 __assign_str(port, rpcrdma_portstr(r_xprt)); 1261 ), 1262 1263 TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs", 1264 __get_str(addr), __get_str(port), 1265 __entry->r_xprt, __entry->reqs 1266 ) 1267 ); 1268 1269 DEFINE_CALLBACK_EVENT(call); 1270 DEFINE_CALLBACK_EVENT(reply); 1271 1272 /** 1273 ** Server-side RPC/RDMA events 1274 **/ 1275 1276 DECLARE_EVENT_CLASS(svcrdma_accept_class, 1277 TP_PROTO( 1278 const struct svcxprt_rdma *rdma, 1279 long status 1280 ), 1281 1282 TP_ARGS(rdma, status), 1283 1284 TP_STRUCT__entry( 1285 __field(long, status) 1286 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1287 ), 1288 1289 TP_fast_assign( 1290 __entry->status = status; 1291 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1292 ), 1293 1294 TP_printk("addr=%s status=%ld", 1295 __get_str(addr), __entry->status 1296 ) 1297 ); 1298 1299 #define DEFINE_ACCEPT_EVENT(name) \ 1300 DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \ 1301 TP_PROTO( \ 1302 const struct svcxprt_rdma *rdma, \ 1303 long status \ 1304 ), \ 1305 TP_ARGS(rdma, status)) 1306 1307 DEFINE_ACCEPT_EVENT(pd); 1308 DEFINE_ACCEPT_EVENT(qp); 1309 DEFINE_ACCEPT_EVENT(fabric); 1310 DEFINE_ACCEPT_EVENT(initdepth); 1311 DEFINE_ACCEPT_EVENT(accept); 1312 1313 TRACE_DEFINE_ENUM(RDMA_MSG); 1314 TRACE_DEFINE_ENUM(RDMA_NOMSG); 1315 
TRACE_DEFINE_ENUM(RDMA_MSGP); 1316 TRACE_DEFINE_ENUM(RDMA_DONE); 1317 TRACE_DEFINE_ENUM(RDMA_ERROR); 1318 1319 #define show_rpcrdma_proc(x) \ 1320 __print_symbolic(x, \ 1321 { RDMA_MSG, "RDMA_MSG" }, \ 1322 { RDMA_NOMSG, "RDMA_NOMSG" }, \ 1323 { RDMA_MSGP, "RDMA_MSGP" }, \ 1324 { RDMA_DONE, "RDMA_DONE" }, \ 1325 { RDMA_ERROR, "RDMA_ERROR" }) 1326 1327 TRACE_EVENT(svcrdma_decode_rqst, 1328 TP_PROTO( 1329 const struct svc_rdma_recv_ctxt *ctxt, 1330 __be32 *p, 1331 unsigned int hdrlen 1332 ), 1333 1334 TP_ARGS(ctxt, p, hdrlen), 1335 1336 TP_STRUCT__entry( 1337 __field(u32, cq_id) 1338 __field(int, completion_id) 1339 __field(u32, xid) 1340 __field(u32, vers) 1341 __field(u32, proc) 1342 __field(u32, credits) 1343 __field(unsigned int, hdrlen) 1344 ), 1345 1346 TP_fast_assign( 1347 __entry->cq_id = ctxt->rc_cid.ci_queue_id; 1348 __entry->completion_id = ctxt->rc_cid.ci_completion_id; 1349 __entry->xid = be32_to_cpup(p++); 1350 __entry->vers = be32_to_cpup(p++); 1351 __entry->credits = be32_to_cpup(p++); 1352 __entry->proc = be32_to_cpup(p); 1353 __entry->hdrlen = hdrlen; 1354 ), 1355 1356 TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u", 1357 __entry->cq_id, __entry->completion_id, 1358 __entry->xid, __entry->vers, __entry->credits, 1359 show_rpcrdma_proc(__entry->proc), __entry->hdrlen) 1360 ); 1361 1362 TRACE_EVENT(svcrdma_decode_short_err, 1363 TP_PROTO( 1364 const struct svc_rdma_recv_ctxt *ctxt, 1365 unsigned int hdrlen 1366 ), 1367 1368 TP_ARGS(ctxt, hdrlen), 1369 1370 TP_STRUCT__entry( 1371 __field(u32, cq_id) 1372 __field(int, completion_id) 1373 __field(unsigned int, hdrlen) 1374 ), 1375 1376 TP_fast_assign( 1377 __entry->cq_id = ctxt->rc_cid.ci_queue_id; 1378 __entry->completion_id = ctxt->rc_cid.ci_completion_id; 1379 __entry->hdrlen = hdrlen; 1380 ), 1381 1382 TP_printk("cq.id=%u cid=%d hdrlen=%u", 1383 __entry->cq_id, __entry->completion_id, 1384 __entry->hdrlen) 1385 ); 1386 1387 DECLARE_EVENT_CLASS(svcrdma_badreq_event, 1388 
TP_PROTO( 1389 const struct svc_rdma_recv_ctxt *ctxt, 1390 __be32 *p 1391 ), 1392 1393 TP_ARGS(ctxt, p), 1394 1395 TP_STRUCT__entry( 1396 __field(u32, cq_id) 1397 __field(int, completion_id) 1398 __field(u32, xid) 1399 __field(u32, vers) 1400 __field(u32, proc) 1401 __field(u32, credits) 1402 ), 1403 1404 TP_fast_assign( 1405 __entry->cq_id = ctxt->rc_cid.ci_queue_id; 1406 __entry->completion_id = ctxt->rc_cid.ci_completion_id; 1407 __entry->xid = be32_to_cpup(p++); 1408 __entry->vers = be32_to_cpup(p++); 1409 __entry->credits = be32_to_cpup(p++); 1410 __entry->proc = be32_to_cpup(p); 1411 ), 1412 1413 TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u", 1414 __entry->cq_id, __entry->completion_id, 1415 __entry->xid, __entry->vers, __entry->credits, __entry->proc) 1416 ); 1417 1418 #define DEFINE_BADREQ_EVENT(name) \ 1419 DEFINE_EVENT(svcrdma_badreq_event, \ 1420 svcrdma_decode_##name##_err, \ 1421 TP_PROTO( \ 1422 const struct svc_rdma_recv_ctxt *ctxt, \ 1423 __be32 *p \ 1424 ), \ 1425 TP_ARGS(ctxt, p)) 1426 1427 DEFINE_BADREQ_EVENT(badvers); 1428 DEFINE_BADREQ_EVENT(drop); 1429 DEFINE_BADREQ_EVENT(badproc); 1430 DEFINE_BADREQ_EVENT(parse); 1431 1432 TRACE_EVENT(svcrdma_encode_wseg, 1433 TP_PROTO( 1434 const struct svc_rdma_send_ctxt *ctxt, 1435 u32 segno, 1436 u32 handle, 1437 u32 length, 1438 u64 offset 1439 ), 1440 1441 TP_ARGS(ctxt, segno, handle, length, offset), 1442 1443 TP_STRUCT__entry( 1444 __field(u32, cq_id) 1445 __field(int, completion_id) 1446 __field(u32, segno) 1447 __field(u32, handle) 1448 __field(u32, length) 1449 __field(u64, offset) 1450 ), 1451 1452 TP_fast_assign( 1453 __entry->cq_id = ctxt->sc_cid.ci_queue_id; 1454 __entry->completion_id = ctxt->sc_cid.ci_completion_id; 1455 __entry->segno = segno; 1456 __entry->handle = handle; 1457 __entry->length = length; 1458 __entry->offset = offset; 1459 ), 1460 1461 TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x", 1462 __entry->cq_id, __entry->completion_id, 1463 
__entry->segno, __entry->length, 1464 (unsigned long long)__entry->offset, __entry->handle 1465 ) 1466 ); 1467 1468 TRACE_EVENT(svcrdma_decode_rseg, 1469 TP_PROTO( 1470 const struct rpc_rdma_cid *cid, 1471 const struct svc_rdma_chunk *chunk, 1472 const struct svc_rdma_segment *segment 1473 ), 1474 1475 TP_ARGS(cid, chunk, segment), 1476 1477 TP_STRUCT__entry( 1478 __field(u32, cq_id) 1479 __field(int, completion_id) 1480 __field(u32, segno) 1481 __field(u32, position) 1482 __field(u32, handle) 1483 __field(u32, length) 1484 __field(u64, offset) 1485 ), 1486 1487 TP_fast_assign( 1488 __entry->cq_id = cid->ci_queue_id; 1489 __entry->completion_id = cid->ci_completion_id; 1490 __entry->segno = chunk->ch_segcount; 1491 __entry->position = chunk->ch_position; 1492 __entry->handle = segment->rs_handle; 1493 __entry->length = segment->rs_length; 1494 __entry->offset = segment->rs_offset; 1495 ), 1496 1497 TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x", 1498 __entry->cq_id, __entry->completion_id, 1499 __entry->segno, __entry->position, __entry->length, 1500 (unsigned long long)__entry->offset, __entry->handle 1501 ) 1502 ); 1503 1504 TRACE_EVENT(svcrdma_decode_wseg, 1505 TP_PROTO( 1506 const struct rpc_rdma_cid *cid, 1507 const struct svc_rdma_chunk *chunk, 1508 u32 segno 1509 ), 1510 1511 TP_ARGS(cid, chunk, segno), 1512 1513 TP_STRUCT__entry( 1514 __field(u32, cq_id) 1515 __field(int, completion_id) 1516 __field(u32, segno) 1517 __field(u32, handle) 1518 __field(u32, length) 1519 __field(u64, offset) 1520 ), 1521 1522 TP_fast_assign( 1523 const struct svc_rdma_segment *segment = 1524 &chunk->ch_segments[segno]; 1525 1526 __entry->cq_id = cid->ci_queue_id; 1527 __entry->completion_id = cid->ci_completion_id; 1528 __entry->segno = segno; 1529 __entry->handle = segment->rs_handle; 1530 __entry->length = segment->rs_length; 1531 __entry->offset = segment->rs_offset; 1532 ), 1533 1534 TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x", 1535 
__entry->cq_id, __entry->completion_id, 1536 __entry->segno, __entry->length, 1537 (unsigned long long)__entry->offset, __entry->handle 1538 ) 1539 ); 1540 1541 DECLARE_EVENT_CLASS(svcrdma_error_event, 1542 TP_PROTO( 1543 __be32 xid 1544 ), 1545 1546 TP_ARGS(xid), 1547 1548 TP_STRUCT__entry( 1549 __field(u32, xid) 1550 ), 1551 1552 TP_fast_assign( 1553 __entry->xid = be32_to_cpu(xid); 1554 ), 1555 1556 TP_printk("xid=0x%08x", 1557 __entry->xid 1558 ) 1559 ); 1560 1561 #define DEFINE_ERROR_EVENT(name) \ 1562 DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name, \ 1563 TP_PROTO( \ 1564 __be32 xid \ 1565 ), \ 1566 TP_ARGS(xid)) 1567 1568 DEFINE_ERROR_EVENT(vers); 1569 DEFINE_ERROR_EVENT(chunk); 1570 1571 /** 1572 ** Server-side RDMA API events 1573 **/ 1574 1575 DECLARE_EVENT_CLASS(svcrdma_dma_map_class, 1576 TP_PROTO( 1577 const struct svcxprt_rdma *rdma, 1578 u64 dma_addr, 1579 u32 length 1580 ), 1581 1582 TP_ARGS(rdma, dma_addr, length), 1583 1584 TP_STRUCT__entry( 1585 __field(u64, dma_addr) 1586 __field(u32, length) 1587 __string(device, rdma->sc_cm_id->device->name) 1588 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1589 ), 1590 1591 TP_fast_assign( 1592 __entry->dma_addr = dma_addr; 1593 __entry->length = length; 1594 __assign_str(device, rdma->sc_cm_id->device->name); 1595 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1596 ), 1597 1598 TP_printk("addr=%s device=%s dma_addr=%llu length=%u", 1599 __get_str(addr), __get_str(device), 1600 __entry->dma_addr, __entry->length 1601 ) 1602 ); 1603 1604 #define DEFINE_SVC_DMA_EVENT(name) \ 1605 DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \ 1606 TP_PROTO( \ 1607 const struct svcxprt_rdma *rdma,\ 1608 u64 dma_addr, \ 1609 u32 length \ 1610 ), \ 1611 TP_ARGS(rdma, dma_addr, length)) 1612 1613 DEFINE_SVC_DMA_EVENT(dma_map_page); 1614 DEFINE_SVC_DMA_EVENT(dma_map_err); 1615 DEFINE_SVC_DMA_EVENT(dma_unmap_page); 1616 1617 TRACE_EVENT(svcrdma_dma_map_rw_err, 1618 TP_PROTO( 1619 const struct svcxprt_rdma *rdma, 1620 
unsigned int nents, 1621 int status 1622 ), 1623 1624 TP_ARGS(rdma, nents, status), 1625 1626 TP_STRUCT__entry( 1627 __field(int, status) 1628 __field(unsigned int, nents) 1629 __string(device, rdma->sc_cm_id->device->name) 1630 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1631 ), 1632 1633 TP_fast_assign( 1634 __entry->status = status; 1635 __entry->nents = nents; 1636 __assign_str(device, rdma->sc_cm_id->device->name); 1637 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1638 ), 1639 1640 TP_printk("addr=%s device=%s nents=%u status=%d", 1641 __get_str(addr), __get_str(device), __entry->nents, 1642 __entry->status 1643 ) 1644 ); 1645 1646 TRACE_EVENT(svcrdma_no_rwctx_err, 1647 TP_PROTO( 1648 const struct svcxprt_rdma *rdma, 1649 unsigned int num_sges 1650 ), 1651 1652 TP_ARGS(rdma, num_sges), 1653 1654 TP_STRUCT__entry( 1655 __field(unsigned int, num_sges) 1656 __string(device, rdma->sc_cm_id->device->name) 1657 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1658 ), 1659 1660 TP_fast_assign( 1661 __entry->num_sges = num_sges; 1662 __assign_str(device, rdma->sc_cm_id->device->name); 1663 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1664 ), 1665 1666 TP_printk("addr=%s device=%s num_sges=%d", 1667 __get_str(addr), __get_str(device), __entry->num_sges 1668 ) 1669 ); 1670 1671 TRACE_EVENT(svcrdma_page_overrun_err, 1672 TP_PROTO( 1673 const struct svcxprt_rdma *rdma, 1674 const struct svc_rqst *rqst, 1675 unsigned int pageno 1676 ), 1677 1678 TP_ARGS(rdma, rqst, pageno), 1679 1680 TP_STRUCT__entry( 1681 __field(unsigned int, pageno) 1682 __field(u32, xid) 1683 __string(device, rdma->sc_cm_id->device->name) 1684 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1685 ), 1686 1687 TP_fast_assign( 1688 __entry->pageno = pageno; 1689 __entry->xid = __be32_to_cpu(rqst->rq_xid); 1690 __assign_str(device, rdma->sc_cm_id->device->name); 1691 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1692 ), 1693 1694 TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr), 1695 
__get_str(device), __entry->xid, __entry->pageno 1696 ) 1697 ); 1698 1699 TRACE_EVENT(svcrdma_small_wrch_err, 1700 TP_PROTO( 1701 const struct svcxprt_rdma *rdma, 1702 unsigned int remaining, 1703 unsigned int seg_no, 1704 unsigned int num_segs 1705 ), 1706 1707 TP_ARGS(rdma, remaining, seg_no, num_segs), 1708 1709 TP_STRUCT__entry( 1710 __field(unsigned int, remaining) 1711 __field(unsigned int, seg_no) 1712 __field(unsigned int, num_segs) 1713 __string(device, rdma->sc_cm_id->device->name) 1714 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1715 ), 1716 1717 TP_fast_assign( 1718 __entry->remaining = remaining; 1719 __entry->seg_no = seg_no; 1720 __entry->num_segs = num_segs; 1721 __assign_str(device, rdma->sc_cm_id->device->name); 1722 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1723 ), 1724 1725 TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u", 1726 __get_str(addr), __get_str(device), __entry->remaining, 1727 __entry->seg_no, __entry->num_segs 1728 ) 1729 ); 1730 1731 TRACE_EVENT(svcrdma_send_pullup, 1732 TP_PROTO( 1733 const struct svc_rdma_send_ctxt *ctxt, 1734 unsigned int msglen 1735 ), 1736 1737 TP_ARGS(ctxt, msglen), 1738 1739 TP_STRUCT__entry( 1740 __field(u32, cq_id) 1741 __field(int, completion_id) 1742 __field(unsigned int, hdrlen) 1743 __field(unsigned int, msglen) 1744 ), 1745 1746 TP_fast_assign( 1747 __entry->cq_id = ctxt->sc_cid.ci_queue_id; 1748 __entry->completion_id = ctxt->sc_cid.ci_completion_id; 1749 __entry->hdrlen = ctxt->sc_hdrbuf.len, 1750 __entry->msglen = msglen; 1751 ), 1752 1753 TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)", 1754 __entry->cq_id, __entry->completion_id, 1755 __entry->hdrlen, __entry->msglen, 1756 __entry->hdrlen + __entry->msglen) 1757 ); 1758 1759 TRACE_EVENT(svcrdma_send_err, 1760 TP_PROTO( 1761 const struct svc_rqst *rqst, 1762 int status 1763 ), 1764 1765 TP_ARGS(rqst, status), 1766 1767 TP_STRUCT__entry( 1768 __field(int, status) 1769 __field(u32, xid) 1770 __string(addr, 
rqst->rq_xprt->xpt_remotebuf) 1771 ), 1772 1773 TP_fast_assign( 1774 __entry->status = status; 1775 __entry->xid = __be32_to_cpu(rqst->rq_xid); 1776 __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); 1777 ), 1778 1779 TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr), 1780 __entry->xid, __entry->status 1781 ) 1782 ); 1783 1784 TRACE_EVENT(svcrdma_post_send, 1785 TP_PROTO( 1786 const struct svc_rdma_send_ctxt *ctxt 1787 ), 1788 1789 TP_ARGS(ctxt), 1790 1791 TP_STRUCT__entry( 1792 __field(u32, cq_id) 1793 __field(int, completion_id) 1794 __field(unsigned int, num_sge) 1795 __field(u32, inv_rkey) 1796 ), 1797 1798 TP_fast_assign( 1799 const struct ib_send_wr *wr = &ctxt->sc_send_wr; 1800 1801 __entry->cq_id = ctxt->sc_cid.ci_queue_id; 1802 __entry->completion_id = ctxt->sc_cid.ci_completion_id; 1803 __entry->num_sge = wr->num_sge; 1804 __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ? 1805 wr->ex.invalidate_rkey : 0; 1806 ), 1807 1808 TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x", 1809 __entry->cq_id, __entry->completion_id, 1810 __entry->num_sge, __entry->inv_rkey 1811 ) 1812 ); 1813 1814 DEFINE_COMPLETION_EVENT(svcrdma_wc_send); 1815 1816 TRACE_EVENT(svcrdma_post_recv, 1817 TP_PROTO( 1818 const struct svc_rdma_recv_ctxt *ctxt 1819 ), 1820 1821 TP_ARGS(ctxt), 1822 1823 TP_STRUCT__entry( 1824 __field(u32, cq_id) 1825 __field(int, completion_id) 1826 ), 1827 1828 TP_fast_assign( 1829 __entry->cq_id = ctxt->rc_cid.ci_queue_id; 1830 __entry->completion_id = ctxt->rc_cid.ci_completion_id; 1831 ), 1832 1833 TP_printk("cq.id=%d cid=%d", 1834 __entry->cq_id, __entry->completion_id 1835 ) 1836 ); 1837 1838 DEFINE_RECEIVE_COMPLETION_EVENT(svcrdma_wc_receive); 1839 1840 TRACE_EVENT(svcrdma_rq_post_err, 1841 TP_PROTO( 1842 const struct svcxprt_rdma *rdma, 1843 int status 1844 ), 1845 1846 TP_ARGS(rdma, status), 1847 1848 TP_STRUCT__entry( 1849 __field(int, status) 1850 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1851 ), 1852 1853 TP_fast_assign( 1854 
__entry->status = status; 1855 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1856 ), 1857 1858 TP_printk("addr=%s status=%d", 1859 __get_str(addr), __entry->status 1860 ) 1861 ); 1862 1863 DECLARE_EVENT_CLASS(svcrdma_post_chunk_class, 1864 TP_PROTO( 1865 const struct rpc_rdma_cid *cid, 1866 int sqecount 1867 ), 1868 1869 TP_ARGS(cid, sqecount), 1870 1871 TP_STRUCT__entry( 1872 __field(u32, cq_id) 1873 __field(int, completion_id) 1874 __field(int, sqecount) 1875 ), 1876 1877 TP_fast_assign( 1878 __entry->cq_id = cid->ci_queue_id; 1879 __entry->completion_id = cid->ci_completion_id; 1880 __entry->sqecount = sqecount; 1881 ), 1882 1883 TP_printk("cq.id=%u cid=%d sqecount=%d", 1884 __entry->cq_id, __entry->completion_id, 1885 __entry->sqecount 1886 ) 1887 ); 1888 1889 #define DEFINE_POST_CHUNK_EVENT(name) \ 1890 DEFINE_EVENT(svcrdma_post_chunk_class, \ 1891 svcrdma_post_##name##_chunk, \ 1892 TP_PROTO( \ 1893 const struct rpc_rdma_cid *cid, \ 1894 int sqecount \ 1895 ), \ 1896 TP_ARGS(cid, sqecount)) 1897 1898 DEFINE_POST_CHUNK_EVENT(read); 1899 DEFINE_POST_CHUNK_EVENT(write); 1900 DEFINE_POST_CHUNK_EVENT(reply); 1901 1902 DEFINE_COMPLETION_EVENT(svcrdma_wc_read); 1903 DEFINE_COMPLETION_EVENT(svcrdma_wc_write); 1904 1905 TRACE_EVENT(svcrdma_qp_error, 1906 TP_PROTO( 1907 const struct ib_event *event, 1908 const struct sockaddr *sap 1909 ), 1910 1911 TP_ARGS(event, sap), 1912 1913 TP_STRUCT__entry( 1914 __field(unsigned int, event) 1915 __string(device, event->device->name) 1916 __array(__u8, addr, INET6_ADDRSTRLEN + 10) 1917 ), 1918 1919 TP_fast_assign( 1920 __entry->event = event->event; 1921 __assign_str(device, event->device->name); 1922 snprintf(__entry->addr, sizeof(__entry->addr) - 1, 1923 "%pISpc", sap); 1924 ), 1925 1926 TP_printk("addr=%s dev=%s event=%s (%u)", 1927 __entry->addr, __get_str(device), 1928 rdma_show_ib_event(__entry->event), __entry->event 1929 ) 1930 ); 1931 1932 DECLARE_EVENT_CLASS(svcrdma_sendqueue_event, 1933 TP_PROTO( 1934 const struct 
svcxprt_rdma *rdma 1935 ), 1936 1937 TP_ARGS(rdma), 1938 1939 TP_STRUCT__entry( 1940 __field(int, avail) 1941 __field(int, depth) 1942 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1943 ), 1944 1945 TP_fast_assign( 1946 __entry->avail = atomic_read(&rdma->sc_sq_avail); 1947 __entry->depth = rdma->sc_sq_depth; 1948 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1949 ), 1950 1951 TP_printk("addr=%s sc_sq_avail=%d/%d", 1952 __get_str(addr), __entry->avail, __entry->depth 1953 ) 1954 ); 1955 1956 #define DEFINE_SQ_EVENT(name) \ 1957 DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\ 1958 TP_PROTO( \ 1959 const struct svcxprt_rdma *rdma \ 1960 ), \ 1961 TP_ARGS(rdma)) 1962 1963 DEFINE_SQ_EVENT(full); 1964 DEFINE_SQ_EVENT(retry); 1965 1966 TRACE_EVENT(svcrdma_sq_post_err, 1967 TP_PROTO( 1968 const struct svcxprt_rdma *rdma, 1969 int status 1970 ), 1971 1972 TP_ARGS(rdma, status), 1973 1974 TP_STRUCT__entry( 1975 __field(int, avail) 1976 __field(int, depth) 1977 __field(int, status) 1978 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1979 ), 1980 1981 TP_fast_assign( 1982 __entry->avail = atomic_read(&rdma->sc_sq_avail); 1983 __entry->depth = rdma->sc_sq_depth; 1984 __entry->status = status; 1985 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1986 ), 1987 1988 TP_printk("addr=%s sc_sq_avail=%d/%d status=%d", 1989 __get_str(addr), __entry->avail, __entry->depth, 1990 __entry->status 1991 ) 1992 ); 1993 1994 #endif /* _TRACE_RPCRDMA_H */ 1995 1996 #include <trace/define_trace.h> 1997