1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Copyright (c) 2017, 2018 Oracle. All rights reserved. 4 * 5 * Trace point definitions for the "rpcrdma" subsystem. 6 */ 7 #undef TRACE_SYSTEM 8 #define TRACE_SYSTEM rpcrdma 9 10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ) 11 #define _TRACE_RPCRDMA_H 12 13 #include <linux/scatterlist.h> 14 #include <linux/sunrpc/rpc_rdma_cid.h> 15 #include <linux/tracepoint.h> 16 #include <rdma/ib_cm.h> 17 #include <trace/events/rdma.h> 18 19 /** 20 ** Event classes 21 **/ 22 23 DECLARE_EVENT_CLASS(rpcrdma_completion_class, 24 TP_PROTO( 25 const struct ib_wc *wc, 26 const struct rpc_rdma_cid *cid 27 ), 28 29 TP_ARGS(wc, cid), 30 31 TP_STRUCT__entry( 32 __field(u32, cq_id) 33 __field(int, completion_id) 34 __field(unsigned long, status) 35 __field(unsigned int, vendor_err) 36 ), 37 38 TP_fast_assign( 39 __entry->cq_id = cid->ci_queue_id; 40 __entry->completion_id = cid->ci_completion_id; 41 __entry->status = wc->status; 42 if (wc->status) 43 __entry->vendor_err = wc->vendor_err; 44 else 45 __entry->vendor_err = 0; 46 ), 47 48 TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)", 49 __entry->cq_id, __entry->completion_id, 50 rdma_show_wc_status(__entry->status), 51 __entry->status, __entry->vendor_err 52 ) 53 ); 54 55 #define DEFINE_COMPLETION_EVENT(name) \ 56 DEFINE_EVENT(rpcrdma_completion_class, name, \ 57 TP_PROTO( \ 58 const struct ib_wc *wc, \ 59 const struct rpc_rdma_cid *cid \ 60 ), \ 61 TP_ARGS(wc, cid)) 62 63 DECLARE_EVENT_CLASS(xprtrdma_reply_class, 64 TP_PROTO( 65 const struct rpcrdma_rep *rep 66 ), 67 68 TP_ARGS(rep), 69 70 TP_STRUCT__entry( 71 __field(u32, xid) 72 __field(u32, version) 73 __field(u32, proc) 74 __string(addr, rpcrdma_addrstr(rep->rr_rxprt)) 75 __string(port, rpcrdma_portstr(rep->rr_rxprt)) 76 ), 77 78 TP_fast_assign( 79 __entry->xid = be32_to_cpu(rep->rr_xid); 80 __entry->version = be32_to_cpu(rep->rr_vers); 81 __entry->proc = be32_to_cpu(rep->rr_proc); 82 __assign_str(addr, 
rpcrdma_addrstr(rep->rr_rxprt)); 83 __assign_str(port, rpcrdma_portstr(rep->rr_rxprt)); 84 ), 85 86 TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u", 87 __get_str(addr), __get_str(port), 88 __entry->xid, __entry->version, __entry->proc 89 ) 90 ); 91 92 #define DEFINE_REPLY_EVENT(name) \ 93 DEFINE_EVENT(xprtrdma_reply_class, \ 94 xprtrdma_reply_##name##_err, \ 95 TP_PROTO( \ 96 const struct rpcrdma_rep *rep \ 97 ), \ 98 TP_ARGS(rep)) 99 100 DECLARE_EVENT_CLASS(xprtrdma_rxprt, 101 TP_PROTO( 102 const struct rpcrdma_xprt *r_xprt 103 ), 104 105 TP_ARGS(r_xprt), 106 107 TP_STRUCT__entry( 108 __field(const void *, r_xprt) 109 __string(addr, rpcrdma_addrstr(r_xprt)) 110 __string(port, rpcrdma_portstr(r_xprt)) 111 ), 112 113 TP_fast_assign( 114 __entry->r_xprt = r_xprt; 115 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 116 __assign_str(port, rpcrdma_portstr(r_xprt)); 117 ), 118 119 TP_printk("peer=[%s]:%s r_xprt=%p", 120 __get_str(addr), __get_str(port), __entry->r_xprt 121 ) 122 ); 123 124 #define DEFINE_RXPRT_EVENT(name) \ 125 DEFINE_EVENT(xprtrdma_rxprt, name, \ 126 TP_PROTO( \ 127 const struct rpcrdma_xprt *r_xprt \ 128 ), \ 129 TP_ARGS(r_xprt)) 130 131 DECLARE_EVENT_CLASS(xprtrdma_connect_class, 132 TP_PROTO( 133 const struct rpcrdma_xprt *r_xprt, 134 int rc 135 ), 136 137 TP_ARGS(r_xprt, rc), 138 139 TP_STRUCT__entry( 140 __field(const void *, r_xprt) 141 __field(int, rc) 142 __field(int, connect_status) 143 __string(addr, rpcrdma_addrstr(r_xprt)) 144 __string(port, rpcrdma_portstr(r_xprt)) 145 ), 146 147 TP_fast_assign( 148 __entry->r_xprt = r_xprt; 149 __entry->rc = rc; 150 __entry->connect_status = r_xprt->rx_ep->re_connect_status; 151 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 152 __assign_str(port, rpcrdma_portstr(r_xprt)); 153 ), 154 155 TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d", 156 __get_str(addr), __get_str(port), __entry->r_xprt, 157 __entry->rc, __entry->connect_status 158 ) 159 ); 160 161 #define DEFINE_CONN_EVENT(name) \ 162 
DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name, \ 163 TP_PROTO( \ 164 const struct rpcrdma_xprt *r_xprt, \ 165 int rc \ 166 ), \ 167 TP_ARGS(r_xprt, rc)) 168 169 DECLARE_EVENT_CLASS(xprtrdma_rdch_event, 170 TP_PROTO( 171 const struct rpc_task *task, 172 unsigned int pos, 173 struct rpcrdma_mr *mr, 174 int nsegs 175 ), 176 177 TP_ARGS(task, pos, mr, nsegs), 178 179 TP_STRUCT__entry( 180 __field(unsigned int, task_id) 181 __field(unsigned int, client_id) 182 __field(unsigned int, pos) 183 __field(int, nents) 184 __field(u32, handle) 185 __field(u32, length) 186 __field(u64, offset) 187 __field(int, nsegs) 188 ), 189 190 TP_fast_assign( 191 __entry->task_id = task->tk_pid; 192 __entry->client_id = task->tk_client->cl_clid; 193 __entry->pos = pos; 194 __entry->nents = mr->mr_nents; 195 __entry->handle = mr->mr_handle; 196 __entry->length = mr->mr_length; 197 __entry->offset = mr->mr_offset; 198 __entry->nsegs = nsegs; 199 ), 200 201 TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)", 202 __entry->task_id, __entry->client_id, 203 __entry->pos, __entry->length, 204 (unsigned long long)__entry->offset, __entry->handle, 205 __entry->nents < __entry->nsegs ? 
"more" : "last" 206 ) 207 ); 208 209 #define DEFINE_RDCH_EVENT(name) \ 210 DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\ 211 TP_PROTO( \ 212 const struct rpc_task *task, \ 213 unsigned int pos, \ 214 struct rpcrdma_mr *mr, \ 215 int nsegs \ 216 ), \ 217 TP_ARGS(task, pos, mr, nsegs)) 218 219 DECLARE_EVENT_CLASS(xprtrdma_wrch_event, 220 TP_PROTO( 221 const struct rpc_task *task, 222 struct rpcrdma_mr *mr, 223 int nsegs 224 ), 225 226 TP_ARGS(task, mr, nsegs), 227 228 TP_STRUCT__entry( 229 __field(unsigned int, task_id) 230 __field(unsigned int, client_id) 231 __field(int, nents) 232 __field(u32, handle) 233 __field(u32, length) 234 __field(u64, offset) 235 __field(int, nsegs) 236 ), 237 238 TP_fast_assign( 239 __entry->task_id = task->tk_pid; 240 __entry->client_id = task->tk_client->cl_clid; 241 __entry->nents = mr->mr_nents; 242 __entry->handle = mr->mr_handle; 243 __entry->length = mr->mr_length; 244 __entry->offset = mr->mr_offset; 245 __entry->nsegs = nsegs; 246 ), 247 248 TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)", 249 __entry->task_id, __entry->client_id, 250 __entry->length, (unsigned long long)__entry->offset, 251 __entry->handle, 252 __entry->nents < __entry->nsegs ? 
"more" : "last" 253 ) 254 ); 255 256 #define DEFINE_WRCH_EVENT(name) \ 257 DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\ 258 TP_PROTO( \ 259 const struct rpc_task *task, \ 260 struct rpcrdma_mr *mr, \ 261 int nsegs \ 262 ), \ 263 TP_ARGS(task, mr, nsegs)) 264 265 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL); 266 TRACE_DEFINE_ENUM(DMA_TO_DEVICE); 267 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE); 268 TRACE_DEFINE_ENUM(DMA_NONE); 269 270 #define xprtrdma_show_direction(x) \ 271 __print_symbolic(x, \ 272 { DMA_BIDIRECTIONAL, "BIDIR" }, \ 273 { DMA_TO_DEVICE, "TO_DEVICE" }, \ 274 { DMA_FROM_DEVICE, "FROM_DEVICE" }, \ 275 { DMA_NONE, "NONE" }) 276 277 DECLARE_EVENT_CLASS(xprtrdma_mr, 278 TP_PROTO( 279 const struct rpcrdma_mr *mr 280 ), 281 282 TP_ARGS(mr), 283 284 TP_STRUCT__entry( 285 __field(u32, mr_id) 286 __field(int, nents) 287 __field(u32, handle) 288 __field(u32, length) 289 __field(u64, offset) 290 __field(u32, dir) 291 ), 292 293 TP_fast_assign( 294 __entry->mr_id = mr->frwr.fr_mr->res.id; 295 __entry->nents = mr->mr_nents; 296 __entry->handle = mr->mr_handle; 297 __entry->length = mr->mr_length; 298 __entry->offset = mr->mr_offset; 299 __entry->dir = mr->mr_dir; 300 ), 301 302 TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)", 303 __entry->mr_id, __entry->nents, __entry->length, 304 (unsigned long long)__entry->offset, __entry->handle, 305 xprtrdma_show_direction(__entry->dir) 306 ) 307 ); 308 309 #define DEFINE_MR_EVENT(name) \ 310 DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \ 311 TP_PROTO( \ 312 const struct rpcrdma_mr *mr \ 313 ), \ 314 TP_ARGS(mr)) 315 316 DECLARE_EVENT_CLASS(xprtrdma_callback_class, 317 TP_PROTO( 318 const struct rpcrdma_xprt *r_xprt, 319 const struct rpc_rqst *rqst 320 ), 321 322 TP_ARGS(r_xprt, rqst), 323 324 TP_STRUCT__entry( 325 __field(u32, xid) 326 __string(addr, rpcrdma_addrstr(r_xprt)) 327 __string(port, rpcrdma_portstr(r_xprt)) 328 ), 329 330 TP_fast_assign( 331 __entry->xid = be32_to_cpu(rqst->rq_xid); 332 __assign_str(addr, 
rpcrdma_addrstr(r_xprt)); 333 __assign_str(port, rpcrdma_portstr(r_xprt)); 334 ), 335 336 TP_printk("peer=[%s]:%s xid=0x%08x", 337 __get_str(addr), __get_str(port), __entry->xid 338 ) 339 ); 340 341 #define DEFINE_CALLBACK_EVENT(name) \ 342 DEFINE_EVENT(xprtrdma_callback_class, \ 343 xprtrdma_cb_##name, \ 344 TP_PROTO( \ 345 const struct rpcrdma_xprt *r_xprt, \ 346 const struct rpc_rqst *rqst \ 347 ), \ 348 TP_ARGS(r_xprt, rqst)) 349 350 /** 351 ** Connection events 352 **/ 353 354 TRACE_EVENT(xprtrdma_inline_thresh, 355 TP_PROTO( 356 const struct rpcrdma_ep *ep 357 ), 358 359 TP_ARGS(ep), 360 361 TP_STRUCT__entry( 362 __field(unsigned int, inline_send) 363 __field(unsigned int, inline_recv) 364 __field(unsigned int, max_send) 365 __field(unsigned int, max_recv) 366 __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 367 __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) 368 ), 369 370 TP_fast_assign( 371 const struct rdma_cm_id *id = ep->re_id; 372 373 __entry->inline_send = ep->re_inline_send; 374 __entry->inline_recv = ep->re_inline_recv; 375 __entry->max_send = ep->re_max_inline_send; 376 __entry->max_recv = ep->re_max_inline_recv; 377 memcpy(__entry->srcaddr, &id->route.addr.src_addr, 378 sizeof(struct sockaddr_in6)); 379 memcpy(__entry->dstaddr, &id->route.addr.dst_addr, 380 sizeof(struct sockaddr_in6)); 381 ), 382 383 TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u", 384 __entry->srcaddr, __entry->dstaddr, 385 __entry->inline_send, __entry->inline_recv, 386 __entry->max_send, __entry->max_recv 387 ) 388 ); 389 390 DEFINE_CONN_EVENT(connect); 391 DEFINE_CONN_EVENT(disconnect); 392 393 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc); 394 395 TRACE_EVENT(xprtrdma_op_connect, 396 TP_PROTO( 397 const struct rpcrdma_xprt *r_xprt, 398 unsigned long delay 399 ), 400 401 TP_ARGS(r_xprt, delay), 402 403 TP_STRUCT__entry( 404 __field(const void *, r_xprt) 405 __field(unsigned long, delay) 406 __string(addr, rpcrdma_addrstr(r_xprt)) 
407 __string(port, rpcrdma_portstr(r_xprt)) 408 ), 409 410 TP_fast_assign( 411 __entry->r_xprt = r_xprt; 412 __entry->delay = delay; 413 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 414 __assign_str(port, rpcrdma_portstr(r_xprt)); 415 ), 416 417 TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu", 418 __get_str(addr), __get_str(port), __entry->r_xprt, 419 __entry->delay 420 ) 421 ); 422 423 424 TRACE_EVENT(xprtrdma_op_set_cto, 425 TP_PROTO( 426 const struct rpcrdma_xprt *r_xprt, 427 unsigned long connect, 428 unsigned long reconnect 429 ), 430 431 TP_ARGS(r_xprt, connect, reconnect), 432 433 TP_STRUCT__entry( 434 __field(const void *, r_xprt) 435 __field(unsigned long, connect) 436 __field(unsigned long, reconnect) 437 __string(addr, rpcrdma_addrstr(r_xprt)) 438 __string(port, rpcrdma_portstr(r_xprt)) 439 ), 440 441 TP_fast_assign( 442 __entry->r_xprt = r_xprt; 443 __entry->connect = connect; 444 __entry->reconnect = reconnect; 445 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 446 __assign_str(port, rpcrdma_portstr(r_xprt)); 447 ), 448 449 TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu", 450 __get_str(addr), __get_str(port), __entry->r_xprt, 451 __entry->connect / HZ, __entry->reconnect / HZ 452 ) 453 ); 454 455 TRACE_EVENT(xprtrdma_qp_event, 456 TP_PROTO( 457 const struct rpcrdma_ep *ep, 458 const struct ib_event *event 459 ), 460 461 TP_ARGS(ep, event), 462 463 TP_STRUCT__entry( 464 __field(unsigned long, event) 465 __string(name, event->device->name) 466 __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 467 __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) 468 ), 469 470 TP_fast_assign( 471 const struct rdma_cm_id *id = ep->re_id; 472 473 __entry->event = event->event; 474 __assign_str(name, event->device->name); 475 memcpy(__entry->srcaddr, &id->route.addr.src_addr, 476 sizeof(struct sockaddr_in6)); 477 memcpy(__entry->dstaddr, &id->route.addr.dst_addr, 478 sizeof(struct sockaddr_in6)); 479 ), 480 481 TP_printk("%pISpc -> %pISpc 
device=%s %s (%lu)", 482 __entry->srcaddr, __entry->dstaddr, __get_str(name), 483 rdma_show_ib_event(__entry->event), __entry->event 484 ) 485 ); 486 487 /** 488 ** Call events 489 **/ 490 491 TRACE_EVENT(xprtrdma_createmrs, 492 TP_PROTO( 493 const struct rpcrdma_xprt *r_xprt, 494 unsigned int count 495 ), 496 497 TP_ARGS(r_xprt, count), 498 499 TP_STRUCT__entry( 500 __field(const void *, r_xprt) 501 __string(addr, rpcrdma_addrstr(r_xprt)) 502 __string(port, rpcrdma_portstr(r_xprt)) 503 __field(unsigned int, count) 504 ), 505 506 TP_fast_assign( 507 __entry->r_xprt = r_xprt; 508 __entry->count = count; 509 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 510 __assign_str(port, rpcrdma_portstr(r_xprt)); 511 ), 512 513 TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs", 514 __get_str(addr), __get_str(port), __entry->r_xprt, 515 __entry->count 516 ) 517 ); 518 519 TRACE_EVENT(xprtrdma_mr_get, 520 TP_PROTO( 521 const struct rpcrdma_req *req 522 ), 523 524 TP_ARGS(req), 525 526 TP_STRUCT__entry( 527 __field(const void *, req) 528 __field(unsigned int, task_id) 529 __field(unsigned int, client_id) 530 __field(u32, xid) 531 ), 532 533 TP_fast_assign( 534 const struct rpc_rqst *rqst = &req->rl_slot; 535 536 __entry->req = req; 537 __entry->task_id = rqst->rq_task->tk_pid; 538 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 539 __entry->xid = be32_to_cpu(rqst->rq_xid); 540 ), 541 542 TP_printk("task:%u@%u xid=0x%08x req=%p", 543 __entry->task_id, __entry->client_id, __entry->xid, 544 __entry->req 545 ) 546 ); 547 548 TRACE_EVENT(xprtrdma_nomrs, 549 TP_PROTO( 550 const struct rpcrdma_req *req 551 ), 552 553 TP_ARGS(req), 554 555 TP_STRUCT__entry( 556 __field(const void *, req) 557 __field(unsigned int, task_id) 558 __field(unsigned int, client_id) 559 __field(u32, xid) 560 ), 561 562 TP_fast_assign( 563 const struct rpc_rqst *rqst = &req->rl_slot; 564 565 __entry->req = req; 566 __entry->task_id = rqst->rq_task->tk_pid; 567 __entry->client_id = 
rqst->rq_task->tk_client->cl_clid; 568 __entry->xid = be32_to_cpu(rqst->rq_xid); 569 ), 570 571 TP_printk("task:%u@%u xid=0x%08x req=%p", 572 __entry->task_id, __entry->client_id, __entry->xid, 573 __entry->req 574 ) 575 ); 576 577 DEFINE_RDCH_EVENT(read); 578 DEFINE_WRCH_EVENT(write); 579 DEFINE_WRCH_EVENT(reply); 580 581 TRACE_DEFINE_ENUM(rpcrdma_noch); 582 TRACE_DEFINE_ENUM(rpcrdma_noch_pullup); 583 TRACE_DEFINE_ENUM(rpcrdma_noch_mapped); 584 TRACE_DEFINE_ENUM(rpcrdma_readch); 585 TRACE_DEFINE_ENUM(rpcrdma_areadch); 586 TRACE_DEFINE_ENUM(rpcrdma_writech); 587 TRACE_DEFINE_ENUM(rpcrdma_replych); 588 589 #define xprtrdma_show_chunktype(x) \ 590 __print_symbolic(x, \ 591 { rpcrdma_noch, "inline" }, \ 592 { rpcrdma_noch_pullup, "pullup" }, \ 593 { rpcrdma_noch_mapped, "mapped" }, \ 594 { rpcrdma_readch, "read list" }, \ 595 { rpcrdma_areadch, "*read list" }, \ 596 { rpcrdma_writech, "write list" }, \ 597 { rpcrdma_replych, "reply chunk" }) 598 599 TRACE_EVENT(xprtrdma_marshal, 600 TP_PROTO( 601 const struct rpcrdma_req *req, 602 unsigned int rtype, 603 unsigned int wtype 604 ), 605 606 TP_ARGS(req, rtype, wtype), 607 608 TP_STRUCT__entry( 609 __field(unsigned int, task_id) 610 __field(unsigned int, client_id) 611 __field(u32, xid) 612 __field(unsigned int, hdrlen) 613 __field(unsigned int, headlen) 614 __field(unsigned int, pagelen) 615 __field(unsigned int, taillen) 616 __field(unsigned int, rtype) 617 __field(unsigned int, wtype) 618 ), 619 620 TP_fast_assign( 621 const struct rpc_rqst *rqst = &req->rl_slot; 622 623 __entry->task_id = rqst->rq_task->tk_pid; 624 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 625 __entry->xid = be32_to_cpu(rqst->rq_xid); 626 __entry->hdrlen = req->rl_hdrbuf.len; 627 __entry->headlen = rqst->rq_snd_buf.head[0].iov_len; 628 __entry->pagelen = rqst->rq_snd_buf.page_len; 629 __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len; 630 __entry->rtype = rtype; 631 __entry->wtype = wtype; 632 ), 633 634 TP_printk("task:%u@%u 
xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s", 635 __entry->task_id, __entry->client_id, __entry->xid, 636 __entry->hdrlen, 637 __entry->headlen, __entry->pagelen, __entry->taillen, 638 xprtrdma_show_chunktype(__entry->rtype), 639 xprtrdma_show_chunktype(__entry->wtype) 640 ) 641 ); 642 643 TRACE_EVENT(xprtrdma_marshal_failed, 644 TP_PROTO(const struct rpc_rqst *rqst, 645 int ret 646 ), 647 648 TP_ARGS(rqst, ret), 649 650 TP_STRUCT__entry( 651 __field(unsigned int, task_id) 652 __field(unsigned int, client_id) 653 __field(u32, xid) 654 __field(int, ret) 655 ), 656 657 TP_fast_assign( 658 __entry->task_id = rqst->rq_task->tk_pid; 659 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 660 __entry->xid = be32_to_cpu(rqst->rq_xid); 661 __entry->ret = ret; 662 ), 663 664 TP_printk("task:%u@%u xid=0x%08x: ret=%d", 665 __entry->task_id, __entry->client_id, __entry->xid, 666 __entry->ret 667 ) 668 ); 669 670 TRACE_EVENT(xprtrdma_prepsend_failed, 671 TP_PROTO(const struct rpc_rqst *rqst, 672 int ret 673 ), 674 675 TP_ARGS(rqst, ret), 676 677 TP_STRUCT__entry( 678 __field(unsigned int, task_id) 679 __field(unsigned int, client_id) 680 __field(u32, xid) 681 __field(int, ret) 682 ), 683 684 TP_fast_assign( 685 __entry->task_id = rqst->rq_task->tk_pid; 686 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 687 __entry->xid = be32_to_cpu(rqst->rq_xid); 688 __entry->ret = ret; 689 ), 690 691 TP_printk("task:%u@%u xid=0x%08x: ret=%d", 692 __entry->task_id, __entry->client_id, __entry->xid, 693 __entry->ret 694 ) 695 ); 696 697 TRACE_EVENT(xprtrdma_post_send, 698 TP_PROTO( 699 const struct rpcrdma_req *req 700 ), 701 702 TP_ARGS(req), 703 704 TP_STRUCT__entry( 705 __field(u32, cq_id) 706 __field(int, completion_id) 707 __field(unsigned int, task_id) 708 __field(unsigned int, client_id) 709 __field(int, num_sge) 710 __field(int, signaled) 711 ), 712 713 TP_fast_assign( 714 const struct rpc_rqst *rqst = &req->rl_slot; 715 const struct rpcrdma_sendctx *sc = req->rl_sendctx; 716 717 
__entry->cq_id = sc->sc_cid.ci_queue_id; 718 __entry->completion_id = sc->sc_cid.ci_completion_id; 719 __entry->task_id = rqst->rq_task->tk_pid; 720 __entry->client_id = rqst->rq_task->tk_client ? 721 rqst->rq_task->tk_client->cl_clid : -1; 722 __entry->num_sge = req->rl_wr.num_sge; 723 __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED; 724 ), 725 726 TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s", 727 __entry->task_id, __entry->client_id, 728 __entry->cq_id, __entry->completion_id, 729 __entry->num_sge, (__entry->num_sge == 1 ? "" : "s"), 730 (__entry->signaled ? "signaled" : "") 731 ) 732 ); 733 734 TRACE_EVENT(xprtrdma_post_recv, 735 TP_PROTO( 736 const struct rpcrdma_rep *rep 737 ), 738 739 TP_ARGS(rep), 740 741 TP_STRUCT__entry( 742 __field(u32, cq_id) 743 __field(int, completion_id) 744 ), 745 746 TP_fast_assign( 747 __entry->cq_id = rep->rr_cid.ci_queue_id; 748 __entry->completion_id = rep->rr_cid.ci_completion_id; 749 ), 750 751 TP_printk("cq.id=%d cid=%d", 752 __entry->cq_id, __entry->completion_id 753 ) 754 ); 755 756 TRACE_EVENT(xprtrdma_post_recvs, 757 TP_PROTO( 758 const struct rpcrdma_xprt *r_xprt, 759 unsigned int count, 760 int status 761 ), 762 763 TP_ARGS(r_xprt, count, status), 764 765 TP_STRUCT__entry( 766 __field(const void *, r_xprt) 767 __field(unsigned int, count) 768 __field(int, status) 769 __field(int, posted) 770 __string(addr, rpcrdma_addrstr(r_xprt)) 771 __string(port, rpcrdma_portstr(r_xprt)) 772 ), 773 774 TP_fast_assign( 775 __entry->r_xprt = r_xprt; 776 __entry->count = count; 777 __entry->status = status; 778 __entry->posted = r_xprt->rx_ep->re_receive_count; 779 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 780 __assign_str(port, rpcrdma_portstr(r_xprt)); 781 ), 782 783 TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)", 784 __get_str(addr), __get_str(port), __entry->r_xprt, 785 __entry->count, __entry->posted, __entry->status 786 ) 787 ); 788 789 TRACE_EVENT(xprtrdma_post_linv_err, 790 TP_PROTO( 
791 const struct rpcrdma_req *req, 792 int status 793 ), 794 795 TP_ARGS(req, status), 796 797 TP_STRUCT__entry( 798 __field(unsigned int, task_id) 799 __field(unsigned int, client_id) 800 __field(int, status) 801 ), 802 803 TP_fast_assign( 804 const struct rpc_task *task = req->rl_slot.rq_task; 805 806 __entry->task_id = task->tk_pid; 807 __entry->client_id = task->tk_client->cl_clid; 808 __entry->status = status; 809 ), 810 811 TP_printk("task:%u@%u status=%d", 812 __entry->task_id, __entry->client_id, __entry->status 813 ) 814 ); 815 816 /** 817 ** Completion events 818 **/ 819 820 DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive); 821 DEFINE_COMPLETION_EVENT(xprtrdma_wc_send); 822 DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg); 823 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li); 824 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_wake); 825 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_done); 826 827 TRACE_EVENT(xprtrdma_frwr_alloc, 828 TP_PROTO( 829 const struct rpcrdma_mr *mr, 830 int rc 831 ), 832 833 TP_ARGS(mr, rc), 834 835 TP_STRUCT__entry( 836 __field(u32, mr_id) 837 __field(int, rc) 838 ), 839 840 TP_fast_assign( 841 __entry->mr_id = mr->frwr.fr_mr->res.id; 842 __entry->rc = rc; 843 ), 844 845 TP_printk("mr.id=%u: rc=%d", 846 __entry->mr_id, __entry->rc 847 ) 848 ); 849 850 TRACE_EVENT(xprtrdma_frwr_dereg, 851 TP_PROTO( 852 const struct rpcrdma_mr *mr, 853 int rc 854 ), 855 856 TP_ARGS(mr, rc), 857 858 TP_STRUCT__entry( 859 __field(u32, mr_id) 860 __field(int, nents) 861 __field(u32, handle) 862 __field(u32, length) 863 __field(u64, offset) 864 __field(u32, dir) 865 __field(int, rc) 866 ), 867 868 TP_fast_assign( 869 __entry->mr_id = mr->frwr.fr_mr->res.id; 870 __entry->nents = mr->mr_nents; 871 __entry->handle = mr->mr_handle; 872 __entry->length = mr->mr_length; 873 __entry->offset = mr->mr_offset; 874 __entry->dir = mr->mr_dir; 875 __entry->rc = rc; 876 ), 877 878 TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d", 879 __entry->mr_id, __entry->nents, __entry->length, 
880 (unsigned long long)__entry->offset, __entry->handle, 881 xprtrdma_show_direction(__entry->dir), 882 __entry->rc 883 ) 884 ); 885 886 TRACE_EVENT(xprtrdma_frwr_sgerr, 887 TP_PROTO( 888 const struct rpcrdma_mr *mr, 889 int sg_nents 890 ), 891 892 TP_ARGS(mr, sg_nents), 893 894 TP_STRUCT__entry( 895 __field(u32, mr_id) 896 __field(u64, addr) 897 __field(u32, dir) 898 __field(int, nents) 899 ), 900 901 TP_fast_assign( 902 __entry->mr_id = mr->frwr.fr_mr->res.id; 903 __entry->addr = mr->mr_sg->dma_address; 904 __entry->dir = mr->mr_dir; 905 __entry->nents = sg_nents; 906 ), 907 908 TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d", 909 __entry->mr_id, __entry->addr, 910 xprtrdma_show_direction(__entry->dir), 911 __entry->nents 912 ) 913 ); 914 915 TRACE_EVENT(xprtrdma_frwr_maperr, 916 TP_PROTO( 917 const struct rpcrdma_mr *mr, 918 int num_mapped 919 ), 920 921 TP_ARGS(mr, num_mapped), 922 923 TP_STRUCT__entry( 924 __field(u32, mr_id) 925 __field(u64, addr) 926 __field(u32, dir) 927 __field(int, num_mapped) 928 __field(int, nents) 929 ), 930 931 TP_fast_assign( 932 __entry->mr_id = mr->frwr.fr_mr->res.id; 933 __entry->addr = mr->mr_sg->dma_address; 934 __entry->dir = mr->mr_dir; 935 __entry->num_mapped = num_mapped; 936 __entry->nents = mr->mr_nents; 937 ), 938 939 TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d", 940 __entry->mr_id, __entry->addr, 941 xprtrdma_show_direction(__entry->dir), 942 __entry->num_mapped, __entry->nents 943 ) 944 ); 945 946 DEFINE_MR_EVENT(localinv); 947 DEFINE_MR_EVENT(map); 948 DEFINE_MR_EVENT(unmap); 949 DEFINE_MR_EVENT(reminv); 950 DEFINE_MR_EVENT(recycle); 951 952 TRACE_EVENT(xprtrdma_dma_maperr, 953 TP_PROTO( 954 u64 addr 955 ), 956 957 TP_ARGS(addr), 958 959 TP_STRUCT__entry( 960 __field(u64, addr) 961 ), 962 963 TP_fast_assign( 964 __entry->addr = addr; 965 ), 966 967 TP_printk("dma addr=0x%llx\n", __entry->addr) 968 ); 969 970 /** 971 ** Reply events 972 **/ 973 974 TRACE_EVENT(xprtrdma_reply, 975 TP_PROTO( 976 const 
struct rpc_task *task, 977 const struct rpcrdma_rep *rep, 978 unsigned int credits 979 ), 980 981 TP_ARGS(task, rep, credits), 982 983 TP_STRUCT__entry( 984 __field(unsigned int, task_id) 985 __field(unsigned int, client_id) 986 __field(u32, xid) 987 __field(unsigned int, credits) 988 ), 989 990 TP_fast_assign( 991 __entry->task_id = task->tk_pid; 992 __entry->client_id = task->tk_client->cl_clid; 993 __entry->xid = be32_to_cpu(rep->rr_xid); 994 __entry->credits = credits; 995 ), 996 997 TP_printk("task:%u@%u xid=0x%08x credits=%u", 998 __entry->task_id, __entry->client_id, __entry->xid, 999 __entry->credits 1000 ) 1001 ); 1002 1003 DEFINE_REPLY_EVENT(vers); 1004 DEFINE_REPLY_EVENT(rqst); 1005 DEFINE_REPLY_EVENT(short); 1006 DEFINE_REPLY_EVENT(hdr); 1007 1008 TRACE_EVENT(xprtrdma_err_vers, 1009 TP_PROTO( 1010 const struct rpc_rqst *rqst, 1011 __be32 *min, 1012 __be32 *max 1013 ), 1014 1015 TP_ARGS(rqst, min, max), 1016 1017 TP_STRUCT__entry( 1018 __field(unsigned int, task_id) 1019 __field(unsigned int, client_id) 1020 __field(u32, xid) 1021 __field(u32, min) 1022 __field(u32, max) 1023 ), 1024 1025 TP_fast_assign( 1026 __entry->task_id = rqst->rq_task->tk_pid; 1027 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1028 __entry->xid = be32_to_cpu(rqst->rq_xid); 1029 __entry->min = be32_to_cpup(min); 1030 __entry->max = be32_to_cpup(max); 1031 ), 1032 1033 TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]", 1034 __entry->task_id, __entry->client_id, __entry->xid, 1035 __entry->min, __entry->max 1036 ) 1037 ); 1038 1039 TRACE_EVENT(xprtrdma_err_chunk, 1040 TP_PROTO( 1041 const struct rpc_rqst *rqst 1042 ), 1043 1044 TP_ARGS(rqst), 1045 1046 TP_STRUCT__entry( 1047 __field(unsigned int, task_id) 1048 __field(unsigned int, client_id) 1049 __field(u32, xid) 1050 ), 1051 1052 TP_fast_assign( 1053 __entry->task_id = rqst->rq_task->tk_pid; 1054 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1055 __entry->xid = be32_to_cpu(rqst->rq_xid); 1056 ), 1057 1058 
TP_printk("task:%u@%u xid=0x%08x", 1059 __entry->task_id, __entry->client_id, __entry->xid 1060 ) 1061 ); 1062 1063 TRACE_EVENT(xprtrdma_err_unrecognized, 1064 TP_PROTO( 1065 const struct rpc_rqst *rqst, 1066 __be32 *procedure 1067 ), 1068 1069 TP_ARGS(rqst, procedure), 1070 1071 TP_STRUCT__entry( 1072 __field(unsigned int, task_id) 1073 __field(unsigned int, client_id) 1074 __field(u32, xid) 1075 __field(u32, procedure) 1076 ), 1077 1078 TP_fast_assign( 1079 __entry->task_id = rqst->rq_task->tk_pid; 1080 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1081 __entry->procedure = be32_to_cpup(procedure); 1082 ), 1083 1084 TP_printk("task:%u@%u xid=0x%08x procedure=%u", 1085 __entry->task_id, __entry->client_id, __entry->xid, 1086 __entry->procedure 1087 ) 1088 ); 1089 1090 TRACE_EVENT(xprtrdma_fixup, 1091 TP_PROTO( 1092 const struct rpc_rqst *rqst, 1093 unsigned long fixup 1094 ), 1095 1096 TP_ARGS(rqst, fixup), 1097 1098 TP_STRUCT__entry( 1099 __field(unsigned int, task_id) 1100 __field(unsigned int, client_id) 1101 __field(unsigned long, fixup) 1102 __field(size_t, headlen) 1103 __field(unsigned int, pagelen) 1104 __field(size_t, taillen) 1105 ), 1106 1107 TP_fast_assign( 1108 __entry->task_id = rqst->rq_task->tk_pid; 1109 __entry->client_id = rqst->rq_task->tk_client->cl_clid; 1110 __entry->fixup = fixup; 1111 __entry->headlen = rqst->rq_rcv_buf.head[0].iov_len; 1112 __entry->pagelen = rqst->rq_rcv_buf.page_len; 1113 __entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len; 1114 ), 1115 1116 TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu", 1117 __entry->task_id, __entry->client_id, __entry->fixup, 1118 __entry->headlen, __entry->pagelen, __entry->taillen 1119 ) 1120 ); 1121 1122 TRACE_EVENT(xprtrdma_decode_seg, 1123 TP_PROTO( 1124 u32 handle, 1125 u32 length, 1126 u64 offset 1127 ), 1128 1129 TP_ARGS(handle, length, offset), 1130 1131 TP_STRUCT__entry( 1132 __field(u32, handle) 1133 __field(u32, length) 1134 __field(u64, offset) 1135 ), 1136 1137 
TP_fast_assign( 1138 __entry->handle = handle; 1139 __entry->length = length; 1140 __entry->offset = offset; 1141 ), 1142 1143 TP_printk("%u@0x%016llx:0x%08x", 1144 __entry->length, (unsigned long long)__entry->offset, 1145 __entry->handle 1146 ) 1147 ); 1148 1149 /** 1150 ** Callback events 1151 **/ 1152 1153 TRACE_EVENT(xprtrdma_cb_setup, 1154 TP_PROTO( 1155 const struct rpcrdma_xprt *r_xprt, 1156 unsigned int reqs 1157 ), 1158 1159 TP_ARGS(r_xprt, reqs), 1160 1161 TP_STRUCT__entry( 1162 __field(const void *, r_xprt) 1163 __field(unsigned int, reqs) 1164 __string(addr, rpcrdma_addrstr(r_xprt)) 1165 __string(port, rpcrdma_portstr(r_xprt)) 1166 ), 1167 1168 TP_fast_assign( 1169 __entry->r_xprt = r_xprt; 1170 __entry->reqs = reqs; 1171 __assign_str(addr, rpcrdma_addrstr(r_xprt)); 1172 __assign_str(port, rpcrdma_portstr(r_xprt)); 1173 ), 1174 1175 TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs", 1176 __get_str(addr), __get_str(port), 1177 __entry->r_xprt, __entry->reqs 1178 ) 1179 ); 1180 1181 DEFINE_CALLBACK_EVENT(call); 1182 DEFINE_CALLBACK_EVENT(reply); 1183 1184 /** 1185 ** Server-side RPC/RDMA events 1186 **/ 1187 1188 DECLARE_EVENT_CLASS(svcrdma_accept_class, 1189 TP_PROTO( 1190 const struct svcxprt_rdma *rdma, 1191 long status 1192 ), 1193 1194 TP_ARGS(rdma, status), 1195 1196 TP_STRUCT__entry( 1197 __field(long, status) 1198 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1199 ), 1200 1201 TP_fast_assign( 1202 __entry->status = status; 1203 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1204 ), 1205 1206 TP_printk("addr=%s status=%ld", 1207 __get_str(addr), __entry->status 1208 ) 1209 ); 1210 1211 #define DEFINE_ACCEPT_EVENT(name) \ 1212 DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \ 1213 TP_PROTO( \ 1214 const struct svcxprt_rdma *rdma, \ 1215 long status \ 1216 ), \ 1217 TP_ARGS(rdma, status)) 1218 1219 DEFINE_ACCEPT_EVENT(pd); 1220 DEFINE_ACCEPT_EVENT(qp); 1221 DEFINE_ACCEPT_EVENT(fabric); 1222 DEFINE_ACCEPT_EVENT(initdepth); 1223 
DEFINE_ACCEPT_EVENT(accept);

/*
 * Make the RPC/RDMA procedure values visible to user space tools so
 * the symbolic names below can be resolved in trace output.
 */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

#define show_rpcrdma_proc(x)					\
		__print_symbolic(x,				\
				{ RDMA_MSG, "RDMA_MSG" },	\
				{ RDMA_NOMSG, "RDMA_NOMSG" },	\
				{ RDMA_MSGP, "RDMA_MSGP" },	\
				{ RDMA_DONE, "RDMA_DONE" },	\
				{ RDMA_ERROR, "RDMA_ERROR" })

/*
 * An ingress RPC/RDMA transport header was parsed successfully.
 * @p points at the first word of the header; @hdrlen is its length.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		/* Header fields are consumed in on-the-wire order */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);

/* An ingress message was too short to contain a transport header. */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);

/*
 * Common shape for header-decoding failures: the four leading header
 * words are captured so the offending request can be identified.
 * proc is shown numerically here because it may be the invalid field.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

/* Each instance is named svcrdma_decode_<name>_err */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);

/* One RDMA segment: an R_key (handle), a byte count, and an offset. */
DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

#define DEFINE_SEGMENT_EVENT(name)					\
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,	\
				TP_PROTO(				\
					u32 handle,			\
					u32 length,			\
					u64 offset			\
				),					\
				TP_ARGS(handle, length, offset))

DEFINE_SEGMENT_EVENT(decode_wseg);
DEFINE_SEGMENT_EVENT(encode_rseg);
DEFINE_SEGMENT_EVENT(send_rseg);
DEFINE_SEGMENT_EVENT(encode_wseg);
DEFINE_SEGMENT_EVENT(send_wseg);

/* A chunk is identified only by its total payload length. */
DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);

#define DEFINE_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name,	\
				TP_PROTO(				\
					u32 length			\
				),					\
				TP_ARGS(length))

DEFINE_CHUNK_EVENT(send_pzr);
DEFINE_CHUNK_EVENT(encode_write_chunk);
DEFINE_CHUNK_EVENT(send_write_chunk);
DEFINE_CHUNK_EVENT(encode_read_chunk);
DEFINE_CHUNK_EVENT(send_reply_chunk);

/* Read chunks additionally carry the XDR position of the payload. */
TRACE_EVENT(svcrdma_send_read_chunk,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);

/* An RPC/RDMA error reply was sent; only the XID is recorded. */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

/* Each instance is named svcrdma_err_<name> */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);

/**
 ** Server-side RDMA API events
 **/
1475 1476 DECLARE_EVENT_CLASS(svcrdma_dma_map_class, 1477 TP_PROTO( 1478 const struct svcxprt_rdma *rdma, 1479 u64 dma_addr, 1480 u32 length 1481 ), 1482 1483 TP_ARGS(rdma, dma_addr, length), 1484 1485 TP_STRUCT__entry( 1486 __field(u64, dma_addr) 1487 __field(u32, length) 1488 __string(device, rdma->sc_cm_id->device->name) 1489 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1490 ), 1491 1492 TP_fast_assign( 1493 __entry->dma_addr = dma_addr; 1494 __entry->length = length; 1495 __assign_str(device, rdma->sc_cm_id->device->name); 1496 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1497 ), 1498 1499 TP_printk("addr=%s device=%s dma_addr=%llu length=%u", 1500 __get_str(addr), __get_str(device), 1501 __entry->dma_addr, __entry->length 1502 ) 1503 ); 1504 1505 #define DEFINE_SVC_DMA_EVENT(name) \ 1506 DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \ 1507 TP_PROTO( \ 1508 const struct svcxprt_rdma *rdma,\ 1509 u64 dma_addr, \ 1510 u32 length \ 1511 ), \ 1512 TP_ARGS(rdma, dma_addr, length)) 1513 1514 DEFINE_SVC_DMA_EVENT(dma_map_page); 1515 DEFINE_SVC_DMA_EVENT(dma_unmap_page); 1516 1517 TRACE_EVENT(svcrdma_dma_map_rw_err, 1518 TP_PROTO( 1519 const struct svcxprt_rdma *rdma, 1520 unsigned int nents, 1521 int status 1522 ), 1523 1524 TP_ARGS(rdma, nents, status), 1525 1526 TP_STRUCT__entry( 1527 __field(int, status) 1528 __field(unsigned int, nents) 1529 __string(device, rdma->sc_cm_id->device->name) 1530 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1531 ), 1532 1533 TP_fast_assign( 1534 __entry->status = status; 1535 __entry->nents = nents; 1536 __assign_str(device, rdma->sc_cm_id->device->name); 1537 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1538 ), 1539 1540 TP_printk("addr=%s device=%s nents=%u status=%d", 1541 __get_str(addr), __get_str(device), __entry->nents, 1542 __entry->status 1543 ) 1544 ); 1545 1546 TRACE_EVENT(svcrdma_no_rwctx_err, 1547 TP_PROTO( 1548 const struct svcxprt_rdma *rdma, 1549 unsigned int num_sges 1550 ), 1551 1552 TP_ARGS(rdma, 
num_sges), 1553 1554 TP_STRUCT__entry( 1555 __field(unsigned int, num_sges) 1556 __string(device, rdma->sc_cm_id->device->name) 1557 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1558 ), 1559 1560 TP_fast_assign( 1561 __entry->num_sges = num_sges; 1562 __assign_str(device, rdma->sc_cm_id->device->name); 1563 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1564 ), 1565 1566 TP_printk("addr=%s device=%s num_sges=%d", 1567 __get_str(addr), __get_str(device), __entry->num_sges 1568 ) 1569 ); 1570 1571 TRACE_EVENT(svcrdma_page_overrun_err, 1572 TP_PROTO( 1573 const struct svcxprt_rdma *rdma, 1574 const struct svc_rqst *rqst, 1575 unsigned int pageno 1576 ), 1577 1578 TP_ARGS(rdma, rqst, pageno), 1579 1580 TP_STRUCT__entry( 1581 __field(unsigned int, pageno) 1582 __field(u32, xid) 1583 __string(device, rdma->sc_cm_id->device->name) 1584 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1585 ), 1586 1587 TP_fast_assign( 1588 __entry->pageno = pageno; 1589 __entry->xid = __be32_to_cpu(rqst->rq_xid); 1590 __assign_str(device, rdma->sc_cm_id->device->name); 1591 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1592 ), 1593 1594 TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr), 1595 __get_str(device), __entry->xid, __entry->pageno 1596 ) 1597 ); 1598 1599 TRACE_EVENT(svcrdma_small_wrch_err, 1600 TP_PROTO( 1601 const struct svcxprt_rdma *rdma, 1602 unsigned int remaining, 1603 unsigned int seg_no, 1604 unsigned int num_segs 1605 ), 1606 1607 TP_ARGS(rdma, remaining, seg_no, num_segs), 1608 1609 TP_STRUCT__entry( 1610 __field(unsigned int, remaining) 1611 __field(unsigned int, seg_no) 1612 __field(unsigned int, num_segs) 1613 __string(device, rdma->sc_cm_id->device->name) 1614 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1615 ), 1616 1617 TP_fast_assign( 1618 __entry->remaining = remaining; 1619 __entry->seg_no = seg_no; 1620 __entry->num_segs = num_segs; 1621 __assign_str(device, rdma->sc_cm_id->device->name); 1622 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 
1623 ), 1624 1625 TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u", 1626 __get_str(addr), __get_str(device), __entry->remaining, 1627 __entry->seg_no, __entry->num_segs 1628 ) 1629 ); 1630 1631 TRACE_EVENT(svcrdma_send_pullup, 1632 TP_PROTO( 1633 unsigned int len 1634 ), 1635 1636 TP_ARGS(len), 1637 1638 TP_STRUCT__entry( 1639 __field(unsigned int, len) 1640 ), 1641 1642 TP_fast_assign( 1643 __entry->len = len; 1644 ), 1645 1646 TP_printk("len=%u", __entry->len) 1647 ); 1648 1649 TRACE_EVENT(svcrdma_send_err, 1650 TP_PROTO( 1651 const struct svc_rqst *rqst, 1652 int status 1653 ), 1654 1655 TP_ARGS(rqst, status), 1656 1657 TP_STRUCT__entry( 1658 __field(int, status) 1659 __field(u32, xid) 1660 __string(addr, rqst->rq_xprt->xpt_remotebuf) 1661 ), 1662 1663 TP_fast_assign( 1664 __entry->status = status; 1665 __entry->xid = __be32_to_cpu(rqst->rq_xid); 1666 __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); 1667 ), 1668 1669 TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr), 1670 __entry->xid, __entry->status 1671 ) 1672 ); 1673 1674 TRACE_EVENT(svcrdma_post_send, 1675 TP_PROTO( 1676 const struct svc_rdma_send_ctxt *ctxt 1677 ), 1678 1679 TP_ARGS(ctxt), 1680 1681 TP_STRUCT__entry( 1682 __field(u32, cq_id) 1683 __field(int, completion_id) 1684 __field(unsigned int, num_sge) 1685 __field(u32, inv_rkey) 1686 ), 1687 1688 TP_fast_assign( 1689 const struct ib_send_wr *wr = &ctxt->sc_send_wr; 1690 1691 __entry->cq_id = ctxt->sc_cid.ci_queue_id; 1692 __entry->completion_id = ctxt->sc_cid.ci_completion_id; 1693 __entry->num_sge = wr->num_sge; 1694 __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ? 
1695 wr->ex.invalidate_rkey : 0; 1696 ), 1697 1698 TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x", 1699 __entry->cq_id, __entry->completion_id, 1700 __entry->num_sge, __entry->inv_rkey 1701 ) 1702 ); 1703 1704 DEFINE_COMPLETION_EVENT(svcrdma_wc_send); 1705 1706 TRACE_EVENT(svcrdma_post_recv, 1707 TP_PROTO( 1708 const struct svc_rdma_recv_ctxt *ctxt 1709 ), 1710 1711 TP_ARGS(ctxt), 1712 1713 TP_STRUCT__entry( 1714 __field(u32, cq_id) 1715 __field(int, completion_id) 1716 ), 1717 1718 TP_fast_assign( 1719 __entry->cq_id = ctxt->rc_cid.ci_queue_id; 1720 __entry->completion_id = ctxt->rc_cid.ci_completion_id; 1721 ), 1722 1723 TP_printk("cq.id=%d cid=%d", 1724 __entry->cq_id, __entry->completion_id 1725 ) 1726 ); 1727 1728 DEFINE_COMPLETION_EVENT(svcrdma_wc_receive); 1729 1730 TRACE_EVENT(svcrdma_rq_post_err, 1731 TP_PROTO( 1732 const struct svcxprt_rdma *rdma, 1733 int status 1734 ), 1735 1736 TP_ARGS(rdma, status), 1737 1738 TP_STRUCT__entry( 1739 __field(int, status) 1740 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1741 ), 1742 1743 TP_fast_assign( 1744 __entry->status = status; 1745 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1746 ), 1747 1748 TP_printk("addr=%s status=%d", 1749 __get_str(addr), __entry->status 1750 ) 1751 ); 1752 1753 TRACE_EVENT(svcrdma_post_chunk, 1754 TP_PROTO( 1755 const struct rpc_rdma_cid *cid, 1756 int sqecount 1757 ), 1758 1759 TP_ARGS(cid, sqecount), 1760 1761 TP_STRUCT__entry( 1762 __field(u32, cq_id) 1763 __field(int, completion_id) 1764 __field(int, sqecount) 1765 ), 1766 1767 TP_fast_assign( 1768 __entry->cq_id = cid->ci_queue_id; 1769 __entry->completion_id = cid->ci_completion_id; 1770 __entry->sqecount = sqecount; 1771 ), 1772 1773 TP_printk("cq.id=%u cid=%d sqecount=%d", 1774 __entry->cq_id, __entry->completion_id, 1775 __entry->sqecount 1776 ) 1777 ); 1778 1779 DEFINE_COMPLETION_EVENT(svcrdma_wc_read); 1780 DEFINE_COMPLETION_EVENT(svcrdma_wc_write); 1781 1782 TRACE_EVENT(svcrdma_qp_error, 1783 TP_PROTO( 1784 const 
struct ib_event *event, 1785 const struct sockaddr *sap 1786 ), 1787 1788 TP_ARGS(event, sap), 1789 1790 TP_STRUCT__entry( 1791 __field(unsigned int, event) 1792 __string(device, event->device->name) 1793 __array(__u8, addr, INET6_ADDRSTRLEN + 10) 1794 ), 1795 1796 TP_fast_assign( 1797 __entry->event = event->event; 1798 __assign_str(device, event->device->name); 1799 snprintf(__entry->addr, sizeof(__entry->addr) - 1, 1800 "%pISpc", sap); 1801 ), 1802 1803 TP_printk("addr=%s dev=%s event=%s (%u)", 1804 __entry->addr, __get_str(device), 1805 rdma_show_ib_event(__entry->event), __entry->event 1806 ) 1807 ); 1808 1809 DECLARE_EVENT_CLASS(svcrdma_sendqueue_event, 1810 TP_PROTO( 1811 const struct svcxprt_rdma *rdma 1812 ), 1813 1814 TP_ARGS(rdma), 1815 1816 TP_STRUCT__entry( 1817 __field(int, avail) 1818 __field(int, depth) 1819 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1820 ), 1821 1822 TP_fast_assign( 1823 __entry->avail = atomic_read(&rdma->sc_sq_avail); 1824 __entry->depth = rdma->sc_sq_depth; 1825 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1826 ), 1827 1828 TP_printk("addr=%s sc_sq_avail=%d/%d", 1829 __get_str(addr), __entry->avail, __entry->depth 1830 ) 1831 ); 1832 1833 #define DEFINE_SQ_EVENT(name) \ 1834 DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\ 1835 TP_PROTO( \ 1836 const struct svcxprt_rdma *rdma \ 1837 ), \ 1838 TP_ARGS(rdma)) 1839 1840 DEFINE_SQ_EVENT(full); 1841 DEFINE_SQ_EVENT(retry); 1842 1843 TRACE_EVENT(svcrdma_sq_post_err, 1844 TP_PROTO( 1845 const struct svcxprt_rdma *rdma, 1846 int status 1847 ), 1848 1849 TP_ARGS(rdma, status), 1850 1851 TP_STRUCT__entry( 1852 __field(int, avail) 1853 __field(int, depth) 1854 __field(int, status) 1855 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1856 ), 1857 1858 TP_fast_assign( 1859 __entry->avail = atomic_read(&rdma->sc_sq_avail); 1860 __entry->depth = rdma->sc_sq_depth; 1861 __entry->status = status; 1862 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); 1863 ), 1864 1865 TP_printk("addr=%s 
sc_sq_avail=%d/%d status=%d", 1866 __get_str(addr), __entry->avail, __entry->depth, 1867 __entry->status 1868 ) 1869 ); 1870 1871 #endif /* _TRACE_RPCRDMA_H */ 1872 1873 #include <trace/define_trace.h> 1874