/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2004, 2011 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
 */
#ifndef CM_MSGS_H
#define CM_MSGS_H

#include <rdma/ib_mad.h>
#include <rdma/ib_cm.h>

/*
 * Parameters to routines below should be in network-byte order, and values
 * are returned in network-byte order.
 */

#define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */

struct cm_req_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 rsvd4;
	__be64 service_id;
	__be64 local_ca_guid;
	__be32 rsvd24;
	__be32 local_qkey;
	/* local QPN:24, responder resources:8 */
	__be32 offset32;
	/* local EECN:24, initiator depth:8 */
	__be32 offset36;
	/*
	 * remote EECN:24, remote CM response timeout:5,
	 * transport service type:2, end-to-end flow control:1
	 */
	__be32 offset40;
	/* starting PSN:24, local CM response timeout:5, retry count:3 */
	__be32 offset44;
	__be16 pkey;
	/* path MTU:4, RDC exists:1, RNR retry count:3. */
	u8 offset50;
	/* max CM Retries:4, SRQ:1, extended transport type:3 */
	u8 offset51;

	__be16 primary_local_lid;
	__be16 primary_remote_lid;
	union ib_gid primary_local_gid;
	union ib_gid primary_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 primary_offset88;
	u8 primary_traffic_class;
	u8 primary_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 primary_offset94;
	/* local ACK timeout:5, rsvd:3 */
	u8 primary_offset95;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 alt_offset132;
	u8 alt_traffic_class;
	u8 alt_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 alt_offset138;
	/* local ACK timeout:5, rsvd:3 */
	u8 alt_offset139;

	u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];

} __packed;

static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
}

static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
{
	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(req_msg->offset32) &
					 0x000000FF));
}

static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset32);
}

static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
{
	req_msg->offset32 = cpu_to_be32(resp_res |
					(be32_to_cpu(req_msg->offset32) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset36);
}

static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
					 u8 init_depth)
{
	req_msg->offset36 = cpu_to_be32(init_depth |
					(be32_to_cpu(req_msg->offset36) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
}

static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
						  u8 resp_timeout)
{
	req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
					(be32_to_cpu(req_msg->offset40) &
					 0xFFFFFF07));
}
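
/*
 * Illustrative sketch only, not part of the wire-format definitions above:
 * the accessors take and return network-byte-order values, so a caller
 * working in host byte order converts at the boundary.  The helper name is
 * hypothetical.
 */
static inline u32 cm_req_example_local_qpn_host(struct cm_req_msg *req_msg)
{
	/* cm_req_get_local_qpn() returns __be32; convert for host-order use. */
	return be32_to_cpu(cm_req_get_local_qpn(req_msg));
}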

static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
	u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;

	switch (transport_type) {
	case 0: return IB_QPT_RC;
	case 1: return IB_QPT_UC;
	case 3:
		switch (req_msg->offset51 & 0x7) {
		case 1: return IB_QPT_XRC_TGT;
		default: return 0;
		}
	default: return 0;
	}
}

static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
				      enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_UC:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						req_msg->offset40) &
						0xFFFFFFF9) | 0x2);
		break;
	case IB_QPT_XRC_INI:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						req_msg->offset40) &
						0xFFFFFFF9) | 0x6);
		req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
		break;
	default:
		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
						req_msg->offset40) &
						0xFFFFFFF9);
	}
}

static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
{
	return be32_to_cpu(req_msg->offset40) & 0x1;
}

static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
					u8 flow_ctrl)
{
	req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
					(be32_to_cpu(req_msg->offset40) &
					 0xFFFFFFFE));
}

static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
}

static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
					   __be32 starting_psn)
{
	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
}

static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
}

static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
						 u8 resp_timeout)
{
	req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
}

static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
}

static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
					  u8 retry_count)
{
	req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
}

static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 >> 4;
}

static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
}

static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 & 0x7;
}

static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
					      u8 rnr_retry_count)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
				  (rnr_retry_count & 0x7));
}

static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
{
	return req_msg->offset51 >> 4;
}

static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
					     u8 retries)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
}

static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
{
	return (req_msg->offset51 & 0x8) >> 3;
}

static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
				  ((srq & 0x1) << 3));
}
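
/*
 * Illustrative sketch (hypothetical helper, not used by the CM code):
 * populating the transport-related REQ bits for a plain RC connection with
 * the setters above.  Each setter touches only its own bit field, so the
 * calls may be made in any order.
 */
static inline void cm_req_example_init_rc(struct cm_req_msg *req_msg,
					  u8 retry_count, u8 rnr_retry_count)
{
	cm_req_set_qp_type(req_msg, IB_QPT_RC);	/* transport service type 0 */
	cm_req_set_flow_ctrl(req_msg, 1);	/* end-to-end flow control */
	cm_req_set_retry_count(req_msg, retry_count);
	cm_req_set_rnr_retry_count(req_msg, rnr_retry_count);
	cm_req_set_srq(req_msg, 0);
}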

static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
}

static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
						 __be32 flow_label)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0x00000FFF) |
				    (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
}

static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
						  u8 rate)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset94 >> 4);
}

static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
					  (sl << 4));
}

static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
}

static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
						   u8 subnet_local)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
					  ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset95 >> 3);
}

static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
							 u8 local_ack_timeout)
{
	req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
					  (local_ack_timeout << 3));
}

static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
}

static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
					     __be32 flow_label)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0x00000FFF) |
				 (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
}

static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
					      u8 rate)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset138 >> 4);
}

static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
				       (sl << 4));
}

static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
}

static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
					       u8 subnet_local)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
				       ((subnet_local & 0x1) << 3));
}
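
/*
 * Illustrative sketch (hypothetical helper, host-order inputs assumed):
 * filling the primary-path attributes packed into primary_offset88 and
 * primary_offset94.
 */
static inline void cm_req_example_set_primary_path(struct cm_req_msg *req_msg,
						   u32 flow_label, u8 rate,
						   u8 sl)
{
	/* Flow label occupies the top 20 bits of primary_offset88. */
	cm_req_set_primary_flow_label(req_msg, cpu_to_be32(flow_label));
	cm_req_set_primary_packet_rate(req_msg, rate);
	cm_req_set_primary_sl(req_msg, sl);
}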

static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset139 >> 3);
}

static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
						    u8 local_ack_timeout)
{
	req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
				       (local_ack_timeout << 3));
}

/* Message REJected or MRAed */
enum cm_msg_response {
	CM_MSG_RESPONSE_REQ = 0x0,
	CM_MSG_RESPONSE_REP = 0x1,
	CM_MSG_RESPONSE_OTHER = 0x2
};

struct cm_mra_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message MRAed:2, rsvd:6 */
	u8 offset8;
	/* service timeout:5, rsvd:3 */
	u8 offset9;

	u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];

} __packed;

static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset8 >> 6);
}

static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
{
	mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset9 >> 3);
}

static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
					      u8 service_timeout)
{
	mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
				 (service_timeout << 3));
}
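
/*
 * Illustrative sketch (hypothetical helper): the two-bit "message MRAed"
 * field carries one of the cm_msg_response codes above, identifying which
 * message the receiver is asking the sender to wait on.
 */
static inline bool cm_mra_example_is_for_req(struct cm_mra_msg *mra_msg)
{
	return cm_mra_get_msg_mraed(mra_msg) == CM_MSG_RESPONSE_REQ;
}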

struct cm_rej_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message REJected:2, rsvd:6 */
	u8 offset8;
	/* reject info length:7, rsvd:1. */
	u8 offset9;
	__be16 reason;
	u8 ari[IB_CM_REJ_ARI_LENGTH];

	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];

} __packed;

static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset8 >> 6);
}

static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
{
	rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset9 >> 1);
}

static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
					      u8 len)
{
	rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
}

struct cm_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	__be32 local_qkey;
	/* local QPN:24, rsvd:8 */
	__be32 offset12;
	/* local EECN:24, rsvd:8 */
	__be32 offset16;
	/* starting PSN:24, rsvd:8 */
	__be32 offset20;
	u8 resp_resources;
	u8 initiator_depth;
	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
	u8 offset26;
	/* RNR retry count:3, SRQ:1, rsvd:4 */
	u8 offset27;
	__be64 local_ca_guid;

	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];

} __packed;

static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
}

static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
{
	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
}

static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
}

static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
{
	rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
			    (be32_to_cpu(rep_msg->offset16) & 0x000000FF));
}

static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
{
	return (qp_type == IB_QPT_XRC_INI) ?
		cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
}

static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
}

static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
					   __be32 starting_psn)
{
	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
}

static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 >> 3);
}

static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
					       u8 target_ack_delay)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
				  (target_ack_delay << 3));
}

static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset26 & 0x06) >> 1);
}

static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
				  ((failover & 0x3) << 1));
}

static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 & 0x01);
}

static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
					u8 flow_ctrl)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
				  (flow_ctrl & 0x1));
}

static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset27 >> 5);
}

static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
					      u8 rnr_retry_count)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
				  (rnr_retry_count << 5));
}

static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset27 >> 4) & 0x1);
}

static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
				  ((srq & 0x1) << 4));
}

struct cm_rtu_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];

} __packed;

struct cm_dreq_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* remote QPN/EECN:24, rsvd:8 */
	__be32 offset8;

	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];

} __packed;

static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
}

static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
{
	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
}

struct cm_drep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];

} __packed;
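
/*
 * Illustrative sketch (hypothetical helper): when the connecting QP is an
 * XRC initiator, the REP carries the local EECN rather than a local QPN;
 * cm_rep_get_qpn() above picks the right field and returns it in network
 * byte order either way.
 */
static inline u32 cm_rep_example_remote_qpn_host(struct cm_rep_msg *rep_msg,
						 enum ib_qp_type qp_type)
{
	return be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
}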

struct cm_lap_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	__be32 rsvd8;
	/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
	__be32 offset12;
	__be32 rsvd16;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:4, traffic class:8 */
	__be32 offset56;
	u8 alt_hop_limit;
	/* rsvd:2, packet rate:6 */
	u8 offset61;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 offset62;
	/* local ACK timeout:5, rsvd:3 */
	u8 offset63;

	u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
} __packed;

static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
}

static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
{
	lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(lap_msg->offset12) &
					 0x000000FF));
}

static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
{
	return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
}

static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
						  u8 resp_timeout)
{
	lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
					(be32_to_cpu(lap_msg->offset12) &
					 0xFFFFFF07));
}

static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
}

static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
					 __be32 flow_label)
{
	lap_msg->offset56 = cpu_to_be32(
			    (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
			    (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
{
	return (u8) be32_to_cpu(lap_msg->offset56);
}

static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
					    u8 traffic_class)
{
	lap_msg->offset56 = cpu_to_be32(traffic_class |
					(be32_to_cpu(lap_msg->offset56) &
					 0xFFFFFF00));
}

static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset61 & 0x3F;
}

static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
					  u8 packet_rate)
{
	lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
}

static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset62 >> 4;
}

static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
{
	lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
}

static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
{
	return (lap_msg->offset62 >> 3) & 0x1;
}

static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
					   u8 subnet_local)
{
	lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
			    (lap_msg->offset62 & 0xF7);
}

static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset63 >> 3;
}

static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
						u8 local_ack_timeout)
{
	lap_msg->offset63 = (local_ack_timeout << 3) |
			    (lap_msg->offset63 & 0x07);
}
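
/*
 * Illustrative sketch (hypothetical helper): the SL, subnet-local flag and
 * local ACK timeout of the alternate path proposed by a LAP live in the
 * packed bytes offset62 and offset63; each setter above preserves the
 * neighbouring bits of its byte.
 */
static inline void cm_lap_example_set_alt_path_bits(struct cm_lap_msg *lap_msg,
						    u8 sl, u8 subnet_local,
						    u8 local_ack_timeout)
{
	cm_lap_set_sl(lap_msg, sl);
	cm_lap_set_subnet_local(lap_msg, subnet_local);
	cm_lap_set_local_ack_timeout(lap_msg, local_ack_timeout);
}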

struct cm_apr_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 info_length;
	u8 ap_status;
	__be16 rsvd;
	u8 info[IB_CM_APR_INFO_LENGTH];

	u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
} __packed;

struct cm_sidr_req_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	__be16 pkey;
	__be16 rsvd;
	__be64 service_id;

	u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
} __packed;

struct cm_sidr_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	u8 status;
	u8 info_length;
	__be16 rsvd;
	/* QPN:24, rsvd:8 */
	__be32 offset8;
	__be64 service_id;
	__be32 qkey;
	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];

	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
} __packed;

static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
}

static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
				       __be32 qpn)
{
	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					    (be32_to_cpu(sidr_rep_msg->offset8) &
					     0x000000FF));
}

#endif /* CM_MSGS_H */