/*
 * Copyright (c) 2004, 2011 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if !defined(CM_MSGS_H)
#define CM_MSGS_H

#include <rdma/ib_mad.h>
#include <rdma/ib_cm.h>

/*
 * Parameters to routines below should be in network-byte order, and values
 * are returned in network-byte order.
 */

#define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */

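/*
 * Usage sketch (illustrative only; qp_num is a caller-supplied host-order
 * value): following the convention above, callers convert to and from host
 * byte order around the accessors defined below, e.g.
 *
 *	cm_req_set_local_qpn(req_msg, cpu_to_be32(qp_num));
 *	qp_num = be32_to_cpu(cm_req_get_local_qpn(req_msg));
 */
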
struct cm_req_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 rsvd4;
	__be64 service_id;
	__be64 local_ca_guid;
	__be32 rsvd24;
	__be32 local_qkey;
	/* local QPN:24, responder resources:8 */
	__be32 offset32;
	/* local EECN:24, initiator depth:8 */
	__be32 offset36;
	/*
	 * remote EECN:24, remote CM response timeout:5,
	 * transport service type:2, end-to-end flow control:1
	 */
	__be32 offset40;
	/* starting PSN:24, local CM response timeout:5, retry count:3 */
	__be32 offset44;
	__be16 pkey;
	/* path MTU:4, RDC exists:1, RNR retry count:3. */
	u8 offset50;
	/* max CM Retries:4, SRQ:1, extended transport type:3 */
	u8 offset51;

	__be16 primary_local_lid;
	__be16 primary_remote_lid;
	union ib_gid primary_local_gid;
	union ib_gid primary_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 primary_offset88;
	u8 primary_traffic_class;
	u8 primary_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 primary_offset94;
	/* local ACK timeout:5, rsvd:3 */
	u8 primary_offset95;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 alt_offset132;
	u8 alt_traffic_class;
	u8 alt_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 alt_offset138;
	/* local ACK timeout:5, rsvd:3 */
	u8 alt_offset139;

	u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];

} __packed;

static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
}

static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
{
	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(req_msg->offset32) &
					 0x000000FF));
}

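/*
 * Worked example (assumed values, illustrative only): with a local QPN of
 * 0x000ABC and responder resources of 4, cm_req_set_local_qpn() above and
 * cm_req_set_resp_res() below leave offset32 holding the host-order value
 * (0x000ABC << 8) | 4 == 0x000ABC04, stored in network byte order: the QPN
 * in the upper 24 bits and the responder resources in the low 8 bits.
 */
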
static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset32);
}

static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
{
	req_msg->offset32 = cpu_to_be32(resp_res |
					(be32_to_cpu(req_msg->offset32) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset36);
}

static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
					 u8 init_depth)
{
	req_msg->offset36 = cpu_to_be32(init_depth |
					(be32_to_cpu(req_msg->offset36) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
}

static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
						  u8 resp_timeout)
{
	req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
					(be32_to_cpu(req_msg->offset40) &
					 0xFFFFFF07));
}

static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
	u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
	switch(transport_type) {
	case 0: return IB_QPT_RC;
	case 1: return IB_QPT_UC;
	case 3:
		switch (req_msg->offset51 & 0x7) {
		case 1: return IB_QPT_XRC_TGT;
		default: return 0;
		}
	default: return 0;
	}
}

static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
				      enum ib_qp_type qp_type)
{
	switch(qp_type) {
	case IB_QPT_UC:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						req_msg->offset40) &
						0xFFFFFFF9) | 0x2);
		break;
	case IB_QPT_XRC_INI:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						req_msg->offset40) &
						0xFFFFFFF9) | 0x6);
		req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
		break;
	default:
		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
						req_msg->offset40) &
						0xFFFFFFF9);
	}
}

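/*
 * Encoding note (illustrative, derived from the accessors above): the
 * transport service type lives in bits 2:1 of offset40 (0 = RC, 1 = UC,
 * 3 = extended) and the extended transport type in the low 3 bits of
 * offset51.  For example, cm_req_set_qp_type(req_msg, IB_QPT_XRC_INI)
 * selects the extended encoding (0x6 in offset40, subtype 1 in offset51),
 * which cm_req_get_qp_type() on the passive side reads back as
 * IB_QPT_XRC_TGT.
 */
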
static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
{
	return be32_to_cpu(req_msg->offset40) & 0x1;
}

static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
					u8 flow_ctrl)
{
	req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
					(be32_to_cpu(req_msg->offset40) &
					 0xFFFFFFFE));
}

static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
}

static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
					   __be32 starting_psn)
{
	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
}

static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
}

static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
						 u8 resp_timeout)
{
	req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
}

static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
}

static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
					  u8 retry_count)
{
	req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
}

static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 >> 4;
}

static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
}

static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 & 0x7;
}

static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
					      u8 rnr_retry_count)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
				  (rnr_retry_count & 0x7));
}

static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
{
	return req_msg->offset51 >> 4;
}

static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
					     u8 retries)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
}

static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
{
	return (req_msg->offset51 & 0x8) >> 3;
}

static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
				  ((srq & 0x1) << 3));
}

static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
}

static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
						 __be32 flow_label)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0x00000FFF) |
				    (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
}

static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
						  u8 rate)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset94 >> 4);
}

static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
					  (sl << 4));
}

static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
}

static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
						   u8 subnet_local)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
					  ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset95 >> 3);
}

static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
							 u8 local_ack_timeout)
{
	req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
					  (local_ack_timeout << 3));
}

static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
}

static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
					     __be32 flow_label)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0x00000FFF) |
				 (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
}

static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
					      u8 rate)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset138 >> 4);
}

static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
				       (sl << 4));
}

static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
}

static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
					       u8 subnet_local)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
				       ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset139 >> 3);
}

static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
						    u8 local_ack_timeout)
{
	req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
				       (local_ack_timeout << 3));
}

/* Message REJected or MRAed */
enum cm_msg_response {
	CM_MSG_RESPONSE_REQ   = 0x0,
	CM_MSG_RESPONSE_REP   = 0x1,
	CM_MSG_RESPONSE_OTHER = 0x2
};

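/*
 * Usage sketch (illustrative only): the 2-bit "message MRAed"/"message
 * REJected" fields below carry one of the cm_msg_response values above,
 * identifying which message is being acknowledged or rejected, e.g.
 *
 *	cm_mra_set_msg_mraed(mra_msg, CM_MSG_RESPONSE_REQ);
 *	cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
 */
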
struct cm_mra_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message MRAed:2, rsvd:6 */
	u8 offset8;
	/* service timeout:5, rsvd:3 */
	u8 offset9;

	u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];

} __packed;

static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset8 >> 6);
}

static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
{
	mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset9 >> 3);
}

static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
					      u8 service_timeout)
{
	mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
				 (service_timeout << 3));
}

struct cm_rej_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message REJected:2, rsvd:6 */
	u8 offset8;
	/* reject info length:7, rsvd:1. */
	u8 offset9;
	__be16 reason;
	u8 ari[IB_CM_REJ_ARI_LENGTH];

	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];

} __packed;

static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset8 >> 6);
}

static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
{
	rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset9 >> 1);
}

static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
					      u8 len)
{
	rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
}

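/*
 * Usage sketch (illustrative only): a REJ that carries additional reject
 * information (ARI) sets the 7-bit length and copies at most
 * IB_CM_REJ_ARI_LENGTH bytes into ari[], e.g.
 *
 *	cm_rej_set_reject_info_len(rej_msg, ari_length);
 *	memcpy(rej_msg->ari, ari, ari_length);
 */
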
struct cm_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	__be32 local_qkey;
	/* local QPN:24, rsvd:8 */
	__be32 offset12;
	/* local EECN:24, rsvd:8 */
	__be32 offset16;
	/* starting PSN:24 rsvd:8 */
	__be32 offset20;
	u8 resp_resources;
	u8 initiator_depth;
	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
	u8 offset26;
	/* RNR retry count:3, SRQ:1, rsvd:5 */
	u8 offset27;
	__be64 local_ca_guid;

	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];

} __packed;

static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
}

static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
{
	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
}

static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
}

static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
{
	rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
			    (be32_to_cpu(rep_msg->offset16) & 0x000000FF));
}

static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
{
	return (qp_type == IB_QPT_XRC_INI) ?
		cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
}

static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
}

static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
					   __be32 starting_psn)
{
	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
}

static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 >> 3);
}

static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
					       u8 target_ack_delay)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
				  (target_ack_delay << 3));
}

static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset26 & 0x06) >> 1);
}

static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
				  ((failover & 0x3) << 1));
}

static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 & 0x01);
}

static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
					u8 flow_ctrl)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
				  (flow_ctrl & 0x1));
}

static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset27 >> 5);
}

static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
					      u8 rnr_retry_count)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
				  (rnr_retry_count << 5));
}

static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset27 >> 4) & 0x1);
}

static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
				  ((srq & 0x1) << 4));
}

struct cm_rtu_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];

} __packed;

struct cm_dreq_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* remote QPN/EECN:24, rsvd:8 */
	__be32 offset8;

	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];

} __packed;

static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
}

static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
{
	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
}

struct cm_drep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];

} __packed;

struct cm_lap_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	__be32 rsvd8;
	/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
	__be32 offset12;
	__be32 rsvd16;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:4, traffic class:8 */
	__be32 offset56;
	u8 alt_hop_limit;
	/* rsvd:2, packet rate:6 */
	u8 offset61;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 offset62;
	/* local ACK timeout:5, rsvd:3 */
	u8 offset63;

	u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
} __packed;

static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
}

static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
{
	lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(lap_msg->offset12) &
			     0x000000FF));
}

static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
{
	return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
}

static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
						  u8 resp_timeout)
{
	lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
			    (be32_to_cpu(lap_msg->offset12) &
			     0xFFFFFF07));
}

static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
}

static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
					 __be32 flow_label)
{
	lap_msg->offset56 = cpu_to_be32(
			    (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
			    (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
{
	return (u8) be32_to_cpu(lap_msg->offset56);
}

static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
					    u8 traffic_class)
{
	lap_msg->offset56 = cpu_to_be32(traffic_class |
			    (be32_to_cpu(lap_msg->offset56) &
			     0xFFFFFF00));
}

static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset61 & 0x3F;
}

static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
					  u8 packet_rate)
{
	lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
}

static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset62 >> 4;
}

static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
{
	lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
}

static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
{
	return (lap_msg->offset62 >> 3) & 0x1;
}

static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
					   u8 subnet_local)
{
	/* set only the subnet-local bit, preserving the rest of offset62 */
	lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
			    (lap_msg->offset62 & 0xF7);
}

static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset63 >> 3;
}

static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
						u8 local_ack_timeout)
{
	lap_msg->offset63 = (local_ack_timeout << 3) |
			    (lap_msg->offset63 & 0x07);
}

struct cm_apr_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 info_length;
	u8 ap_status;
	__be16 rsvd;
	u8 info[IB_CM_APR_INFO_LENGTH];

	u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
} __packed;

struct cm_sidr_req_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	__be16 pkey;
	__be16 rsvd;
	__be64 service_id;

	u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
} __packed;

struct cm_sidr_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	u8 status;
	u8 info_length;
	__be16 rsvd;
	/* QPN:24, rsvd:8 */
	__be32 offset8;
	__be64 service_id;
	__be32 qkey;
	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];

	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
} __packed;

static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
}

static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
				       __be32 qpn)
{
	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			       (be32_to_cpu(sidr_rep_msg->offset8) &
				0x000000FF));
}

#endif /* CM_MSGS_H */