/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RXE_HDR_H
#define RXE_HDR_H
/* Extracted information about a packet is carried in the sk_buff
 * control block (skb->cb), so struct rxe_pkt_info must fit in the
 * 48-byte cb array. For received packets it is stored there directly.
 */
struct rxe_pkt_info {
	struct rxe_dev		*rxe;		/* device that owns packet */
	struct rxe_qp		*qp;		/* qp that owns packet */
	struct rxe_send_wqe	*wqe;		/* send wqe */
	u8			*hdr;		/* start of headers; bth at hdr + offset */
	u32			mask;		/* useful info about pkt */
	u32			psn;		/* bth psn of packet */
	u16			pkey_index;	/* partition of pkt */
	u16			paylen;		/* length from bth through icrc */
	u8			port_num;	/* port pkt received on */
	u8			opcode;		/* bth opcode of packet */
	u8			offset;		/* bth offset from pkt->hdr */
};

/* These macros are valid only for a received skb, where the packet
 * info lives in the skb control block.
 */
#define SKB_TO_PKT(skb) ((struct rxe_pkt_info *)(skb)->cb)
#define PKT_TO_SKB(pkt) container_of((void *)(pkt), struct sk_buff, cb)
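
/* Illustrative sketch (not the driver's receive code): assuming
 * pkt->hdr and pkt->offset were filled in when the skb arrived, a
 * receive path maps the control block to the packet info and reads
 * BTH fields through the accessors defined below:
 *
 *	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
 *
 *	pkt->opcode = bth_opcode(pkt);
 *	pkt->psn = bth_psn(pkt);
 */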

/*
 * IBA header types and methods
 *
 * Some of these are included for reference and completeness only,
 * since rxe does not currently support the RD transport. Much of this
 * could be moved into the IB core; ib_pack.h covers part of it but is
 * incomplete.
 *
 * Header-specific routines insert/extract values to/from headers.
 * The routines named __hhh_(set_)fff() take a pointer to an hhh
 * header and get (set) the fff field. The routines named
 * hhh_(set_)fff() take a packet info struct and locate the header
 * and field based on the opcode in the packet. Conversion between
 * network and cpu byte order is also done here.
 */
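
/* For example (a sketch, using names defined below), these read the
 * same PSN, once from a raw BTH pointer and once via the packet info:
 *
 *	u32 a = __bth_psn(pkt->hdr + pkt->offset);
 *	u32 b = bth_psn(pkt);
 *
 * so a == b for any valid pkt.
 */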

#define RXE_ICRC_SIZE		(4)
#define RXE_MAX_HDR_LENGTH	(80)

/******************************************************************************
 * Base Transport Header
 ******************************************************************************/
struct rxe_bth {
	u8			opcode;
	u8			flags;
	__be16			pkey;
	__be32			qpn;
	__be32			apsn;
};
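
/* A note on the layout (derived from the masks below): on the wire the
 * 12 BTH bytes carry
 *
 *	opcode[8]
 *	se[1] | mig[1] | pad[2] | tver[4]	(the flags byte)
 *	pkey[16]
 *	fecn[1] | becn[1] | resv6a[6] | qpn[24]	(the qpn word)
 *	ack[1] | resv7[7] | psn[24]		(the apsn word)
 *
 * which is what the BTH_*_MASK values pick apart.
 */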

#define BTH_TVER		(0)
#define BTH_DEF_PKEY		(0xffff)

#define BTH_SE_MASK		(0x80)
#define BTH_MIG_MASK		(0x40)
#define BTH_PAD_MASK		(0x30)
#define BTH_TVER_MASK		(0x0f)
#define BTH_FECN_MASK		(0x80000000)
#define BTH_BECN_MASK		(0x40000000)
#define BTH_RESV6A_MASK		(0x3f000000)
#define BTH_QPN_MASK		(0x00ffffff)
#define BTH_ACK_MASK		(0x80000000)
#define BTH_RESV7_MASK		(0x7f000000)
#define BTH_PSN_MASK		(0x00ffffff)

static inline u8 __bth_opcode(void *arg)
{
	struct rxe_bth *bth = arg;

	return bth->opcode;
}

static inline void __bth_set_opcode(void *arg, u8 opcode)
{
	struct rxe_bth *bth = arg;

	bth->opcode = opcode;
}

static inline u8 __bth_se(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_SE_MASK & bth->flags);
}

static inline void __bth_set_se(void *arg, int se)
{
	struct rxe_bth *bth = arg;

	if (se)
		bth->flags |= BTH_SE_MASK;
	else
		bth->flags &= ~BTH_SE_MASK;
}

static inline u8 __bth_mig(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_MIG_MASK & bth->flags);
}

static inline void __bth_set_mig(void *arg, u8 mig)
{
	struct rxe_bth *bth = arg;

	if (mig)
		bth->flags |= BTH_MIG_MASK;
	else
		bth->flags &= ~BTH_MIG_MASK;
}

static inline u8 __bth_pad(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_PAD_MASK & bth->flags) >> 4;
}

static inline void __bth_set_pad(void *arg, u8 pad)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_PAD_MASK & (pad << 4)) |
			(~BTH_PAD_MASK & bth->flags);
}
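
/* The pad count is the number of bytes (0..3) needed to round the
 * payload up to a 4-byte multiple, i.e.
 *
 *	pad = (-payload_len) & 3;
 *
 * so e.g. a 13-byte payload is carried with pad = 3.
 */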

static inline u8 __bth_tver(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_TVER_MASK & bth->flags;
}

static inline void __bth_set_tver(void *arg, u8 tver)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_TVER_MASK & tver) |
			(~BTH_TVER_MASK & bth->flags);
}

static inline u16 __bth_pkey(void *arg)
{
	struct rxe_bth *bth = arg;

	return be16_to_cpu(bth->pkey);
}

static inline void __bth_set_pkey(void *arg, u16 pkey)
{
	struct rxe_bth *bth = arg;

	bth->pkey = cpu_to_be16(pkey);
}

static inline u32 __bth_qpn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
}

static inline void __bth_set_qpn(void *arg, u32 qpn)
{
	struct rxe_bth *bth = arg;
	u32 resvqpn = be32_to_cpu(bth->qpn);

	bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
			       (~BTH_QPN_MASK & resvqpn));
}

static inline int __bth_fecn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
}

static inline void __bth_set_fecn(void *arg, int fecn)
{
	struct rxe_bth *bth = arg;

	if (fecn)
		bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
}

static inline int __bth_becn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
}

static inline void __bth_set_becn(void *arg, int becn)
{
	struct rxe_bth *bth = arg;

	if (becn)
		bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
}

static inline u8 __bth_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
}

static inline void __bth_set_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	/* clear only the reserved bits, preserving fecn/becn/qpn */
	bth->qpn &= cpu_to_be32(~BTH_RESV6A_MASK);
}

static inline int __bth_ack(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
}

static inline void __bth_set_ack(void *arg, int ack)
{
	struct rxe_bth *bth = arg;

	if (ack)
		bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
	else
		bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
}

static inline void __bth_set_resv7(void *arg)
{
	struct rxe_bth *bth = arg;

	bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
}

static inline u32 __bth_psn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
}

static inline void __bth_set_psn(void *arg, u32 psn)
{
	struct rxe_bth *bth = arg;
	u32 apsn = be32_to_cpu(bth->apsn);

	bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
			(~BTH_PSN_MASK & apsn));
}

static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
{
	return __bth_opcode(pkt->hdr + pkt->offset);
}

static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
{
	__bth_set_opcode(pkt->hdr + pkt->offset, opcode);
}

static inline u8 bth_se(struct rxe_pkt_info *pkt)
{
	return __bth_se(pkt->hdr + pkt->offset);
}

static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
{
	__bth_set_se(pkt->hdr + pkt->offset, se);
}

static inline u8 bth_mig(struct rxe_pkt_info *pkt)
{
	return __bth_mig(pkt->hdr + pkt->offset);
}

static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
{
	__bth_set_mig(pkt->hdr + pkt->offset, mig);
}

static inline u8 bth_pad(struct rxe_pkt_info *pkt)
{
	return __bth_pad(pkt->hdr + pkt->offset);
}

static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
{
	__bth_set_pad(pkt->hdr + pkt->offset, pad);
}

static inline u8 bth_tver(struct rxe_pkt_info *pkt)
{
	return __bth_tver(pkt->hdr + pkt->offset);
}

static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
{
	__bth_set_tver(pkt->hdr + pkt->offset, tver);
}

static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
{
	return __bth_pkey(pkt->hdr + pkt->offset);
}

static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
{
	__bth_set_pkey(pkt->hdr + pkt->offset, pkey);
}

static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
{
	return __bth_qpn(pkt->hdr + pkt->offset);
}

static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
{
	__bth_set_qpn(pkt->hdr + pkt->offset, qpn);
}

static inline int bth_fecn(struct rxe_pkt_info *pkt)
{
	return __bth_fecn(pkt->hdr + pkt->offset);
}

static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
{
	__bth_set_fecn(pkt->hdr + pkt->offset, fecn);
}

static inline int bth_becn(struct rxe_pkt_info *pkt)
{
	return __bth_becn(pkt->hdr + pkt->offset);
}

static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
{
	__bth_set_becn(pkt->hdr + pkt->offset, becn);
}

static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
{
	return __bth_resv6a(pkt->hdr + pkt->offset);
}

static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
{
	__bth_set_resv6a(pkt->hdr + pkt->offset);
}

static inline int bth_ack(struct rxe_pkt_info *pkt)
{
	return __bth_ack(pkt->hdr + pkt->offset);
}

static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
{
	__bth_set_ack(pkt->hdr + pkt->offset, ack);
}

static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
{
	__bth_set_resv7(pkt->hdr + pkt->offset);
}

static inline u32 bth_psn(struct rxe_pkt_info *pkt)
{
	return __bth_psn(pkt->hdr + pkt->offset);
}

static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
{
	__bth_set_psn(pkt->hdr + pkt->offset, psn);
}

static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
			    int mig, int pad, u16 pkey, u32 qpn, int ack_req,
			    u32 psn)
{
	struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);

	bth->opcode = opcode;
	bth->flags = (pad << 4) & BTH_PAD_MASK;
	if (se)
		bth->flags |= BTH_SE_MASK;
	if (mig)
		bth->flags |= BTH_MIG_MASK;
	bth->pkey = cpu_to_be16(pkey);
	bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
	psn &= BTH_PSN_MASK;
	if (ack_req)
		psn |= BTH_ACK_MASK;
	bth->apsn = cpu_to_be32(psn);
}
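
/* Illustrative use (a sketch, not the driver's requester code; pad,
 * pkey, qpn, ack_req and psn stand for values computed by the caller):
 *
 *	bth_init(pkt, IB_OPCODE_RC_SEND_ONLY, 0, 0, pad, pkey,
 *		 qpn, ack_req, psn);
 *
 * where IB_OPCODE_RC_SEND_ONLY comes from rdma/ib_pack.h.
 */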

/******************************************************************************
 * Reliable Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_rdeth {
	__be32			een;
};

#define RDETH_EEN_MASK		(0x00ffffff)

static inline u32 __rdeth_een(void *arg)
{
	struct rxe_rdeth *rdeth = arg;

	return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
}

static inline void __rdeth_set_een(void *arg, u32 een)
{
	struct rxe_rdeth *rdeth = arg;

	rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
}

static inline u32 rdeth_een(struct rxe_pkt_info *pkt)
{
	return __rdeth_een(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
}

static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
{
	__rdeth_set_een(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
}

/******************************************************************************
 * Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_deth {
	__be32			qkey;
	__be32			sqp;
};

#define GSI_QKEY		(0x80010000)
#define DETH_SQP_MASK		(0x00ffffff)

static inline u32 __deth_qkey(void *arg)
{
	struct rxe_deth *deth = arg;

	return be32_to_cpu(deth->qkey);
}

static inline void __deth_set_qkey(void *arg, u32 qkey)
{
	struct rxe_deth *deth = arg;

	deth->qkey = cpu_to_be32(qkey);
}

static inline u32 __deth_sqp(void *arg)
{
	struct rxe_deth *deth = arg;

	return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
}

static inline void __deth_set_sqp(void *arg, u32 sqp)
{
	struct rxe_deth *deth = arg;

	deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
}

static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
{
	return __deth_qkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
{
	__deth_set_qkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
}

static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
{
	return __deth_sqp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
{
	__deth_set_sqp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
}
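
/* Sketch of a responder-side qkey check for a received UD packet (not
 * the driver's actual validation code; qp->attr.qkey is assumed here
 * to hold the queue pair's qkey):
 *
 *	if (deth_qkey(pkt) != qp->attr.qkey)
 *		drop the packet;
 */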

/******************************************************************************
 * RDMA Extended Transport Header
 ******************************************************************************/
struct rxe_reth {
	__be64			va;
	__be32			rkey;
	__be32			len;
};

static inline u64 __reth_va(void *arg)
{
	struct rxe_reth *reth = arg;

	return be64_to_cpu(reth->va);
}

static inline void __reth_set_va(void *arg, u64 va)
{
	struct rxe_reth *reth = arg;

	reth->va = cpu_to_be64(va);
}

static inline u32 __reth_rkey(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->rkey);
}

static inline void __reth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_reth *reth = arg;

	reth->rkey = cpu_to_be32(rkey);
}

static inline u32 __reth_len(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->len);
}

static inline void __reth_set_len(void *arg, u32 len)
{
	struct rxe_reth *reth = arg;

	reth->len = cpu_to_be32(len);
}

static inline u64 reth_va(struct rxe_pkt_info *pkt)
{
	return __reth_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__reth_set_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
}

static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
{
	return __reth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__reth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
}

static inline u32 reth_len(struct rxe_pkt_info *pkt)
{
	return __reth_len(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
{
	__reth_set_len(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
}
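
/* For example (sketch): a responder handling an RDMA WRITE request can
 * pull the target from the RETH like
 *
 *	u64 va  = reth_va(pkt);
 *	u32 key = reth_rkey(pkt);
 *	u32 len = reth_len(pkt);
 *
 * and then look up and validate the memory region for (key, va, len).
 */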

/******************************************************************************
 * Atomic Extended Transport Header
 ******************************************************************************/
struct rxe_atmeth {
	__be64			va;
	__be32			rkey;
	__be64			swap_add;
	__be64			comp;
} __attribute__((__packed__));

static inline u64 __atmeth_va(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->va);
}

static inline void __atmeth_set_va(void *arg, u64 va)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->va = cpu_to_be64(va);
}

static inline u32 __atmeth_rkey(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be32_to_cpu(atmeth->rkey);
}

static inline void __atmeth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->rkey = cpu_to_be32(rkey);
}

static inline u64 __atmeth_swap_add(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->swap_add);
}

static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->swap_add = cpu_to_be64(swap_add);
}

static inline u64 __atmeth_comp(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->comp);
}

static inline void __atmeth_set_comp(void *arg, u64 comp)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->comp = cpu_to_be64(comp);
}

static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
{
	return __atmeth_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__atmeth_set_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
}

static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
{
	return __atmeth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__atmeth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
}

static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
{
	return __atmeth_swap_add(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
{
	__atmeth_set_swap_add(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
}

static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
{
	return __atmeth_comp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
{
	__atmeth_set_comp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
}
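
/* The atomic semantics this header describes (for reference): with *va
 * naming the 64-bit target of the operation,
 *
 *	compare & swap:	if (*va == comp) *va = swap_add;
 *	fetch & add:	*va += swap_add;
 *
 * and in both cases the original value of *va is returned in the
 * atomic ack header below.
 */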

/******************************************************************************
 * Ack Extended Transport Header
 ******************************************************************************/
struct rxe_aeth {
	__be32			smsn;
};

#define AETH_SYN_MASK		(0xff000000)
#define AETH_MSN_MASK		(0x00ffffff)

enum aeth_syndrome {
	AETH_TYPE_MASK		= 0xe0,
	AETH_ACK		= 0x00,
	AETH_RNR_NAK		= 0x20,
	AETH_RSVD		= 0x40,
	AETH_NAK		= 0x60,
	AETH_ACK_UNLIMITED	= 0x1f,
	AETH_NAK_PSN_SEQ_ERROR	= 0x60,
	AETH_NAK_INVALID_REQ	= 0x61,
	AETH_NAK_REM_ACC_ERR	= 0x62,
	AETH_NAK_REM_OP_ERR	= 0x63,
	AETH_NAK_INV_RD_REQ	= 0x64,
};
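
/* Decoding sketch: the top three bits of the syndrome select the type,
 * the low five bits qualify it. For an ACK the low bits carry the
 * credit count (AETH_ACK_UNLIMITED meaning end-to-end credits are not
 * supported); for an RNR NAK they encode the RNR timer value; for a
 * NAK they select one of the AETH_NAK_* codes above, e.g.
 *
 *	if ((aeth_syn(pkt) & AETH_TYPE_MASK) == AETH_RNR_NAK)
 *		timer = aeth_syn(pkt) & ~AETH_TYPE_MASK;
 */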

static inline u8 __aeth_syn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
}

static inline void __aeth_set_syn(void *arg, u8 syn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
			 (~AETH_SYN_MASK & smsn));
}

static inline u32 __aeth_msn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
}

static inline void __aeth_set_msn(void *arg, u32 msn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
			 (~AETH_MSN_MASK & smsn));
}

static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
{
	return __aeth_syn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
{
	__aeth_set_syn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
}

static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
{
	return __aeth_msn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
{
	__aeth_set_msn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
}

/******************************************************************************
 * Atomic Ack Extended Transport Header
 ******************************************************************************/
struct rxe_atmack {
	__be64			orig;
};

static inline u64 __atmack_orig(void *arg)
{
	struct rxe_atmack *atmack = arg;

	return be64_to_cpu(atmack->orig);
}

static inline void __atmack_set_orig(void *arg, u64 orig)
{
	struct rxe_atmack *atmack = arg;

	atmack->orig = cpu_to_be64(orig);
}

static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
{
	return __atmack_orig(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
}

static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
{
	__atmack_set_orig(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
}

/******************************************************************************
 * Immediate Extended Transport Header
 ******************************************************************************/
struct rxe_immdt {
	__be32			imm;
};

static inline __be32 __immdt_imm(void *arg)
{
	struct rxe_immdt *immdt = arg;

	return immdt->imm;
}

static inline void __immdt_set_imm(void *arg, __be32 imm)
{
	struct rxe_immdt *immdt = arg;

	immdt->imm = imm;
}

static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
{
	return __immdt_imm(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
}

static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
{
	__immdt_set_imm(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
}

/******************************************************************************
 * Invalidate Extended Transport Header
 ******************************************************************************/
struct rxe_ieth {
	__be32			rkey;
};

static inline u32 __ieth_rkey(void *arg)
{
	struct rxe_ieth *ieth = arg;

	return be32_to_cpu(ieth->rkey);
}

static inline void __ieth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_ieth *ieth = arg;

	ieth->rkey = cpu_to_be32(rkey);
}

static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
{
	return __ieth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IETH]);
}

static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__ieth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
}

enum rxe_hdr_length {
	RXE_BTH_BYTES		= sizeof(struct rxe_bth),
	RXE_DETH_BYTES		= sizeof(struct rxe_deth),
	RXE_IMMDT_BYTES		= sizeof(struct rxe_immdt),
	RXE_RETH_BYTES		= sizeof(struct rxe_reth),
	RXE_AETH_BYTES		= sizeof(struct rxe_aeth),
	RXE_ATMACK_BYTES	= sizeof(struct rxe_atmack),
	RXE_ATMETH_BYTES	= sizeof(struct rxe_atmeth),
	RXE_IETH_BYTES		= sizeof(struct rxe_ieth),
	RXE_RDETH_BYTES		= sizeof(struct rxe_rdeth),
};
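
/* For example, the IB header for an RC RDMA WRITE FIRST packet is a
 * BTH followed by a RETH, i.e. RXE_BTH_BYTES + RXE_RETH_BYTES =
 * 12 + 16 = 28 bytes; the per-opcode lengths and field offsets are
 * tabulated in rxe_opcode[].
 */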

static inline size_t header_size(struct rxe_pkt_info *pkt)
{
	return pkt->offset + rxe_opcode[pkt->opcode].length;
}

static inline void *payload_addr(struct rxe_pkt_info *pkt)
{
	return pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
}

static inline size_t payload_size(struct rxe_pkt_info *pkt)
{
	return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
		- bth_pad(pkt) - RXE_ICRC_SIZE;
}
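
/* Worked example (sketch): for an RC SEND ONLY packet with a 13-byte
 * payload, offset[RXE_PAYLOAD] is RXE_BTH_BYTES (12), pad is 3 and the
 * ICRC adds 4, so paylen = 12 + 13 + 3 + 4 = 32 and payload_size()
 * returns 32 - 12 - 3 - 4 = 13.
 */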

#endif /* RXE_HDR_H */