xref: /openbmc/linux/drivers/infiniband/sw/rdmavt/rc.c (revision d164bf64)
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2016 Intel Corporation.
 */
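
/*
 * RC (reliable connected) helpers shared by rdmavt-based drivers:
 * AETH construction (rvt_compute_aeth), ACK credit processing
 * (rvt_get_credit), and SGE state restart for retransmission
 * (rvt_restart_sge).
 */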

#include <rdma/rdmavt_qp.h>
#include <rdma/ib_hdrs.h>

/*
 * Convert the AETH credit code into the number of credits.
 */
static const u16 credit_table[31] = {
	0,                      /* 0 */
	1,                      /* 1 */
	2,                      /* 2 */
	3,                      /* 3 */
	4,                      /* 4 */
	6,                      /* 5 */
	8,                      /* 6 */
	12,                     /* 7 */
	16,                     /* 8 */
	24,                     /* 9 */
	32,                     /* A */
	48,                     /* B */
	64,                     /* C */
	96,                     /* D */
	128,                    /* E */
	192,                    /* F */
	256,                    /* 10 */
	384,                    /* 11 */
	512,                    /* 12 */
	768,                    /* 13 */
	1024,                   /* 14 */
	1536,                   /* 15 */
	2048,                   /* 16 */
	3072,                   /* 17 */
	4096,                   /* 18 */
	6144,                   /* 19 */
	8192,                   /* 1A */
	12288,                  /* 1B */
	16384,                  /* 1C */
	24576,                  /* 1D */
	32768                   /* 1E */
};
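
/*
 * Example (illustrative): a credit code of 0x7 advertises 12 RWQEs and
 * 0x1E advertises 32768; IB_AETH_CREDIT_INVAL, used below for SRQs, has
 * no entry in the table.
 */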

/**
 * rvt_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 rvt_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & IB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= IB_AETH_CREDIT_INVAL << IB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		u32 head;
		u32 tail;

		credits = READ_ONCE(qp->r_rq.kwq->count);
		if (credits == 0) {
			/* sanity check pointers before trusting them */
			if (qp->ip) {
				head = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->head);
				tail = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->tail);
			} else {
				head = READ_ONCE(qp->r_rq.kwq->head);
				tail = READ_ONCE(qp->r_rq.kwq->tail);
			}
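			/*
			 * head/tail may have been read from user-mapped
			 * queue memory, so clamp out-of-range values rather
			 * than trusting them.
			 */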
			if (head >= qp->r_rq.size)
				head = 0;
			if (tail >= qp->r_rq.size)
				tail = 0;
			/*
			 * Compute the number of credits available (RWQEs).
			 * There is a small chance that the pair of reads are
			 * not atomic, which is OK, since the fuzziness is
			 * resolved as further ACKs go out.
			 */
			credits = rvt_get_rq_count(&qp->r_rq, head, tail);
		}
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits) {
				max = x;
			} else {
				if (min == x)
					break;
				min = x;
			}
		}
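		/*
		 * At this point x is the largest credit code whose table
		 * entry does not exceed the available RWQE count, so the
		 * advertised credit never overstates what is available.
		 */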
		aeth |= x << IB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
EXPORT_SYMBOL(rvt_compute_aeth);
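
/*
 * Illustrative use (not part of this file): an rdmavt driver building an
 * RC ACK would typically place the result straight into the packet
 * header, e.g. something like
 *
 *	ohdr->u.aeth = rvt_compute_aeth(qp);
 *
 * where "ohdr" is the driver's other-headers structure; the exact field
 * is driver-specific and shown here only as a sketch.
 */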

/**
 * rvt_get_credit - update the QP's send credit state from an AETH
 * @qp: the qp whose pending sends may be rescheduled
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth)
{
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	u32 credit = (aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == IB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				rdi->driver_f.schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & IB_MSN_MASK;
		if (rvt_cmp_msn(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				rdi->driver_f.schedule_send(qp);
			}
		}
	}
}
EXPORT_SYMBOL(rvt_get_credit);
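
/*
 * Worked example (illustrative): an AETH carrying MSN 100 and credit
 * code 0x7 maps to credit_table[0x7] = 12 credits, so the candidate LSN
 * is (100 + 12) & IB_MSN_MASK = 112; s_lsn advances to 112 only if that
 * is newer than the current value.
 */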

/**
 * rvt_restart_sge - rewind the sge state for a wqe
 * @ss: the sge state pointer
 * @wqe: the wqe to rewind
 * @len: the data length from the start of the wqe in bytes
 *
 * Returns the remaining data length.
 */
u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len)
{
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	rvt_skip_sge(ss, len, false);
	return wqe->length - len;
}
EXPORT_SYMBOL(rvt_restart_sge);
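
/*
 * Worked example (illustrative): when retransmission of an 8192-byte
 * WQE must resume at byte offset 4096, rvt_restart_sge() skips the
 * first 4096 bytes of the SGE list and returns the 4096 bytes still
 * to be sent.
 */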