/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_eth_com.h"

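/* Completion descriptors are consumed using a phase bit rather than a
 * device-visible head pointer: every cdesc carries the phase of the pass
 * in which the device wrote it, and the driver flips io_cq->phase each
 * time the head wraps around (ena_com_cq_inc_head() takes care of the
 * wrap). A descriptor is treated as new only when its phase matches the
 * expected one.
 */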
static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

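/* In LLQ (low latency queue) mode, descriptors are written directly into
 * device memory instead of host DMA memory. A full descriptor list entry
 * (a "line") is first assembled in a host-side bounce buffer and then
 * copied to the device in 64-bit chunks, which is why the entry size is
 * assumed here to be a multiple of 8 bytes.
 */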
static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
						     u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

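/* For LLQ, the packet header is pushed inline into the descriptor list
 * entry, right after the descriptors themselves
 * (llq_info->descs_num_before_header of them). Host-placement queues
 * return early since they have no bounce buffer to fill.
 */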
static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
						 u8 *header_src,
						 u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) >
		     llq_info->desc_list_entry_size)) {
		pr_err("trying to write header larger than llq entry can accommodate\n");
		return -EFAULT;
	}

	if (unlikely(!bounce_buffer)) {
		pr_err("bounce buffer is NULL\n");
		return -EFAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

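/* LLQ descriptors are carved out of the current bounce buffer line; the
 * line is only written to the device once it is full or the packet is
 * complete (see ena_com_sq_update_llq_tail() and
 * ena_com_close_bounce_buffer() below).
 */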
static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		pr_err("bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

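/* Flush a partially used bounce buffer line to the device at the end of
 * a packet and start a fresh, zeroed line for the next one.
 */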
static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	/* bounce buffer was used, so write it and get a new one */
	if (pkt_ctrl->idx) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(pkt_ctrl->curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return 0;
}

static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

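/* LLQ tail update: descriptors advance within the current bounce buffer
 * line; only when the line is exhausted is it written to the device and
 * a new line prepared. With a single descriptor per entry the line holds
 * exactly one descriptor, otherwise it holds
 * desc_list_entry_size / desc_entry_size of them.
 */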
static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(pkt_ctrl->curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return 0;
}

static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

static inline struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		idx * io_cq->cdesc_entry_size_in_bytes);
}

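/* An RX packet may span several completion descriptors. Walk the CQ,
 * consuming cdescs until one with the LAST bit set is seen; if the packet
 * is still incomplete (the device hasn't written its final cdesc yet),
 * stash the partial count in the io_cq and report 0 so the caller retries
 * later.
 */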
static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
					   u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		pr_debug("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			 io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

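/* The TX meta descriptor only needs to be re-sent when the offload
 * metadata differs from what was last pushed to the device, so compare
 * against the cached copy in the io_sq.
 */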
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	int rc;

	if (ena_tx_ctx->meta_valid) {
		rc = memcmp(&io_sq->cached_tx_meta,
			    &ena_tx_ctx->ena_meta,
			    sizeof(struct ena_com_tx_meta));

		if (unlikely(rc != 0))
			return true;
	}

	return false;
}

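/* The 14-bit MSS is split across the meta descriptor: bits 0-9 go into
 * word2 and bits 10-13 into len_ctrl. For example, mss = 9000 (0x2328)
 * yields 0x328 in the low field and 0x8 in the high field, since
 * (8 << 10) | 0x328 == 9000.
 */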
static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
							struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
	struct ena_eth_io_tx_meta_desc *meta_desc;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return -EFAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= (ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
	meta_desc->len_ctrl |= (io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	/* Cache the meta desc so ena_com_meta_desc_changed() can detect
	 * when it needs to be re-sent
	 */
	memcpy(&io_sq->cached_tx_meta, ena_meta,
	       sizeof(struct ena_com_tx_meta));

	return ena_com_sq_update_tail(io_sq);
}

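/* Decode the completion status word into the RX context: protocol
 * indices, checksum results and the IPv4 fragment bit are all packed
 * into cdesc->status as mask/shift fields.
 */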
static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
					struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash: %x frag: %d cdesc_status: %x\n",
		 ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
		 ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
		 ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

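/* Prepare a TX packet for the device: verify there is room in the SQ
 * (num_bufs + 1, leaving space for a possible meta descriptor), push the
 * header for LLQ queues, emit a meta descriptor if the offload metadata
 * changed, then emit one descriptor per buffer. On success *nb_hw_desc
 * holds the number of hardware descriptors consumed.
 */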
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

	/* num_bufs + 1 for a potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		pr_debug("Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		pr_err("header size is too large %d max header: %d\n",
		       header_len, io_sq->tx_max_header_size);
		return -EINVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
		     !buffer_to_push))
		return -EINVAL;

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
			ena_tx_ctx);
	if (have_meta) {
		rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
		if (unlikely(rc))
			return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have a meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= (header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 of the req_id */
	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 of the req_id */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc))
				return rc;

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return -EFAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= (io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* Set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc))
		return rc;

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

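/* Retrieve one received packet from the completion queue. Returns 0 with
 * ena_rx_ctx->descs == 0 when no complete packet is pending; otherwise
 * fills one ena_buf entry (length and req_id) per completion descriptor
 * and decodes the flags of the packet's last descriptor.
 */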
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
		 nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
		       ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	for (i = 0; i < nb_hw_desc; i++) {
		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

		ena_buf->len = cdesc->length;
		ena_buf->req_id = cdesc->req_id;
		ena_buf++;
	}

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
		 io_sq->next_to_comp);

	/* Get rx flags from the last descriptor of the packet */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}

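/* Post a single RX buffer to the submission queue. A minimal refill loop
 * might look like this (sketch only; how paddr, len and req_id are
 * obtained is up to the caller):
 *
 *	struct ena_com_buf ena_buf = {
 *		.paddr = dma_addr,
 *		.len = buf_len,
 *	};
 *	rc = ena_com_add_single_rx_desc(io_sq, &ena_buf, next_req_id);
 *	if (unlikely(rc))
 *		break;
 */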
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
	desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

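/* Peek at the CQ without consuming anything: the head is not advanced,
 * so this only checks whether a cdesc with the current phase is present.
 */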
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	return ena_com_get_next_rx_cdesc(io_cq) == NULL;
}