// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "ena_eth_com.h"

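/* Return the RX completion descriptor at the current CQ head, or NULL if the
 * device has not written a new descriptor yet (phase bit mismatch).
 */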
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

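/* Return a pointer to the next free descriptor in a host-memory SQ. */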
static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

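/* Copy a completed bounce buffer line to the device (LLQ) memory at the
 * current tail and advance the tail, flipping the phase bit on wrap around.
 */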
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					       u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Error: trying to send more packets than tx burst allows\n");
			return -ENOSPC;
		}

		io_sq->entries_in_tx_burst_left--;
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
			   io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer,
			 (llq_info->desc_list_entry_size) / 8);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

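/* Copy the packet header into the current bounce buffer (LLQ mode only). */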
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Trying to write header larger than llq entry can accommodate\n");
		return -EFAULT;
	}

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
		return -EFAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

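/* Return the next descriptor slot inside the current bounce buffer line. */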
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

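/* Flush the current bounce buffer to the device if it holds any descriptors
 * and start a new, zeroed one.
 */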
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	/* bounce buffer was used, so write it and get a new one */
	if (likely(pkt_ctrl->idx)) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return 0;
}

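/* Return the next SQ descriptor, from the bounce buffer in LLQ mode or
 * directly from host memory otherwise.
 */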
static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

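/* Advance the LLQ producer position; when the current bounce buffer line is
 * full, write it to the device and start a new one.
 */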
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return 0;
}

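/* Advance the SQ tail after a descriptor has been filled in. */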
static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

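/* Translate a completion descriptor index into a pointer within the CQ ring. */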
static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		 idx * io_cq->cdesc_entry_size_in_bytes);
}

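/* Count the completion descriptors that belong to the next RX packet and
 * return that count, or 0 if the packet has not been fully completed yet.
 */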
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
			   io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

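/* Build a TX meta descriptor (MSS, header lengths and offsets) and advance
 * the SQ tail.
 */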
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return -EFAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

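/* Create a TX meta descriptor if needed; unless meta caching is disabled,
 * reuse the cached meta when it has not changed.
 */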
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When disable meta caching is set, don't bother to save the meta and
	 * compare it to the stored version, just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return 0;
}

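/* Extract protocol, checksum and hash information from an RX completion
 * descriptor into the RX context.
 */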
static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
				 struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err,
		   ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

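/* Post a TX packet: write the pushed header (LLQ), the optional meta
 * descriptor and one data descriptor per buffer, then report how many
 * hardware descriptors were consumed via nb_hw_desc.
 */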
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

	/* num_bufs +1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Header size is too large %d max header: %d\n", header_len,
			   io_sq->tx_max_header_size);
		return -EINVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Push header wasn't provided in LLQ mode\n");
		return -EINVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (rc)
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
					   "Failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return -EFAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

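/* Retrieve a received packet from the CQ: fill ena_rx_ctx with the buffers,
 * lengths and flags of the completed packet, or set descs to 0 if no packet
 * is ready.
 */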
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return -EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
		   io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;

	return 0;
}

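/* Post a single RX buffer to the RX SQ. */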
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

	desc->req_id = req_id;

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
		   req_id);

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

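/* Return true if the completion queue has no new descriptors. */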
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}