/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#include "ena_com.h"

/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4
/* we allow 2 DMA descriptors per LLQ entry */
#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE	(2 * sizeof(struct ena_eth_io_tx_desc))
#define ENA_LLQ_HEADER		(128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
#define ENA_LLQ_LARGE_HEADER	(256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)

struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For regular queue, indicate the size of the header
	 * For LLQ, indicate the size of the pushed buffer
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	int max_bufs;
	u8 pkt_offset;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);

static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	writel(intr_reg->intr_control, io_cq->unmask_reg);
}

static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}

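/* Illustrative sketch only (not part of this header): a plausible TX
 * submission sequence built from the helpers declared above and defined
 * below. tx_ring, ctx, num_descs, nb_hw_desc and rc are hypothetical
 * caller-side variables, shown only to clarify the intended calling order.
 *
 *	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, num_descs))
 *		return NETDEV_TX_BUSY;	// back off, the SQ is (nearly) full
 *
 *	// An LLQ tx burst may need to be flushed before adding more entries
 *	if (ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ctx))
 *		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
 *
 *	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ctx, &nb_hw_desc);
 *	if (rc)
 *		goto error_unmap_dma;
 *
 *	// When no more packets are pending, make the new entries visible
 *	ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
 */
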
/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_q_entries(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. To reduce the
	 * calculation overhead, just subtract 2 lines from the free
	 * descriptors (one for the header line and one to compensate for
	 * the division rounding down).
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_q_entries(io_sq) > temp;
}

static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	u16 num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	if (llq_info->disable_meta_caching ||
	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs,
		   num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail);

	writel(tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Reset available entries in tx burst for queue %d to %d\n", io_sq->qid,
			   max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	if (unlikely(io_cq->cq_head_db_reg)) {
		head = io_cq->head;
		unreported_comp = head - io_cq->last_head_update;
		need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

		if (unlikely(need_update)) {
			netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
				   "Write completion queue doorbell for queue %d: head: %d\n",
				   io_cq->qid, head);
			writel(head, io_cq->cq_head_db_reg);
			io_cq->last_head_update = head;
		}
	}

	return 0;
}

static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

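/* Illustrative sketch only (not part of this header): a plausible TX
 * completion-reaping loop over the helpers above and below. tx_ring,
 * free_tx_buffer() and total_done are hypothetical caller-side names.
 *
 *	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0) {
 *		// unmap and free the buffer; assume it returns the descriptor count
 *		total_done += free_tx_buffer(tx_ring, req_id);
 *	}
 *	ena_com_comp_ack(io_sq, total_done);	// hand the descriptors back to the SQ
 *	ena_com_update_dev_comp_head(io_cq);	// lazily report the new CQ head, if enabled
 */
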
static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected phase, it means the device has not written this completion
	 * entry yet.
	 */
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return -EAGAIN;

	/* Make sure the rest of the descriptor is read only after the phase
	 * bit has been validated.
	 */
	dma_rmb();

	*req_id = READ_ONCE(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Invalid req id %d\n",
			   cdesc->req_id);
		return -EINVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}

#endif /* ENA_ETH_COM_H_ */