/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#include "ena_com.h"

/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4

struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For regular queue, indicate the size of the header
	 * For LLQ, indicate the size of the pushed buffer
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	int max_bufs;
	u8 pkt_offset;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);

static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	writel(intr_reg->intr_control, io_cq->unmask_reg);
}

static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}
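
/* Sketch of a typical TX submission path built from the helpers in this
 * file (hypothetical driver code, not part of this API: bufs, num_bufs,
 * req_id and hdr_len are placeholders, and the extra headroom passed to
 * ena_com_sq_have_enough_space() is driver policy):
 *
 *	struct ena_com_tx_ctx tx_ctx = {};
 *	int nb_hw_desc, rc;
 *
 *	if (!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))
 *		return -ENOSPC;			// or stop the queue and retry
 *
 *	tx_ctx.ena_bufs = bufs;			// DMA-mapped fragments
 *	tx_ctx.num_bufs = num_bufs;
 *	tx_ctx.req_id = req_id;			// echoed back on completion
 *	tx_ctx.header_len = hdr_len;
 *
 *	rc = ena_com_prepare_tx(io_sq, &tx_ctx, &nb_hw_desc);
 *	if (!rc)
 *		ena_com_write_sq_doorbell(io_sq); // often batched per xmit burst
 */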

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_q_entries(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. To reduce the
	 * calculation overhead, just subtract two lines from the free
	 * descriptor count (one for the header line and one to compensate
	 * for the division rounding down).
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_q_entries(io_sq) > temp;
}

static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	u16 num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	if (llq_info->disable_meta_caching ||
	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	pr_debug("queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
		 num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
		 io_sq->qid, tail);

	writel(tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		pr_debug("reset available entries in tx burst for queue %d to %d\n",
			 io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	if (unlikely(io_cq->cq_head_db_reg)) {
		head = io_cq->head;
		unreported_comp = head - io_cq->last_head_update;
		need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

		if (unlikely(need_update)) {
			pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
				 io_cq->qid, head);
			writel(head, io_cq->cq_head_db_reg);
			io_cq->last_head_update = head;
		}
	}

	return 0;
}

static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}
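
/* Sketch of the matching TX completion pass using the helpers below
 * (hypothetical driver code; handle_tx_done() is a placeholder that
 * returns the number of SQ descriptors the completed packet consumed):
 *
 *	u16 req_id;
 *
 *	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0) {
 *		int nr_descs = handle_tx_done(req_id);	// driver bookkeeping
 *		ena_com_comp_ack(io_sq, nr_descs);	// release SQ entries
 *	}
 *	ena_com_update_dev_comp_head(io_cq);	// CQ head doorbell, if enabled
 */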

static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected one, it means that the device hasn't updated this
	 * completion yet.
	 */
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return -EAGAIN;

	dma_rmb();

	*req_id = READ_ONCE(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		pr_err("Invalid req id %d\n", *req_id);
		return -EINVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}

#endif /* ENA_ETH_COM_H_ */