// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic_common.h"
#include "tx_common.h"

static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EFX_TX_CB_ORDER);
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}
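/* Worked example for the sizing above (illustrative only, and assuming
 * EFX_MIN_DMAQ_SIZE does not dominate): with efx->txq_entries == 1000,
 * roundup_pow_of_two() yields 1024 entries and ptr_mask == 0x3ff.  The
 * read/write counters below are free-running and are only reduced to ring
 * indices at the point of use, e.g. "read_count & tx_queue->ptr_mask",
 * which lets fill level be computed as a plain difference of counters
 * (see efx_xmit_done()).
 */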
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->notify_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);

	/* Set up default function pointers. These may get replaced by
	 * efx_nic_init_tx() based on NIC/queue capabilities.
	 */
	tx_queue->handle_tso = efx_enqueue_skb_tso;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}

void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any(skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}
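/* Note on the unmap path above: a single mapped fragment may be split
 * across several descriptors (see efx_tx_map_chunk()), but only the final
 * descriptor for the fragment carries a non-zero unmap_len.  Subtracting
 * dma_offset recovers the address originally returned by dma_map_single()
 * or dma_map_page(), so each mapping is released exactly once.
 */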
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!efx_tx_buffer_in_use(buffer)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %d\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Record the queue as empty once the reader has caught up with the most
 * recently observed write pointer.
 */
void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			/* Ensure that read_count is flushed. */
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue. This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	efx_xmit_done_check_empty(tx_queue);
}

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers must have an skb attached.
 */
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			unsigned int insert_count)
{
	struct efx_tx_buffer *buffer;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}
}

struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
				       dma_addr_t dma_addr, size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);

		if (nic_type->tx_limit_len)
			dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
		else
			dma_len = len;

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}
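/* Illustrative example for efx_tx_map_chunk() above: if a hypothetical
 * nic_type->tx_limit_len() capped each descriptor at 16KB, mapping a 40KB
 * fragment would emit three descriptors (16KB + 16KB + 8KB), all flagged
 * EFX_TX_BUF_CONT.  The buffer returned is the final one, whose flags and
 * unmap fields the caller rewrites (see efx_tx_map_data() below); if a
 * later mapping fails, the caller is expected to roll back the partial
 * packet with efx_enqueue_unwind(), using the insert_count it captured
 * before it started mapping.
 */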
/* Return the length of the packet headers that precede the TSO payload,
 * allowing for encapsulated (tunnelled) TCP.
 */
int efx_tx_tso_header_length(struct sk_buff *skb)
{
	size_t header_len;

	if (skb->encapsulation)
		header_len = skb_inner_transport_header(skb) -
			     skb->data +
			     (inner_tcp_hdr(skb)->doff << 2u);
	else
		header_len = skb_transport_header(skb) - skb->data +
			     (tcp_hdr(skb)->doff << 2u);
	return header_len;
}

/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header into a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = efx_tx_tso_header_length(skb);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}
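/* Worked example for the bound above (illustrative values, not taken from
 * the driver headers): if EFX_TSO_MAX_SEGS were 100 and MAX_SKB_FRAGS 17,
 * the baseline would be 2 * 100 + 17 = 217 descriptors, rising to 317 on
 * EF10-class (EFX_REV_HUNT_A0 and later) NICs, which may emit one option
 * descriptor per segment.  This is a worst case suitable for ring-space
 * checks, not a typical per-packet cost.
 */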
/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);

	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
	}

	return 0;
}
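/* Note (an assumption about the callers, not enforced in this file): the
 * enqueue path is expected to try tx_queue->handle_tso() first and fall
 * back to efx_tx_tso_fallback() only when the hardware path declines the
 * packet due to a queue-specific restriction.  skb_gso_segment() then
 * yields ordinary, non-GSO skbs that re-enter efx_enqueue_skb() one at a
 * time.
 */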