// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 *
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}

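/* Programming or removing one Flow Director rule consumes two descriptors on
 * the FDIR Tx ring: the filter-program descriptor filled in by i40e_fdir()
 * above, and a dummy data descriptor that carries the pre-built raw packet
 * (see i40e_program_fdir_filter() below).
 */
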
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

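/* Ownership note: once the descriptors are handed to hardware above, the
 * raw_packet buffer belongs to the FDIR Tx ring (it is tagged with
 * I40E_TX_FLAGS_FD_SB and stashed in tx_buf->raw_buf) and is kfree()d when
 * the dummy descriptor is cleaned. The callers below therefore free the
 * buffer themselves only when this function returns an error.
 */
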
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	udp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_udp4_filter_cnt++;
	else
		pf->fd_udp4_filter_cnt--;

	return 0;
}

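/* The dummy packets above and below are minimal Ethernet/IPv4 frames used
 * only as templates for the Flow Director programming descriptor: bytes 12-13
 * are the 0x0800 (IPv4) ethertype, 0x45 is IP version 4 with a 5-word header,
 * 0x40 0x00 sets the DF flag, the TTL is 0x40, and the protocol byte selects
 * UDP (0x11), TCP (0x06), SCTP (0x84) or no L4 header. The address and port
 * fields that matter are then patched in from the user-supplied filter data.
 */
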
#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	tcp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add) {
		pf->fd_tcp4_filter_cnt++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
	} else {
		pf->fd_tcp4_filter_cnt--;
	}

	return 0;
}

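/* Note: adding a sideband TCP/IPv4 rule also sets __I40E_FD_ATR_AUTO_DISABLED
 * above, so ATR stops programming its own TCP filters while explicit sideband
 * rules are installed.
 */
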
#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct sctphdr *sctp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
	       + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	sctp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	sctp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_sctp4_filter_cnt++;
	else
		pf->fd_sctp4_filter_cnt--;

	return 0;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip;
		ip->daddr = fd_data->dst_ip;
		ip->protocol = 0;

		if (fd_data->flex_filter) {
			u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
			__be16 pattern = fd_data->flex_word;
			u16 off = fd_data->flex_offset;

			*((__force __be16 *)(payload + off)) = pattern;
		}

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			/* The packet buffer wasn't added to the ring so we
			 * need to free it now.
			 */
			kfree(raw_packet);
			return -EOPNOTSUPP;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (add)
		pf->fd_ip4_filter_cnt++;
	else
		pf->fd_ip4_filter_cnt--;

	return 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		default:
			/* We cannot support masking based on protocol */
			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
				 input->ip4_proto);
			return -EINVAL;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
			 input->flow_type);
		return -EINVAL;
	}

	/* The buffer allocated here will normally be freed by
	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
	 * completion. In the event of an error adding the buffer to the FDIR
	 * ring, it will immediately be freed. It may also be freed by
	 * i40e_clean_tx_ring() when closing the VSI.
	 */
	return ret;
}

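/* Illustrative only: the expected caller (in this driver, the ethtool ntuple
 * path) hands in an i40e_fdir_filter whose tuple fields are already in
 * network byte order, since they are copied straight into the dummy headers
 * above. Roughly:
 *
 *	struct i40e_fdir_filter f = {};
 *	int err;
 *
 *	f.flow_type = TCP_V4_FLOW;
 *	f.dst_ip = cpu_to_be32(0xc0a80001);	(192.168.0.1)
 *	f.dst_port = cpu_to_be16(80);
 *	err = i40e_add_del_fdir(vsi, &f, true);
 *
 * where vsi is the VSI the rule targets; any unset fields stay zero.
 */
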
/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
			/* These set_bit() calls aren't atomic with the
			 * test_bit() here, but worse case we potentially
			 * disable ATR and queue a flush right after SB
			 * support is re-enabled. That shouldn't cause an
			 * issue in practice
			 */
			set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
					      pf->state))
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else if (ring_is_xdp(ring))
			xdp_return_frame(tx_buffer->xdpf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

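/* i40e_clean_tx_ring() above only drops the buffers and zeroes the
 * descriptors so the ring can be reused; i40e_free_tx_resources() below
 * additionally releases the tx_bi array and the DMA-coherent descriptor
 * memory when the ring itself is being torn down.
 */
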
670 671 /** 672 * i40e_free_tx_resources - Free Tx resources per queue 673 * @tx_ring: Tx descriptor ring for a specific queue 674 * 675 * Free all transmit software resources 676 **/ 677 void i40e_free_tx_resources(struct i40e_ring *tx_ring) 678 { 679 i40e_clean_tx_ring(tx_ring); 680 kfree(tx_ring->tx_bi); 681 tx_ring->tx_bi = NULL; 682 683 if (tx_ring->desc) { 684 dma_free_coherent(tx_ring->dev, tx_ring->size, 685 tx_ring->desc, tx_ring->dma); 686 tx_ring->desc = NULL; 687 } 688 } 689 690 /** 691 * i40e_get_tx_pending - how many tx descriptors not processed 692 * @ring: the ring of descriptors 693 * @in_sw: use SW variables 694 * 695 * Since there is no access to the ring head register 696 * in XL710, we need to use our local copies 697 **/ 698 u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw) 699 { 700 u32 head, tail; 701 702 if (!in_sw) { 703 head = i40e_get_head(ring); 704 tail = readl(ring->tail); 705 } else { 706 head = ring->next_to_clean; 707 tail = ring->next_to_use; 708 } 709 710 if (head != tail) 711 return (head < tail) ? 712 tail - head : (tail + ring->count - head); 713 714 return 0; 715 } 716 717 /** 718 * i40e_detect_recover_hung - Function to detect and recover hung_queues 719 * @vsi: pointer to vsi struct with tx queues 720 * 721 * VSI has netdev and netdev has TX queues. This function is to check each of 722 * those TX queues if they are hung, trigger recovery by issuing SW interrupt. 723 **/ 724 void i40e_detect_recover_hung(struct i40e_vsi *vsi) 725 { 726 struct i40e_ring *tx_ring = NULL; 727 struct net_device *netdev; 728 unsigned int i; 729 int packets; 730 731 if (!vsi) 732 return; 733 734 if (test_bit(__I40E_VSI_DOWN, vsi->state)) 735 return; 736 737 netdev = vsi->netdev; 738 if (!netdev) 739 return; 740 741 if (!netif_carrier_ok(netdev)) 742 return; 743 744 for (i = 0; i < vsi->num_queue_pairs; i++) { 745 tx_ring = vsi->tx_rings[i]; 746 if (tx_ring && tx_ring->desc) { 747 /* If packet counter has not changed the queue is 748 * likely stalled, so force an interrupt for this 749 * queue. 750 * 751 * prev_pkt_ctr would be negative if there was no 752 * pending work. 753 */ 754 packets = tx_ring->stats.packets & INT_MAX; 755 if (tx_ring->tx_stats.prev_pkt_ctr == packets) { 756 i40e_force_wb(vsi, tx_ring->q_vector); 757 continue; 758 } 759 760 /* Memory barrier between read of packet count and call 761 * to i40e_get_tx_pending() 762 */ 763 smp_rmb(); 764 tx_ring->tx_stats.prev_pkt_ctr = 765 i40e_get_tx_pending(tx_ring, true) ? packets : -1; 766 } 767 } 768 } 769 770 #define WB_STRIDE 4 771 772 /** 773 * i40e_clean_tx_irq - Reclaim resources after transmit completes 774 * @vsi: the VSI we care about 775 * @tx_ring: Tx ring to clean 776 * @napi_budget: Used to determine if we are in netpoll 777 * 778 * Returns true if there's any budget left (e.g. 
the clean is finished) 779 **/ 780 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, 781 struct i40e_ring *tx_ring, int napi_budget) 782 { 783 u16 i = tx_ring->next_to_clean; 784 struct i40e_tx_buffer *tx_buf; 785 struct i40e_tx_desc *tx_head; 786 struct i40e_tx_desc *tx_desc; 787 unsigned int total_bytes = 0, total_packets = 0; 788 unsigned int budget = vsi->work_limit; 789 790 tx_buf = &tx_ring->tx_bi[i]; 791 tx_desc = I40E_TX_DESC(tx_ring, i); 792 i -= tx_ring->count; 793 794 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); 795 796 do { 797 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 798 799 /* if next_to_watch is not set then there is no work pending */ 800 if (!eop_desc) 801 break; 802 803 /* prevent any other reads prior to eop_desc */ 804 smp_rmb(); 805 806 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); 807 /* we have caught up to head, no work left to do */ 808 if (tx_head == tx_desc) 809 break; 810 811 /* clear next_to_watch to prevent false hangs */ 812 tx_buf->next_to_watch = NULL; 813 814 /* update the statistics for this packet */ 815 total_bytes += tx_buf->bytecount; 816 total_packets += tx_buf->gso_segs; 817 818 /* free the skb/XDP data */ 819 if (ring_is_xdp(tx_ring)) 820 xdp_return_frame(tx_buf->xdpf); 821 else 822 napi_consume_skb(tx_buf->skb, napi_budget); 823 824 /* unmap skb header data */ 825 dma_unmap_single(tx_ring->dev, 826 dma_unmap_addr(tx_buf, dma), 827 dma_unmap_len(tx_buf, len), 828 DMA_TO_DEVICE); 829 830 /* clear tx_buffer data */ 831 tx_buf->skb = NULL; 832 dma_unmap_len_set(tx_buf, len, 0); 833 834 /* unmap remaining buffers */ 835 while (tx_desc != eop_desc) { 836 i40e_trace(clean_tx_irq_unmap, 837 tx_ring, tx_desc, tx_buf); 838 839 tx_buf++; 840 tx_desc++; 841 i++; 842 if (unlikely(!i)) { 843 i -= tx_ring->count; 844 tx_buf = tx_ring->tx_bi; 845 tx_desc = I40E_TX_DESC(tx_ring, 0); 846 } 847 848 /* unmap any remaining paged data */ 849 if (dma_unmap_len(tx_buf, len)) { 850 dma_unmap_page(tx_ring->dev, 851 dma_unmap_addr(tx_buf, dma), 852 dma_unmap_len(tx_buf, len), 853 DMA_TO_DEVICE); 854 dma_unmap_len_set(tx_buf, len, 0); 855 } 856 } 857 858 /* move us one more past the eop_desc for start of next pkt */ 859 tx_buf++; 860 tx_desc++; 861 i++; 862 if (unlikely(!i)) { 863 i -= tx_ring->count; 864 tx_buf = tx_ring->tx_bi; 865 tx_desc = I40E_TX_DESC(tx_ring, 0); 866 } 867 868 prefetch(tx_desc); 869 870 /* update budget accounting */ 871 budget--; 872 } while (likely(budget)); 873 874 i += tx_ring->count; 875 tx_ring->next_to_clean = i; 876 u64_stats_update_begin(&tx_ring->syncp); 877 tx_ring->stats.bytes += total_bytes; 878 tx_ring->stats.packets += total_packets; 879 u64_stats_update_end(&tx_ring->syncp); 880 tx_ring->q_vector->tx.total_bytes += total_bytes; 881 tx_ring->q_vector->tx.total_packets += total_packets; 882 883 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { 884 /* check to see if there are < 4 descriptors 885 * waiting to be written back, then kick the hardware to force 886 * them to be written back in case we stay in NAPI. 887 * In this mode on X722 we do not enable Interrupt. 
888 */ 889 unsigned int j = i40e_get_tx_pending(tx_ring, false); 890 891 if (budget && 892 ((j / WB_STRIDE) == 0) && (j > 0) && 893 !test_bit(__I40E_VSI_DOWN, vsi->state) && 894 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) 895 tx_ring->arm_wb = true; 896 } 897 898 if (ring_is_xdp(tx_ring)) 899 return !!budget; 900 901 /* notify netdev of completed buffers */ 902 netdev_tx_completed_queue(txring_txq(tx_ring), 903 total_packets, total_bytes); 904 905 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) 906 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 907 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 908 /* Make sure that anybody stopping the queue after this 909 * sees the new next_to_clean. 910 */ 911 smp_mb(); 912 if (__netif_subqueue_stopped(tx_ring->netdev, 913 tx_ring->queue_index) && 914 !test_bit(__I40E_VSI_DOWN, vsi->state)) { 915 netif_wake_subqueue(tx_ring->netdev, 916 tx_ring->queue_index); 917 ++tx_ring->tx_stats.restart_queue; 918 } 919 } 920 921 return !!budget; 922 } 923 924 /** 925 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled 926 * @vsi: the VSI we care about 927 * @q_vector: the vector on which to enable writeback 928 * 929 **/ 930 static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, 931 struct i40e_q_vector *q_vector) 932 { 933 u16 flags = q_vector->tx.ring[0].flags; 934 u32 val; 935 936 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) 937 return; 938 939 if (q_vector->arm_wb_state) 940 return; 941 942 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { 943 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK | 944 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */ 945 946 wr32(&vsi->back->hw, 947 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), 948 val); 949 } else { 950 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK | 951 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */ 952 953 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); 954 } 955 q_vector->arm_wb_state = true; 956 } 957 958 /** 959 * i40e_force_wb - Issue SW Interrupt so HW does a wb 960 * @vsi: the VSI we care about 961 * @q_vector: the vector on which to force writeback 962 * 963 **/ 964 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) 965 { 966 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { 967 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 968 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ 969 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | 970 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; 971 /* allow 00 to be written to the index */ 972 973 wr32(&vsi->back->hw, 974 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val); 975 } else { 976 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | 977 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */ 978 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | 979 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK; 980 /* allow 00 to be written to the index */ 981 982 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); 983 } 984 } 985 986 static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector, 987 struct i40e_ring_container *rc) 988 { 989 return &q_vector->rx == rc; 990 } 991 992 static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) 993 { 994 unsigned int divisor; 995 996 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) { 997 case I40E_LINK_SPEED_40GB: 998 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024; 999 break; 1000 case I40E_LINK_SPEED_25GB: 1001 case I40E_LINK_SPEED_20GB: 1002 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512; 1003 break; 1004 default: 1005 case I40E_LINK_SPEED_10GB: 1006 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256; 1007 
break; 1008 case I40E_LINK_SPEED_1GB: 1009 case I40E_LINK_SPEED_100MB: 1010 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32; 1011 break; 1012 } 1013 1014 return divisor; 1015 } 1016 1017 /** 1018 * i40e_update_itr - update the dynamic ITR value based on statistics 1019 * @q_vector: structure containing interrupt and ring information 1020 * @rc: structure containing ring performance data 1021 * 1022 * Stores a new ITR value based on packets and byte 1023 * counts during the last interrupt. The advantage of per interrupt 1024 * computation is faster updates and more accurate ITR for the current 1025 * traffic pattern. Constants in this function were computed 1026 * based on theoretical maximum wire speed and thresholds were set based 1027 * on testing data as well as attempting to minimize response time 1028 * while increasing bulk throughput. 1029 **/ 1030 static void i40e_update_itr(struct i40e_q_vector *q_vector, 1031 struct i40e_ring_container *rc) 1032 { 1033 unsigned int avg_wire_size, packets, bytes, itr; 1034 unsigned long next_update = jiffies; 1035 1036 /* If we don't have any rings just leave ourselves set for maximum 1037 * possible latency so we take ourselves out of the equation. 1038 */ 1039 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting)) 1040 return; 1041 1042 /* For Rx we want to push the delay up and default to low latency. 1043 * for Tx we want to pull the delay down and default to high latency. 1044 */ 1045 itr = i40e_container_is_rx(q_vector, rc) ? 1046 I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY : 1047 I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY; 1048 1049 /* If we didn't update within up to 1 - 2 jiffies we can assume 1050 * that either packets are coming in so slow there hasn't been 1051 * any work, or that there is so much work that NAPI is dealing 1052 * with interrupt moderation and we don't need to do anything. 1053 */ 1054 if (time_after(next_update, rc->next_update)) 1055 goto clear_counts; 1056 1057 /* If itr_countdown is set it means we programmed an ITR within 1058 * the last 4 interrupt cycles. This has a side effect of us 1059 * potentially firing an early interrupt. In order to work around 1060 * this we need to throw out any data received for a few 1061 * interrupts following the update. 1062 */ 1063 if (q_vector->itr_countdown) { 1064 itr = rc->target_itr; 1065 goto clear_counts; 1066 } 1067 1068 packets = rc->total_packets; 1069 bytes = rc->total_bytes; 1070 1071 if (i40e_container_is_rx(q_vector, rc)) { 1072 /* If Rx there are 1 to 4 packets and bytes are less than 1073 * 9000 assume insufficient data to use bulk rate limiting 1074 * approach unless Tx is already in bulk rate limiting. We 1075 * are likely latency driven. 1076 */ 1077 if (packets && packets < 4 && bytes < 9000 && 1078 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) { 1079 itr = I40E_ITR_ADAPTIVE_LATENCY; 1080 goto adjust_by_size; 1081 } 1082 } else if (packets < 4) { 1083 /* If we have Tx and Rx ITR maxed and Tx ITR is running in 1084 * bulk mode and we are receiving 4 or fewer packets just 1085 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so 1086 * that the Rx can relax. 1087 */ 1088 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS && 1089 (q_vector->rx.target_itr & I40E_ITR_MASK) == 1090 I40E_ITR_ADAPTIVE_MAX_USECS) 1091 goto clear_counts; 1092 } else if (packets > 32) { 1093 /* If we have processed over 32 packets in a single interrupt 1094 * for Tx assume we need to switch over to "bulk" mode. 
1095 */ 1096 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY; 1097 } 1098 1099 /* We have no packets to actually measure against. This means 1100 * either one of the other queues on this vector is active or 1101 * we are a Tx queue doing TSO with too high of an interrupt rate. 1102 * 1103 * Between 4 and 56 we can assume that our current interrupt delay 1104 * is only slightly too low. As such we should increase it by a small 1105 * fixed amount. 1106 */ 1107 if (packets < 56) { 1108 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC; 1109 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { 1110 itr &= I40E_ITR_ADAPTIVE_LATENCY; 1111 itr += I40E_ITR_ADAPTIVE_MAX_USECS; 1112 } 1113 goto clear_counts; 1114 } 1115 1116 if (packets <= 256) { 1117 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); 1118 itr &= I40E_ITR_MASK; 1119 1120 /* Between 56 and 112 is our "goldilocks" zone where we are 1121 * working out "just right". Just report that our current 1122 * ITR is good for us. 1123 */ 1124 if (packets <= 112) 1125 goto clear_counts; 1126 1127 /* If packet count is 128 or greater we are likely looking 1128 * at a slight overrun of the delay we want. Try halving 1129 * our delay to see if that will cut the number of packets 1130 * in half per interrupt. 1131 */ 1132 itr /= 2; 1133 itr &= I40E_ITR_MASK; 1134 if (itr < I40E_ITR_ADAPTIVE_MIN_USECS) 1135 itr = I40E_ITR_ADAPTIVE_MIN_USECS; 1136 1137 goto clear_counts; 1138 } 1139 1140 /* The paths below assume we are dealing with a bulk ITR since 1141 * number of packets is greater than 256. We are just going to have 1142 * to compute a value and try to bring the count under control, 1143 * though for smaller packet sizes there isn't much we can do as 1144 * NAPI polling will likely be kicking in sooner rather than later. 1145 */ 1146 itr = I40E_ITR_ADAPTIVE_BULK; 1147 1148 adjust_by_size: 1149 /* If packet counts are 256 or greater we can assume we have a gross 1150 * overestimation of what the rate should be. Instead of trying to fine 1151 * tune it just use the formula below to try and dial in an exact value 1152 * give the current packet size of the frame. 1153 */ 1154 avg_wire_size = bytes / packets; 1155 1156 /* The following is a crude approximation of: 1157 * wmem_default / (size + overhead) = desired_pkts_per_int 1158 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate 1159 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value 1160 * 1161 * Assuming wmem_default is 212992 and overhead is 640 bytes per 1162 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the 1163 * formula down to 1164 * 1165 * (170 * (size + 24)) / (size + 640) = ITR 1166 * 1167 * We first do some math on the packet size and then finally bitshift 1168 * by 8 after rounding up. We also have to account for PCIe link speed 1169 * difference as ITR scales based on this. 
1170 */ 1171 if (avg_wire_size <= 60) { 1172 /* Start at 250k ints/sec */ 1173 avg_wire_size = 4096; 1174 } else if (avg_wire_size <= 380) { 1175 /* 250K ints/sec to 60K ints/sec */ 1176 avg_wire_size *= 40; 1177 avg_wire_size += 1696; 1178 } else if (avg_wire_size <= 1084) { 1179 /* 60K ints/sec to 36K ints/sec */ 1180 avg_wire_size *= 15; 1181 avg_wire_size += 11452; 1182 } else if (avg_wire_size <= 1980) { 1183 /* 36K ints/sec to 30K ints/sec */ 1184 avg_wire_size *= 5; 1185 avg_wire_size += 22420; 1186 } else { 1187 /* plateau at a limit of 30K ints/sec */ 1188 avg_wire_size = 32256; 1189 } 1190 1191 /* If we are in low latency mode halve our delay which doubles the 1192 * rate to somewhere between 100K to 16K ints/sec 1193 */ 1194 if (itr & I40E_ITR_ADAPTIVE_LATENCY) 1195 avg_wire_size /= 2; 1196 1197 /* Resultant value is 256 times larger than it needs to be. This 1198 * gives us room to adjust the value as needed to either increase 1199 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. 1200 * 1201 * Use addition as we have already recorded the new latency flag 1202 * for the ITR value. 1203 */ 1204 itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) * 1205 I40E_ITR_ADAPTIVE_MIN_INC; 1206 1207 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { 1208 itr &= I40E_ITR_ADAPTIVE_LATENCY; 1209 itr += I40E_ITR_ADAPTIVE_MAX_USECS; 1210 } 1211 1212 clear_counts: 1213 /* write back value */ 1214 rc->target_itr = itr; 1215 1216 /* next update should occur within next jiffy */ 1217 rc->next_update = next_update + 1; 1218 1219 rc->total_bytes = 0; 1220 rc->total_packets = 0; 1221 } 1222 1223 /** 1224 * i40e_reuse_rx_page - page flip buffer and store it back on the ring 1225 * @rx_ring: rx descriptor ring to store buffers on 1226 * @old_buff: donor buffer to have page reused 1227 * 1228 * Synchronizes page for reuse by the adapter 1229 **/ 1230 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, 1231 struct i40e_rx_buffer *old_buff) 1232 { 1233 struct i40e_rx_buffer *new_buff; 1234 u16 nta = rx_ring->next_to_alloc; 1235 1236 new_buff = &rx_ring->rx_bi[nta]; 1237 1238 /* update, and store next to alloc */ 1239 nta++; 1240 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 1241 1242 /* transfer page from old buffer to new buffer */ 1243 new_buff->dma = old_buff->dma; 1244 new_buff->page = old_buff->page; 1245 new_buff->page_offset = old_buff->page_offset; 1246 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 1247 } 1248 1249 /** 1250 * i40e_rx_is_programming_status - check for programming status descriptor 1251 * @qw: qword representing status_error_len in CPU ordering 1252 * 1253 * The value of in the descriptor length field indicate if this 1254 * is a programming status descriptor for flow director or FCoE 1255 * by the value of I40E_RX_PROG_STATUS_DESC_LENGTH, otherwise 1256 * it is a packet descriptor. 1257 **/ 1258 static inline bool i40e_rx_is_programming_status(u64 qw) 1259 { 1260 /* The Rx filter programming status and SPH bit occupy the same 1261 * spot in the descriptor. Since we don't support packet split we 1262 * can just reuse the bit as an indication that this is a 1263 * programming status descriptor. 
1264 */ 1265 return qw & I40E_RXD_QW1_LENGTH_SPH_MASK; 1266 } 1267 1268 /** 1269 * i40e_clean_programming_status - clean the programming status descriptor 1270 * @rx_ring: the rx ring that has this descriptor 1271 * @rx_desc: the rx descriptor written back by HW 1272 * @qw: qword representing status_error_len in CPU ordering 1273 * 1274 * Flow director should handle FD_FILTER_STATUS to check its filter programming 1275 * status being successful or not and take actions accordingly. FCoE should 1276 * handle its context/filter programming/invalidation status and take actions. 1277 * 1278 **/ 1279 static void i40e_clean_programming_status(struct i40e_ring *rx_ring, 1280 union i40e_rx_desc *rx_desc, 1281 u64 qw) 1282 { 1283 struct i40e_rx_buffer *rx_buffer; 1284 u32 ntc = rx_ring->next_to_clean; 1285 u8 id; 1286 1287 /* fetch, update, and store next to clean */ 1288 rx_buffer = &rx_ring->rx_bi[ntc++]; 1289 ntc = (ntc < rx_ring->count) ? ntc : 0; 1290 rx_ring->next_to_clean = ntc; 1291 1292 prefetch(I40E_RX_DESC(rx_ring, ntc)); 1293 1294 /* place unused page back on the ring */ 1295 i40e_reuse_rx_page(rx_ring, rx_buffer); 1296 rx_ring->rx_stats.page_reuse_count++; 1297 1298 /* clear contents of buffer_info */ 1299 rx_buffer->page = NULL; 1300 1301 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> 1302 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; 1303 1304 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) 1305 i40e_fd_handle_status(rx_ring, rx_desc, id); 1306 } 1307 1308 /** 1309 * i40e_setup_tx_descriptors - Allocate the Tx descriptors 1310 * @tx_ring: the tx ring to set up 1311 * 1312 * Return 0 on success, negative on error 1313 **/ 1314 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) 1315 { 1316 struct device *dev = tx_ring->dev; 1317 int bi_size; 1318 1319 if (!dev) 1320 return -ENOMEM; 1321 1322 /* warn if we are about to overwrite the pointer */ 1323 WARN_ON(tx_ring->tx_bi); 1324 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; 1325 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); 1326 if (!tx_ring->tx_bi) 1327 goto err; 1328 1329 u64_stats_init(&tx_ring->syncp); 1330 1331 /* round up to nearest 4K */ 1332 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 1333 /* add u32 for head writeback, align after this takes care of 1334 * guaranteeing this is at least one cache line in size 1335 */ 1336 tx_ring->size += sizeof(u32); 1337 tx_ring->size = ALIGN(tx_ring->size, 4096); 1338 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 1339 &tx_ring->dma, GFP_KERNEL); 1340 if (!tx_ring->desc) { 1341 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", 1342 tx_ring->size); 1343 goto err; 1344 } 1345 1346 tx_ring->next_to_use = 0; 1347 tx_ring->next_to_clean = 0; 1348 tx_ring->tx_stats.prev_pkt_ctr = -1; 1349 return 0; 1350 1351 err: 1352 kfree(tx_ring->tx_bi); 1353 tx_ring->tx_bi = NULL; 1354 return -ENOMEM; 1355 } 1356 1357 /** 1358 * i40e_clean_rx_ring - Free Rx buffers 1359 * @rx_ring: ring to be cleaned 1360 **/ 1361 void i40e_clean_rx_ring(struct i40e_ring *rx_ring) 1362 { 1363 unsigned long bi_size; 1364 u16 i; 1365 1366 /* ring already cleared, nothing to do */ 1367 if (!rx_ring->rx_bi) 1368 return; 1369 1370 if (rx_ring->skb) { 1371 dev_kfree_skb(rx_ring->skb); 1372 rx_ring->skb = NULL; 1373 } 1374 1375 /* Free all the Rx ring sk_buffs */ 1376 for (i = 0; i < rx_ring->count; i++) { 1377 struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; 1378 1379 if (!rx_bi->page) 1380 continue; 1381 1382 /* Invalidate cache lines that may 
have been written to by 1383 * device so that we avoid corrupting memory. 1384 */ 1385 dma_sync_single_range_for_cpu(rx_ring->dev, 1386 rx_bi->dma, 1387 rx_bi->page_offset, 1388 rx_ring->rx_buf_len, 1389 DMA_FROM_DEVICE); 1390 1391 /* free resources associated with mapping */ 1392 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, 1393 i40e_rx_pg_size(rx_ring), 1394 DMA_FROM_DEVICE, 1395 I40E_RX_DMA_ATTR); 1396 1397 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); 1398 1399 rx_bi->page = NULL; 1400 rx_bi->page_offset = 0; 1401 } 1402 1403 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; 1404 memset(rx_ring->rx_bi, 0, bi_size); 1405 1406 /* Zero out the descriptor ring */ 1407 memset(rx_ring->desc, 0, rx_ring->size); 1408 1409 rx_ring->next_to_alloc = 0; 1410 rx_ring->next_to_clean = 0; 1411 rx_ring->next_to_use = 0; 1412 } 1413 1414 /** 1415 * i40e_free_rx_resources - Free Rx resources 1416 * @rx_ring: ring to clean the resources from 1417 * 1418 * Free all receive software resources 1419 **/ 1420 void i40e_free_rx_resources(struct i40e_ring *rx_ring) 1421 { 1422 i40e_clean_rx_ring(rx_ring); 1423 if (rx_ring->vsi->type == I40E_VSI_MAIN) 1424 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 1425 rx_ring->xdp_prog = NULL; 1426 kfree(rx_ring->rx_bi); 1427 rx_ring->rx_bi = NULL; 1428 1429 if (rx_ring->desc) { 1430 dma_free_coherent(rx_ring->dev, rx_ring->size, 1431 rx_ring->desc, rx_ring->dma); 1432 rx_ring->desc = NULL; 1433 } 1434 } 1435 1436 /** 1437 * i40e_setup_rx_descriptors - Allocate Rx descriptors 1438 * @rx_ring: Rx descriptor ring (for a specific queue) to setup 1439 * 1440 * Returns 0 on success, negative on failure 1441 **/ 1442 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) 1443 { 1444 struct device *dev = rx_ring->dev; 1445 int err = -ENOMEM; 1446 int bi_size; 1447 1448 /* warn if we are about to overwrite the pointer */ 1449 WARN_ON(rx_ring->rx_bi); 1450 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; 1451 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); 1452 if (!rx_ring->rx_bi) 1453 goto err; 1454 1455 u64_stats_init(&rx_ring->syncp); 1456 1457 /* Round up to nearest 4K */ 1458 rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc); 1459 rx_ring->size = ALIGN(rx_ring->size, 4096); 1460 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, 1461 &rx_ring->dma, GFP_KERNEL); 1462 1463 if (!rx_ring->desc) { 1464 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", 1465 rx_ring->size); 1466 goto err; 1467 } 1468 1469 rx_ring->next_to_alloc = 0; 1470 rx_ring->next_to_clean = 0; 1471 rx_ring->next_to_use = 0; 1472 1473 /* XDP RX-queue info only needed for RX rings exposed to XDP */ 1474 if (rx_ring->vsi->type == I40E_VSI_MAIN) { 1475 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, 1476 rx_ring->queue_index); 1477 if (err < 0) 1478 goto err; 1479 } 1480 1481 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; 1482 1483 return 0; 1484 err: 1485 kfree(rx_ring->rx_bi); 1486 rx_ring->rx_bi = NULL; 1487 return err; 1488 } 1489 1490 /** 1491 * i40e_release_rx_desc - Store the new tail and head values 1492 * @rx_ring: ring to bump 1493 * @val: new head index 1494 **/ 1495 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) 1496 { 1497 rx_ring->next_to_use = val; 1498 1499 /* update next to alloc since we have filled the ring */ 1500 rx_ring->next_to_alloc = val; 1501 1502 /* Force memory writes to complete before letting h/w 1503 * know there are new descriptors to fetch. 
(Only 1504 * applicable for weak-ordered memory model archs, 1505 * such as IA-64). 1506 */ 1507 wmb(); 1508 writel(val, rx_ring->tail); 1509 } 1510 1511 /** 1512 * i40e_rx_offset - Return expected offset into page to access data 1513 * @rx_ring: Ring we are requesting offset of 1514 * 1515 * Returns the offset value for ring into the data buffer. 1516 */ 1517 static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) 1518 { 1519 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; 1520 } 1521 1522 /** 1523 * i40e_alloc_mapped_page - recycle or make a new page 1524 * @rx_ring: ring to use 1525 * @bi: rx_buffer struct to modify 1526 * 1527 * Returns true if the page was successfully allocated or 1528 * reused. 1529 **/ 1530 static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, 1531 struct i40e_rx_buffer *bi) 1532 { 1533 struct page *page = bi->page; 1534 dma_addr_t dma; 1535 1536 /* since we are recycling buffers we should seldom need to alloc */ 1537 if (likely(page)) { 1538 rx_ring->rx_stats.page_reuse_count++; 1539 return true; 1540 } 1541 1542 /* alloc new page for storage */ 1543 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring)); 1544 if (unlikely(!page)) { 1545 rx_ring->rx_stats.alloc_page_failed++; 1546 return false; 1547 } 1548 1549 /* map page for use */ 1550 dma = dma_map_page_attrs(rx_ring->dev, page, 0, 1551 i40e_rx_pg_size(rx_ring), 1552 DMA_FROM_DEVICE, 1553 I40E_RX_DMA_ATTR); 1554 1555 /* if mapping failed free memory back to system since 1556 * there isn't much point in holding memory we can't use 1557 */ 1558 if (dma_mapping_error(rx_ring->dev, dma)) { 1559 __free_pages(page, i40e_rx_pg_order(rx_ring)); 1560 rx_ring->rx_stats.alloc_page_failed++; 1561 return false; 1562 } 1563 1564 bi->dma = dma; 1565 bi->page = page; 1566 bi->page_offset = i40e_rx_offset(rx_ring); 1567 page_ref_add(page, USHRT_MAX - 1); 1568 bi->pagecnt_bias = USHRT_MAX; 1569 1570 return true; 1571 } 1572 1573 /** 1574 * i40e_receive_skb - Send a completed packet up the stack 1575 * @rx_ring: rx ring in play 1576 * @skb: packet to send up 1577 * @vlan_tag: vlan tag for packet 1578 **/ 1579 static void i40e_receive_skb(struct i40e_ring *rx_ring, 1580 struct sk_buff *skb, u16 vlan_tag) 1581 { 1582 struct i40e_q_vector *q_vector = rx_ring->q_vector; 1583 1584 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1585 (vlan_tag & VLAN_VID_MASK)) 1586 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); 1587 1588 napi_gro_receive(&q_vector->napi, skb); 1589 } 1590 1591 /** 1592 * i40e_alloc_rx_buffers - Replace used receive buffers 1593 * @rx_ring: ring to place buffers on 1594 * @cleaned_count: number of buffers to replace 1595 * 1596 * Returns false if all allocations were successful, true if any fail 1597 **/ 1598 bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) 1599 { 1600 u16 ntu = rx_ring->next_to_use; 1601 union i40e_rx_desc *rx_desc; 1602 struct i40e_rx_buffer *bi; 1603 1604 /* do nothing if no valid netdev defined */ 1605 if (!rx_ring->netdev || !cleaned_count) 1606 return false; 1607 1608 rx_desc = I40E_RX_DESC(rx_ring, ntu); 1609 bi = &rx_ring->rx_bi[ntu]; 1610 1611 do { 1612 if (!i40e_alloc_mapped_page(rx_ring, bi)) 1613 goto no_buffers; 1614 1615 /* sync the buffer for use by the device */ 1616 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 1617 bi->page_offset, 1618 rx_ring->rx_buf_len, 1619 DMA_FROM_DEVICE); 1620 1621 /* Refresh the desc even if buffer_addrs didn't change 1622 * because each write-back erases this info. 
1623 */ 1624 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 1625 1626 rx_desc++; 1627 bi++; 1628 ntu++; 1629 if (unlikely(ntu == rx_ring->count)) { 1630 rx_desc = I40E_RX_DESC(rx_ring, 0); 1631 bi = rx_ring->rx_bi; 1632 ntu = 0; 1633 } 1634 1635 /* clear the status bits for the next_to_use descriptor */ 1636 rx_desc->wb.qword1.status_error_len = 0; 1637 1638 cleaned_count--; 1639 } while (cleaned_count); 1640 1641 if (rx_ring->next_to_use != ntu) 1642 i40e_release_rx_desc(rx_ring, ntu); 1643 1644 return false; 1645 1646 no_buffers: 1647 if (rx_ring->next_to_use != ntu) 1648 i40e_release_rx_desc(rx_ring, ntu); 1649 1650 /* make sure to come back via polling to try again after 1651 * allocation failure 1652 */ 1653 return true; 1654 } 1655 1656 /** 1657 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum 1658 * @vsi: the VSI we care about 1659 * @skb: skb currently being received and modified 1660 * @rx_desc: the receive descriptor 1661 **/ 1662 static inline void i40e_rx_checksum(struct i40e_vsi *vsi, 1663 struct sk_buff *skb, 1664 union i40e_rx_desc *rx_desc) 1665 { 1666 struct i40e_rx_ptype_decoded decoded; 1667 u32 rx_error, rx_status; 1668 bool ipv4, ipv6; 1669 u8 ptype; 1670 u64 qword; 1671 1672 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 1673 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; 1674 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> 1675 I40E_RXD_QW1_ERROR_SHIFT; 1676 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> 1677 I40E_RXD_QW1_STATUS_SHIFT; 1678 decoded = decode_rx_desc_ptype(ptype); 1679 1680 skb->ip_summed = CHECKSUM_NONE; 1681 1682 skb_checksum_none_assert(skb); 1683 1684 /* Rx csum enabled and ip headers found? */ 1685 if (!(vsi->netdev->features & NETIF_F_RXCSUM)) 1686 return; 1687 1688 /* did the hardware decode the packet and checksum? */ 1689 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) 1690 return; 1691 1692 /* both known and outer_ip must be set for the below code to work */ 1693 if (!(decoded.known && decoded.outer_ip)) 1694 return; 1695 1696 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && 1697 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); 1698 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && 1699 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); 1700 1701 if (ipv4 && 1702 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | 1703 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT)))) 1704 goto checksum_fail; 1705 1706 /* likely incorrect csum if alternate IP extension headers found */ 1707 if (ipv6 && 1708 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) 1709 /* don't increment checksum err here, non-fatal err */ 1710 return; 1711 1712 /* there was some L4 error, count error and punt packet to the stack */ 1713 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) 1714 goto checksum_fail; 1715 1716 /* handle packets that were not able to be checksummed due 1717 * to arrival speed, in this case the stack can compute 1718 * the csum. 1719 */ 1720 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) 1721 return; 1722 1723 /* If there is an outer header present that might contain a checksum 1724 * we need to bump the checksum level by 1 to reflect the fact that 1725 * we are indicating we validated the inner checksum. 
 */
	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case I40E_RX_PTYPE_INNER_PROT_TCP:
	case I40E_RX_PTYPE_INNER_PROT_UDP:
	case I40E_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* fall through */
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}

/**
 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @rx_ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
1806 **/ 1807 static inline 1808 void i40e_process_skb_fields(struct i40e_ring *rx_ring, 1809 union i40e_rx_desc *rx_desc, struct sk_buff *skb, 1810 u8 rx_ptype) 1811 { 1812 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 1813 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> 1814 I40E_RXD_QW1_STATUS_SHIFT; 1815 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK; 1816 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> 1817 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT; 1818 1819 if (unlikely(tsynvalid)) 1820 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn); 1821 1822 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); 1823 1824 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); 1825 1826 skb_record_rx_queue(skb, rx_ring->queue_index); 1827 1828 /* modifies the skb - consumes the enet header */ 1829 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1830 } 1831 1832 /** 1833 * i40e_cleanup_headers - Correct empty headers 1834 * @rx_ring: rx descriptor ring packet is being transacted on 1835 * @skb: pointer to current skb being fixed 1836 * @rx_desc: pointer to the EOP Rx descriptor 1837 * 1838 * Also address the case where we are pulling data in on pages only 1839 * and as such no data is present in the skb header. 1840 * 1841 * In addition if skb is not at least 60 bytes we need to pad it so that 1842 * it is large enough to qualify as a valid Ethernet frame. 1843 * 1844 * Returns true if an error was encountered and skb was freed. 1845 **/ 1846 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb, 1847 union i40e_rx_desc *rx_desc) 1848 1849 { 1850 /* XDP packets use error pointer so abort at this point */ 1851 if (IS_ERR(skb)) 1852 return true; 1853 1854 /* ERR_MASK will only have valid bits if EOP set, and 1855 * what we are doing here is actually checking 1856 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in 1857 * the error field 1858 */ 1859 if (unlikely(i40e_test_staterr(rx_desc, 1860 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { 1861 dev_kfree_skb_any(skb); 1862 return true; 1863 } 1864 1865 /* if eth_skb_pad returns an error the skb was freed */ 1866 if (eth_skb_pad(skb)) 1867 return true; 1868 1869 return false; 1870 } 1871 1872 /** 1873 * i40e_page_is_reusable - check if any reuse is possible 1874 * @page: page struct to check 1875 * 1876 * A page is not reusable if it was allocated under low memory 1877 * conditions, or it's not in the same NUMA node as this CPU. 1878 */ 1879 static inline bool i40e_page_is_reusable(struct page *page) 1880 { 1881 return (page_to_nid(page) == numa_mem_id()) && 1882 !page_is_pfmemalloc(page); 1883 } 1884 1885 /** 1886 * i40e_can_reuse_rx_page - Determine if this page can be reused by 1887 * the adapter for another receive 1888 * 1889 * @rx_buffer: buffer containing the page 1890 * 1891 * If page is reusable, rx_buffer->page_offset is adjusted to point to 1892 * an unused region in the page. 1893 * 1894 * For small pages, @truesize will be a constant value, half the size 1895 * of the memory at page. We'll attempt to alternate between high and 1896 * low halves of the page, with one half ready for use by the hardware 1897 * and the other half being consumed by the stack. We use the page 1898 * ref count to determine whether the stack has finished consuming the 1899 * portion of this page that was passed up with a previous packet. If 1900 * the page ref count is >1, we'll assume the "other" half page is 1901 * still busy, and this page cannot be reused. 
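 *
 * As a rough illustration with order-0 pages and 2K buffers: page_count()
 * includes the block of references the driver holds for itself (tracked in
 * pagecnt_bias), so (page_count(page) - pagecnt_bias) counts how many
 * half-page buffers from this page are still sitting in skbs.  A value of 1
 * is just the frame being processed now; anything greater means the other
 * half has not been returned yet, so the page is left alone.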
1902 * 1903 * For larger pages, @truesize will be the actual space used by the 1904 * received packet (adjusted upward to an even multiple of the cache 1905 * line size). This will advance through the page by the amount 1906 * actually consumed by the received packets while there is still 1907 * space for a buffer. Each region of larger pages will be used at 1908 * most once, after which the page will not be reused. 1909 * 1910 * In either case, if the page is reusable its refcount is increased. 1911 **/ 1912 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) 1913 { 1914 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 1915 struct page *page = rx_buffer->page; 1916 1917 /* Is any reuse possible? */ 1918 if (unlikely(!i40e_page_is_reusable(page))) 1919 return false; 1920 1921 #if (PAGE_SIZE < 8192) 1922 /* if we are only owner of page we can reuse it */ 1923 if (unlikely((page_count(page) - pagecnt_bias) > 1)) 1924 return false; 1925 #else 1926 #define I40E_LAST_OFFSET \ 1927 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) 1928 if (rx_buffer->page_offset > I40E_LAST_OFFSET) 1929 return false; 1930 #endif 1931 1932 /* If we have drained the page fragment pool we need to update 1933 * the pagecnt_bias and page count so that we fully restock the 1934 * number of references the driver holds. 1935 */ 1936 if (unlikely(pagecnt_bias == 1)) { 1937 page_ref_add(page, USHRT_MAX - 1); 1938 rx_buffer->pagecnt_bias = USHRT_MAX; 1939 } 1940 1941 return true; 1942 } 1943 1944 /** 1945 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff 1946 * @rx_ring: rx descriptor ring to transact packets on 1947 * @rx_buffer: buffer containing page to add 1948 * @skb: sk_buff to place the data into 1949 * @size: packet length from rx_desc 1950 * 1951 * This function will add the data contained in rx_buffer->page to the skb. 1952 * It will just attach the page as a frag to the skb. 1953 * 1954 * The function will then update the page offset. 1955 **/ 1956 static void i40e_add_rx_frag(struct i40e_ring *rx_ring, 1957 struct i40e_rx_buffer *rx_buffer, 1958 struct sk_buff *skb, 1959 unsigned int size) 1960 { 1961 #if (PAGE_SIZE < 8192) 1962 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 1963 #else 1964 unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)); 1965 #endif 1966 1967 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, 1968 rx_buffer->page_offset, size, truesize); 1969 1970 /* page is being used so we must update the page offset */ 1971 #if (PAGE_SIZE < 8192) 1972 rx_buffer->page_offset ^= truesize; 1973 #else 1974 rx_buffer->page_offset += truesize; 1975 #endif 1976 } 1977 1978 /** 1979 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use 1980 * @rx_ring: rx descriptor ring to transact packets on 1981 * @size: size of buffer to add to skb 1982 * 1983 * This function will pull an Rx buffer from the ring and synchronize it 1984 * for use by the CPU. 
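 * The pagecnt_bias is decremented here to account for the reference that
 * travels with the frame; the buffer is handed back (and possibly recycled)
 * later via i40e_put_rx_buffer().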
1985 */ 1986 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, 1987 const unsigned int size) 1988 { 1989 struct i40e_rx_buffer *rx_buffer; 1990 1991 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; 1992 prefetchw(rx_buffer->page); 1993 1994 /* we are reusing so sync this buffer for CPU use */ 1995 dma_sync_single_range_for_cpu(rx_ring->dev, 1996 rx_buffer->dma, 1997 rx_buffer->page_offset, 1998 size, 1999 DMA_FROM_DEVICE); 2000 2001 /* We have pulled a buffer for use, so decrement pagecnt_bias */ 2002 rx_buffer->pagecnt_bias--; 2003 2004 return rx_buffer; 2005 } 2006 2007 /** 2008 * i40e_construct_skb - Allocate skb and populate it 2009 * @rx_ring: rx descriptor ring to transact packets on 2010 * @rx_buffer: rx buffer to pull data from 2011 * @xdp: xdp_buff pointing to the data 2012 * 2013 * This function allocates an skb. It then populates it with the page 2014 * data from the current receive descriptor, taking care to set up the 2015 * skb correctly. 2016 */ 2017 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, 2018 struct i40e_rx_buffer *rx_buffer, 2019 struct xdp_buff *xdp) 2020 { 2021 unsigned int size = xdp->data_end - xdp->data; 2022 #if (PAGE_SIZE < 8192) 2023 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 2024 #else 2025 unsigned int truesize = SKB_DATA_ALIGN(size); 2026 #endif 2027 unsigned int headlen; 2028 struct sk_buff *skb; 2029 2030 /* prefetch first cache line of first page */ 2031 prefetch(xdp->data); 2032 #if L1_CACHE_BYTES < 128 2033 prefetch(xdp->data + L1_CACHE_BYTES); 2034 #endif 2035 2036 /* allocate a skb to store the frags */ 2037 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, 2038 I40E_RX_HDR_SIZE, 2039 GFP_ATOMIC | __GFP_NOWARN); 2040 if (unlikely(!skb)) 2041 return NULL; 2042 2043 /* Determine available headroom for copy */ 2044 headlen = size; 2045 if (headlen > I40E_RX_HDR_SIZE) 2046 headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE); 2047 2048 /* align pull length to size of long to optimize memcpy performance */ 2049 memcpy(__skb_put(skb, headlen), xdp->data, 2050 ALIGN(headlen, sizeof(long))); 2051 2052 /* update all of the pointers */ 2053 size -= headlen; 2054 if (size) { 2055 skb_add_rx_frag(skb, 0, rx_buffer->page, 2056 rx_buffer->page_offset + headlen, 2057 size, truesize); 2058 2059 /* buffer is used by skb, update page_offset */ 2060 #if (PAGE_SIZE < 8192) 2061 rx_buffer->page_offset ^= truesize; 2062 #else 2063 rx_buffer->page_offset += truesize; 2064 #endif 2065 } else { 2066 /* buffer is unused, reset bias back to rx_buffer */ 2067 rx_buffer->pagecnt_bias++; 2068 } 2069 2070 return skb; 2071 } 2072 2073 /** 2074 * i40e_build_skb - Build skb around an existing buffer 2075 * @rx_ring: Rx descriptor ring to transact packets on 2076 * @rx_buffer: Rx buffer to pull data from 2077 * @xdp: xdp_buff pointing to the data 2078 * 2079 * This function builds an skb around an existing Rx buffer, taking care 2080 * to set up the skb correctly and avoid any memcpy overhead. 
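 * Unlike i40e_construct_skb(), no header bytes are copied: build_skb() wraps
 * the Rx page itself, so the I40E_SKB_PAD headroom and the skb_shared_info
 * at the end of the buffer live in the page rather than in a separate
 * allocation.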
2081 */ 2082 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, 2083 struct i40e_rx_buffer *rx_buffer, 2084 struct xdp_buff *xdp) 2085 { 2086 unsigned int size = xdp->data_end - xdp->data; 2087 #if (PAGE_SIZE < 8192) 2088 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 2089 #else 2090 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 2091 SKB_DATA_ALIGN(I40E_SKB_PAD + size); 2092 #endif 2093 struct sk_buff *skb; 2094 2095 /* prefetch first cache line of first page */ 2096 prefetch(xdp->data); 2097 #if L1_CACHE_BYTES < 128 2098 prefetch(xdp->data + L1_CACHE_BYTES); 2099 #endif 2100 /* build an skb around the page buffer */ 2101 skb = build_skb(xdp->data_hard_start, truesize); 2102 if (unlikely(!skb)) 2103 return NULL; 2104 2105 /* update pointers within the skb to store the data */ 2106 skb_reserve(skb, I40E_SKB_PAD); 2107 __skb_put(skb, size); 2108 2109 /* buffer is used by skb, update page_offset */ 2110 #if (PAGE_SIZE < 8192) 2111 rx_buffer->page_offset ^= truesize; 2112 #else 2113 rx_buffer->page_offset += truesize; 2114 #endif 2115 2116 return skb; 2117 } 2118 2119 /** 2120 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free 2121 * @rx_ring: rx descriptor ring to transact packets on 2122 * @rx_buffer: rx buffer to pull data from 2123 * 2124 * This function will clean up the contents of the rx_buffer. It will 2125 * either recycle the buffer or unmap it and free the associated resources. 2126 */ 2127 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, 2128 struct i40e_rx_buffer *rx_buffer) 2129 { 2130 if (i40e_can_reuse_rx_page(rx_buffer)) { 2131 /* hand second half of page back to the ring */ 2132 i40e_reuse_rx_page(rx_ring, rx_buffer); 2133 rx_ring->rx_stats.page_reuse_count++; 2134 } else { 2135 /* we are not reusing the buffer so unmap it */ 2136 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, 2137 i40e_rx_pg_size(rx_ring), 2138 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); 2139 __page_frag_cache_drain(rx_buffer->page, 2140 rx_buffer->pagecnt_bias); 2141 } 2142 2143 /* clear contents of buffer_info */ 2144 rx_buffer->page = NULL; 2145 } 2146 2147 /** 2148 * i40e_is_non_eop - process handling of non-EOP buffers 2149 * @rx_ring: Rx ring being processed 2150 * @rx_desc: Rx descriptor for current buffer 2151 * @skb: Current socket buffer containing buffer in progress 2152 * 2153 * This function updates next to clean. If the buffer is an EOP buffer 2154 * this function exits returning false, otherwise it will place the 2155 * sk_buff in the next buffer to be chained and return true indicating 2156 * that this is in fact a non-EOP buffer. 2157 **/ 2158 static bool i40e_is_non_eop(struct i40e_ring *rx_ring, 2159 union i40e_rx_desc *rx_desc, 2160 struct sk_buff *skb) 2161 { 2162 u32 ntc = rx_ring->next_to_clean + 1; 2163 2164 /* fetch, update, and store next to clean */ 2165 ntc = (ntc < rx_ring->count) ? 
ntc : 0; 2166 rx_ring->next_to_clean = ntc; 2167 2168 prefetch(I40E_RX_DESC(rx_ring, ntc)); 2169 2170 /* if we are the last buffer then there is nothing else to do */ 2171 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) 2172 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) 2173 return false; 2174 2175 rx_ring->rx_stats.non_eop_descs++; 2176 2177 return true; 2178 } 2179 2180 #define I40E_XDP_PASS 0 2181 #define I40E_XDP_CONSUMED 1 2182 #define I40E_XDP_TX 2 2183 2184 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, 2185 struct i40e_ring *xdp_ring); 2186 2187 static int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, 2188 struct i40e_ring *xdp_ring) 2189 { 2190 struct xdp_frame *xdpf = convert_to_xdp_frame(xdp); 2191 2192 if (unlikely(!xdpf)) 2193 return I40E_XDP_CONSUMED; 2194 2195 return i40e_xmit_xdp_ring(xdpf, xdp_ring); 2196 } 2197 2198 /** 2199 * i40e_run_xdp - run an XDP program 2200 * @rx_ring: Rx ring being processed 2201 * @xdp: XDP buffer containing the frame 2202 **/ 2203 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, 2204 struct xdp_buff *xdp) 2205 { 2206 int err, result = I40E_XDP_PASS; 2207 struct i40e_ring *xdp_ring; 2208 struct bpf_prog *xdp_prog; 2209 u32 act; 2210 2211 rcu_read_lock(); 2212 xdp_prog = READ_ONCE(rx_ring->xdp_prog); 2213 2214 if (!xdp_prog) 2215 goto xdp_out; 2216 2217 prefetchw(xdp->data_hard_start); /* xdp_frame write */ 2218 2219 act = bpf_prog_run_xdp(xdp_prog, xdp); 2220 switch (act) { 2221 case XDP_PASS: 2222 break; 2223 case XDP_TX: 2224 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; 2225 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); 2226 break; 2227 case XDP_REDIRECT: 2228 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); 2229 result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED; 2230 break; 2231 default: 2232 bpf_warn_invalid_xdp_action(act); 2233 case XDP_ABORTED: 2234 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); 2235 /* fallthrough -- handle aborts by dropping packet */ 2236 case XDP_DROP: 2237 result = I40E_XDP_CONSUMED; 2238 break; 2239 } 2240 xdp_out: 2241 rcu_read_unlock(); 2242 return ERR_PTR(-result); 2243 } 2244 2245 /** 2246 * i40e_rx_buffer_flip - adjusted rx_buffer to point to an unused region 2247 * @rx_ring: Rx ring 2248 * @rx_buffer: Rx buffer to adjust 2249 * @size: Size of adjustment 2250 **/ 2251 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring, 2252 struct i40e_rx_buffer *rx_buffer, 2253 unsigned int size) 2254 { 2255 #if (PAGE_SIZE < 8192) 2256 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 2257 2258 rx_buffer->page_offset ^= truesize; 2259 #else 2260 unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size); 2261 2262 rx_buffer->page_offset += truesize; 2263 #endif 2264 } 2265 2266 static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) 2267 { 2268 /* Force memory writes to complete before letting h/w 2269 * know there are new descriptors to fetch. 2270 */ 2271 wmb(); 2272 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); 2273 } 2274 2275 /** 2276 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 2277 * @rx_ring: rx descriptor ring to transact packets on 2278 * @budget: Total limit on number of packets to process 2279 * 2280 * This function provides a "bounce buffer" approach to Rx interrupt 2281 * processing. The advantage to this is that on systems that have 2282 * expensive overhead for IOMMU access this provides a means of avoiding 2283 * it by maintaining the mapping of the page to the system. 
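 *
 * Roughly, each loop iteration fetches the next written-back descriptor,
 * runs any attached XDP program on the buffer first, and then either
 * recycles the buffer (XDP_TX/XDP_REDIRECT or drop) or copies/wraps the
 * data into an skb and hands it to the stack, restocking Rx descriptors in
 * batches once I40E_RX_BUFFER_WRITE buffers have been consumed.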
2284 * 2285 * Returns amount of work completed 2286 **/ 2287 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) 2288 { 2289 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 2290 struct sk_buff *skb = rx_ring->skb; 2291 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); 2292 bool failure = false, xdp_xmit = false; 2293 struct xdp_buff xdp; 2294 2295 xdp.rxq = &rx_ring->xdp_rxq; 2296 2297 while (likely(total_rx_packets < (unsigned int)budget)) { 2298 struct i40e_rx_buffer *rx_buffer; 2299 union i40e_rx_desc *rx_desc; 2300 unsigned int size; 2301 u16 vlan_tag; 2302 u8 rx_ptype; 2303 u64 qword; 2304 2305 /* return some buffers to hardware, one at a time is too slow */ 2306 if (cleaned_count >= I40E_RX_BUFFER_WRITE) { 2307 failure = failure || 2308 i40e_alloc_rx_buffers(rx_ring, cleaned_count); 2309 cleaned_count = 0; 2310 } 2311 2312 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); 2313 2314 /* status_error_len will always be zero for unused descriptors 2315 * because it's cleared in cleanup, and overlaps with hdr_addr 2316 * which is always zero because packet split isn't used, if the 2317 * hardware wrote DD then the length will be non-zero 2318 */ 2319 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 2320 2321 /* This memory barrier is needed to keep us from reading 2322 * any other fields out of the rx_desc until we have 2323 * verified the descriptor has been written back. 2324 */ 2325 dma_rmb(); 2326 2327 if (unlikely(i40e_rx_is_programming_status(qword))) { 2328 i40e_clean_programming_status(rx_ring, rx_desc, qword); 2329 cleaned_count++; 2330 continue; 2331 } 2332 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> 2333 I40E_RXD_QW1_LENGTH_PBUF_SHIFT; 2334 if (!size) 2335 break; 2336 2337 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb); 2338 rx_buffer = i40e_get_rx_buffer(rx_ring, size); 2339 2340 /* retrieve a buffer from the ring */ 2341 if (!skb) { 2342 xdp.data = page_address(rx_buffer->page) + 2343 rx_buffer->page_offset; 2344 xdp_set_data_meta_invalid(&xdp); 2345 xdp.data_hard_start = xdp.data - 2346 i40e_rx_offset(rx_ring); 2347 xdp.data_end = xdp.data + size; 2348 2349 skb = i40e_run_xdp(rx_ring, &xdp); 2350 } 2351 2352 if (IS_ERR(skb)) { 2353 if (PTR_ERR(skb) == -I40E_XDP_TX) { 2354 xdp_xmit = true; 2355 i40e_rx_buffer_flip(rx_ring, rx_buffer, size); 2356 } else { 2357 rx_buffer->pagecnt_bias++; 2358 } 2359 total_rx_bytes += size; 2360 total_rx_packets++; 2361 } else if (skb) { 2362 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size); 2363 } else if (ring_uses_build_skb(rx_ring)) { 2364 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp); 2365 } else { 2366 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp); 2367 } 2368 2369 /* exit if we failed to retrieve a buffer */ 2370 if (!skb) { 2371 rx_ring->rx_stats.alloc_buff_failed++; 2372 rx_buffer->pagecnt_bias++; 2373 break; 2374 } 2375 2376 i40e_put_rx_buffer(rx_ring, rx_buffer); 2377 cleaned_count++; 2378 2379 if (i40e_is_non_eop(rx_ring, rx_desc, skb)) 2380 continue; 2381 2382 if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) { 2383 skb = NULL; 2384 continue; 2385 } 2386 2387 /* probably a little skewed due to removing CRC */ 2388 total_rx_bytes += skb->len; 2389 2390 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 2391 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> 2392 I40E_RXD_QW1_PTYPE_SHIFT; 2393 2394 /* populate checksum, VLAN, and protocol */ 2395 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 2396 2397 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? 
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; 2399 2400 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); 2401 i40e_receive_skb(rx_ring, skb, vlan_tag); 2402 skb = NULL; 2403 2404 /* update budget accounting */ 2405 total_rx_packets++; 2406 } 2407 2408 if (xdp_xmit) { 2409 struct i40e_ring *xdp_ring = 2410 rx_ring->vsi->xdp_rings[rx_ring->queue_index]; 2411 2412 i40e_xdp_ring_update_tail(xdp_ring); 2413 xdp_do_flush_map(); 2414 } 2415 2416 rx_ring->skb = skb; 2417 2418 u64_stats_update_begin(&rx_ring->syncp); 2419 rx_ring->stats.packets += total_rx_packets; 2420 rx_ring->stats.bytes += total_rx_bytes; 2421 u64_stats_update_end(&rx_ring->syncp); 2422 rx_ring->q_vector->rx.total_packets += total_rx_packets; 2423 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; 2424 2425 /* guarantee a trip back through this routine if there was a failure */ 2426 return failure ? budget : (int)total_rx_packets; 2427 } 2428 2429 static inline u32 i40e_buildreg_itr(const int type, u16 itr) 2430 { 2431 u32 val; 2432 2433 /* We don't bother with setting the CLEARPBA bit as the data sheet 2434 * points out doing so is "meaningless since it was already 2435 * auto-cleared". The auto-clearing happens when the interrupt is 2436 * asserted. 2437 * 2438 * Hardware errata 28 also indicates that writing to a 2439 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear 2440 * an event in the PBA anyway so we need to rely on the automask 2441 * to hold pending events for us until the interrupt is re-enabled. 2442 * 2443 * The itr value is reported in microseconds, and the register 2444 * value is recorded in 2 microsecond units. For this reason we 2445 * only need to shift by the interval shift - 1 instead of the 2446 * full value. 2447 */ 2448 itr &= I40E_ITR_MASK; 2449 2450 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 2451 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | 2452 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1)); 2453 2454 return val; 2455 } 2456 2457 /* a small macro to shorten up some long lines */ 2458 #define INTREG I40E_PFINT_DYN_CTLN 2459 2460 /* The act of updating the ITR will cause it to immediately trigger. In order 2461 * to prevent this from throwing off adaptive update statistics we defer the 2462 * update so that it can only happen so often. So after either Tx or Rx are 2463 * updated we make the adaptive scheme wait until either the ITR completely 2464 * expires via the next_update expiration or we have been through at least 2465 * 3 interrupts. 2466 */ 2467 #define ITR_COUNTDOWN_START 3 2468 2469 /** 2470 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt 2471 * @vsi: the VSI we care about 2472 * @q_vector: q_vector for which itr is being updated and interrupt enabled 2473 * 2474 **/ 2475 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, 2476 struct i40e_q_vector *q_vector) 2477 { 2478 struct i40e_hw *hw = &vsi->back->hw; 2479 u32 intval; 2480 2481 /* If we don't have MSIX, then we only need to re-enable icr0 */ 2482 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) { 2483 i40e_irq_dynamic_enable_icr0(vsi->back); 2484 return; 2485 } 2486 2487 /* These will do nothing if dynamic updates are not enabled */ 2488 i40e_update_itr(q_vector, &q_vector->tx); 2489 i40e_update_itr(q_vector, &q_vector->rx); 2490 2491 /* This block of logic allows us to get away with only updating 2492 * one ITR value with each interrupt. The idea is to perform a 2493 * pseudo-lazy update with the following criteria. 2494 * 2495 * 1.
Rx is given higher priority than Tx if both are in same state 2496 * 2. If we must reduce an ITR that is given highest priority. 2497 * 3. We then give priority to increasing ITR based on amount. 2498 */ 2499 if (q_vector->rx.target_itr < q_vector->rx.current_itr) { 2500 /* Rx ITR needs to be reduced, this is highest priority */ 2501 intval = i40e_buildreg_itr(I40E_RX_ITR, 2502 q_vector->rx.target_itr); 2503 q_vector->rx.current_itr = q_vector->rx.target_itr; 2504 q_vector->itr_countdown = ITR_COUNTDOWN_START; 2505 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || 2506 ((q_vector->rx.target_itr - q_vector->rx.current_itr) < 2507 (q_vector->tx.target_itr - q_vector->tx.current_itr))) { 2508 /* Tx ITR needs to be reduced, this is second priority 2509 * Tx ITR needs to be increased more than Rx, fourth priority 2510 */ 2511 intval = i40e_buildreg_itr(I40E_TX_ITR, 2512 q_vector->tx.target_itr); 2513 q_vector->tx.current_itr = q_vector->tx.target_itr; 2514 q_vector->itr_countdown = ITR_COUNTDOWN_START; 2515 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { 2516 /* Rx ITR needs to be increased, third priority */ 2517 intval = i40e_buildreg_itr(I40E_RX_ITR, 2518 q_vector->rx.target_itr); 2519 q_vector->rx.current_itr = q_vector->rx.target_itr; 2520 q_vector->itr_countdown = ITR_COUNTDOWN_START; 2521 } else { 2522 /* No ITR update, lowest priority */ 2523 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0); 2524 if (q_vector->itr_countdown) 2525 q_vector->itr_countdown--; 2526 } 2527 2528 if (!test_bit(__I40E_VSI_DOWN, vsi->state)) 2529 wr32(hw, INTREG(q_vector->reg_idx), intval); 2530 } 2531 2532 /** 2533 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine 2534 * @napi: napi struct with our devices info in it 2535 * @budget: amount of work driver is allowed to do this pass, in packets 2536 * 2537 * This function will clean all queues associated with a q_vector. 2538 * 2539 * Returns the amount of work done 2540 **/ 2541 int i40e_napi_poll(struct napi_struct *napi, int budget) 2542 { 2543 struct i40e_q_vector *q_vector = 2544 container_of(napi, struct i40e_q_vector, napi); 2545 struct i40e_vsi *vsi = q_vector->vsi; 2546 struct i40e_ring *ring; 2547 bool clean_complete = true; 2548 bool arm_wb = false; 2549 int budget_per_ring; 2550 int work_done = 0; 2551 2552 if (test_bit(__I40E_VSI_DOWN, vsi->state)) { 2553 napi_complete(napi); 2554 return 0; 2555 } 2556 2557 /* Since the actual Tx work is minimal, we can give the Tx a larger 2558 * budget and be more aggressive about cleaning up the Tx descriptors. 2559 */ 2560 i40e_for_each_ring(ring, q_vector->tx) { 2561 if (!i40e_clean_tx_irq(vsi, ring, budget)) { 2562 clean_complete = false; 2563 continue; 2564 } 2565 arm_wb |= ring->arm_wb; 2566 ring->arm_wb = false; 2567 } 2568 2569 /* Handle case where we are called by netpoll with a budget of 0 */ 2570 if (budget <= 0) 2571 goto tx_only; 2572 2573 /* We attempt to distribute budget to each Rx queue fairly, but don't 2574 * allow the budget to go below 1 because that would exit polling early. 
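 * For example, the usual NAPI budget of 64 split across 4 ring pairs leaves
 * each Rx ring a budget of 16, while a vector with more than 64 ring pairs
 * would otherwise round down to 0 and end the poll before doing any work.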
2575 */ 2576 budget_per_ring = max(budget/q_vector->num_ringpairs, 1); 2577 2578 i40e_for_each_ring(ring, q_vector->rx) { 2579 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring); 2580 2581 work_done += cleaned; 2582 /* if we clean as many as budgeted, we must not be done */ 2583 if (cleaned >= budget_per_ring) 2584 clean_complete = false; 2585 } 2586 2587 /* If work not completed, return budget and polling will return */ 2588 if (!clean_complete) { 2589 int cpu_id = smp_processor_id(); 2590 2591 /* It is possible that the interrupt affinity has changed but, 2592 * if the cpu is pegged at 100%, polling will never exit while 2593 * traffic continues and the interrupt will be stuck on this 2594 * cpu. We check to make sure affinity is correct before we 2595 * continue to poll, otherwise we must stop polling so the 2596 * interrupt can move to the correct cpu. 2597 */ 2598 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { 2599 /* Tell napi that we are done polling */ 2600 napi_complete_done(napi, work_done); 2601 2602 /* Force an interrupt */ 2603 i40e_force_wb(vsi, q_vector); 2604 2605 /* Return budget-1 so that polling stops */ 2606 return budget - 1; 2607 } 2608 tx_only: 2609 if (arm_wb) { 2610 q_vector->tx.ring[0].tx_stats.tx_force_wb++; 2611 i40e_enable_wb_on_itr(vsi, q_vector); 2612 } 2613 return budget; 2614 } 2615 2616 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) 2617 q_vector->arm_wb_state = false; 2618 2619 /* Work is done so exit the polling mode and re-enable the interrupt */ 2620 napi_complete_done(napi, work_done); 2621 2622 i40e_update_enable_itr(vsi, q_vector); 2623 2624 return min(work_done, budget - 1); 2625 } 2626 2627 /** 2628 * i40e_atr - Add a Flow Director ATR filter 2629 * @tx_ring: ring to add programming descriptor to 2630 * @skb: send buffer 2631 * @tx_flags: send tx flags 2632 **/ 2633 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, 2634 u32 tx_flags) 2635 { 2636 struct i40e_filter_program_desc *fdir_desc; 2637 struct i40e_pf *pf = tx_ring->vsi->back; 2638 union { 2639 unsigned char *network; 2640 struct iphdr *ipv4; 2641 struct ipv6hdr *ipv6; 2642 } hdr; 2643 struct tcphdr *th; 2644 unsigned int hlen; 2645 u32 flex_ptype, dtype_cmd; 2646 int l4_proto; 2647 u16 i; 2648 2649 /* make sure ATR is enabled */ 2650 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) 2651 return; 2652 2653 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) 2654 return; 2655 2656 /* if sampling is disabled do nothing */ 2657 if (!tx_ring->atr_sample_rate) 2658 return; 2659 2660 /* Currently only IPv4/IPv6 with TCP is supported */ 2661 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) 2662 return; 2663 2664 /* snag network header to get L4 type and address */ 2665 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ? 2666 skb_inner_network_header(skb) : skb_network_header(skb); 2667 2668 /* Note: tx_flags gets modified to reflect inner protocols in 2669 * tx_enable_csum function if encap is enabled. 
2670 */ 2671 if (tx_flags & I40E_TX_FLAGS_IPV4) { 2672 /* access ihl as u8 to avoid unaligned access on ia64 */ 2673 hlen = (hdr.network[0] & 0x0F) << 2; 2674 l4_proto = hdr.ipv4->protocol; 2675 } else { 2676 /* find the start of the innermost ipv6 header */ 2677 unsigned int inner_hlen = hdr.network - skb->data; 2678 unsigned int h_offset = inner_hlen; 2679 2680 /* this function updates h_offset to the end of the header */ 2681 l4_proto = 2682 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL); 2683 /* hlen will contain our best estimate of the tcp header */ 2684 hlen = h_offset - inner_hlen; 2685 } 2686 2687 if (l4_proto != IPPROTO_TCP) 2688 return; 2689 2690 th = (struct tcphdr *)(hdr.network + hlen); 2691 2692 /* Due to lack of space, no more new filters can be programmed */ 2693 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) 2694 return; 2695 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { 2696 /* HW ATR eviction will take care of removing filters on FIN 2697 * and RST packets. 2698 */ 2699 if (th->fin || th->rst) 2700 return; 2701 } 2702 2703 tx_ring->atr_count++; 2704 2705 /* sample on all syn/fin/rst packets or once every atr sample rate */ 2706 if (!th->fin && 2707 !th->syn && 2708 !th->rst && 2709 (tx_ring->atr_count < tx_ring->atr_sample_rate)) 2710 return; 2711 2712 tx_ring->atr_count = 0; 2713 2714 /* grab the next descriptor */ 2715 i = tx_ring->next_to_use; 2716 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); 2717 2718 i++; 2719 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 2720 2721 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & 2722 I40E_TXD_FLTR_QW0_QINDEX_MASK; 2723 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ? 2724 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP << 2725 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) : 2726 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP << 2727 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); 2728 2729 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; 2730 2731 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; 2732 2733 dtype_cmd |= (th->fin || th->rst) ? 2734 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << 2735 I40E_TXD_FLTR_QW1_PCMD_SHIFT) : 2736 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << 2737 I40E_TXD_FLTR_QW1_PCMD_SHIFT); 2738 2739 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX << 2740 I40E_TXD_FLTR_QW1_DEST_SHIFT; 2741 2742 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID << 2743 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; 2744 2745 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; 2746 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) 2747 dtype_cmd |= 2748 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) << 2749 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & 2750 I40E_TXD_FLTR_QW1_CNTINDEX_MASK; 2751 else 2752 dtype_cmd |= 2753 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) << 2754 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & 2755 I40E_TXD_FLTR_QW1_CNTINDEX_MASK; 2756 2757 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) 2758 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; 2759 2760 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); 2761 fdir_desc->rsvd = cpu_to_le32(0); 2762 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); 2763 fdir_desc->fd_id = cpu_to_le32(0); 2764 } 2765 2766 /** 2767 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW 2768 * @skb: send buffer 2769 * @tx_ring: ring to send buffer on 2770 * @flags: the tx flags to be set 2771 * 2772 * Checks the skb and set up correspondingly several generic transmit flags 2773 * related to VLAN tagging for the HW, such as VLAN, DCB, etc. 
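 *
 * The 16-bit VLAN TCI ends up in *flags shifted up by
 * I40E_TX_FLAGS_VLAN_SHIFT, together with either I40E_TX_FLAGS_HW_VLAN or
 * I40E_TX_FLAGS_SW_VLAN to record how the tag is to be inserted.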
2774 * 2775 * Returns error code indicate the frame should be dropped upon error and the 2776 * otherwise returns 0 to indicate the flags has been set properly. 2777 **/ 2778 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, 2779 struct i40e_ring *tx_ring, 2780 u32 *flags) 2781 { 2782 __be16 protocol = skb->protocol; 2783 u32 tx_flags = 0; 2784 2785 if (protocol == htons(ETH_P_8021Q) && 2786 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { 2787 /* When HW VLAN acceleration is turned off by the user the 2788 * stack sets the protocol to 8021q so that the driver 2789 * can take any steps required to support the SW only 2790 * VLAN handling. In our case the driver doesn't need 2791 * to take any further steps so just set the protocol 2792 * to the encapsulated ethertype. 2793 */ 2794 skb->protocol = vlan_get_protocol(skb); 2795 goto out; 2796 } 2797 2798 /* if we have a HW VLAN tag being added, default to the HW one */ 2799 if (skb_vlan_tag_present(skb)) { 2800 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; 2801 tx_flags |= I40E_TX_FLAGS_HW_VLAN; 2802 /* else if it is a SW VLAN, check the next protocol and store the tag */ 2803 } else if (protocol == htons(ETH_P_8021Q)) { 2804 struct vlan_hdr *vhdr, _vhdr; 2805 2806 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); 2807 if (!vhdr) 2808 return -EINVAL; 2809 2810 protocol = vhdr->h_vlan_encapsulated_proto; 2811 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; 2812 tx_flags |= I40E_TX_FLAGS_SW_VLAN; 2813 } 2814 2815 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) 2816 goto out; 2817 2818 /* Insert 802.1p priority into VLAN header */ 2819 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) || 2820 (skb->priority != TC_PRIO_CONTROL)) { 2821 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK; 2822 tx_flags |= (skb->priority & 0x7) << 2823 I40E_TX_FLAGS_VLAN_PRIO_SHIFT; 2824 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) { 2825 struct vlan_ethhdr *vhdr; 2826 int rc; 2827 2828 rc = skb_cow_head(skb, 0); 2829 if (rc < 0) 2830 return rc; 2831 vhdr = (struct vlan_ethhdr *)skb->data; 2832 vhdr->h_vlan_TCI = htons(tx_flags >> 2833 I40E_TX_FLAGS_VLAN_SHIFT); 2834 } else { 2835 tx_flags |= I40E_TX_FLAGS_HW_VLAN; 2836 } 2837 } 2838 2839 out: 2840 *flags = tx_flags; 2841 return 0; 2842 } 2843 2844 /** 2845 * i40e_tso - set up the tso context descriptor 2846 * @first: pointer to first Tx buffer for xmit 2847 * @hdr_len: ptr to the size of the packet header 2848 * @cd_type_cmd_tso_mss: Quad Word 1 2849 * 2850 * Returns 0 if no TSO can happen, 1 if tso is going, or error 2851 **/ 2852 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, 2853 u64 *cd_type_cmd_tso_mss) 2854 { 2855 struct sk_buff *skb = first->skb; 2856 u64 cd_cmd, cd_tso_len, cd_mss; 2857 union { 2858 struct iphdr *v4; 2859 struct ipv6hdr *v6; 2860 unsigned char *hdr; 2861 } ip; 2862 union { 2863 struct tcphdr *tcp; 2864 struct udphdr *udp; 2865 unsigned char *hdr; 2866 } l4; 2867 u32 paylen, l4_offset; 2868 u16 gso_segs, gso_size; 2869 int err; 2870 2871 if (skb->ip_summed != CHECKSUM_PARTIAL) 2872 return 0; 2873 2874 if (!skb_is_gso(skb)) 2875 return 0; 2876 2877 err = skb_cow_head(skb, 0); 2878 if (err < 0) 2879 return err; 2880 2881 ip.hdr = skb_network_header(skb); 2882 l4.hdr = skb_transport_header(skb); 2883 2884 /* initialize outer IP header fields */ 2885 if (ip.v4->version == 4) { 2886 ip.v4->tot_len = 0; 2887 ip.v4->check = 0; 2888 } else { 2889 ip.v6->payload_len = 0; 2890 } 2891 2892 if 
(skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 2893 SKB_GSO_GRE_CSUM | 2894 SKB_GSO_IPXIP4 | 2895 SKB_GSO_IPXIP6 | 2896 SKB_GSO_UDP_TUNNEL | 2897 SKB_GSO_UDP_TUNNEL_CSUM)) { 2898 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 2899 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { 2900 l4.udp->len = 0; 2901 2902 /* determine offset of outer transport header */ 2903 l4_offset = l4.hdr - skb->data; 2904 2905 /* remove payload length from outer checksum */ 2906 paylen = skb->len - l4_offset; 2907 csum_replace_by_diff(&l4.udp->check, 2908 (__force __wsum)htonl(paylen)); 2909 } 2910 2911 /* reset pointers to inner headers */ 2912 ip.hdr = skb_inner_network_header(skb); 2913 l4.hdr = skb_inner_transport_header(skb); 2914 2915 /* initialize inner IP header fields */ 2916 if (ip.v4->version == 4) { 2917 ip.v4->tot_len = 0; 2918 ip.v4->check = 0; 2919 } else { 2920 ip.v6->payload_len = 0; 2921 } 2922 } 2923 2924 /* determine offset of inner transport header */ 2925 l4_offset = l4.hdr - skb->data; 2926 2927 /* remove payload length from inner checksum */ 2928 paylen = skb->len - l4_offset; 2929 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); 2930 2931 /* compute length of segmentation header */ 2932 *hdr_len = (l4.tcp->doff * 4) + l4_offset; 2933 2934 /* pull values out of skb_shinfo */ 2935 gso_size = skb_shinfo(skb)->gso_size; 2936 gso_segs = skb_shinfo(skb)->gso_segs; 2937 2938 /* update GSO size and bytecount with header size */ 2939 first->gso_segs = gso_segs; 2940 first->bytecount += (first->gso_segs - 1) * *hdr_len; 2941 2942 /* find the field values */ 2943 cd_cmd = I40E_TX_CTX_DESC_TSO; 2944 cd_tso_len = skb->len - *hdr_len; 2945 cd_mss = gso_size; 2946 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | 2947 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | 2948 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); 2949 return 1; 2950 } 2951 2952 /** 2953 * i40e_tsyn - set up the tsyn context descriptor 2954 * @tx_ring: ptr to the ring to send 2955 * @skb: ptr to the skb we're sending 2956 * @tx_flags: the collected send information 2957 * @cd_type_cmd_tso_mss: Quad Word 1 2958 * 2959 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen 2960 **/ 2961 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, 2962 u32 tx_flags, u64 *cd_type_cmd_tso_mss) 2963 { 2964 struct i40e_pf *pf; 2965 2966 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) 2967 return 0; 2968 2969 /* Tx timestamps cannot be sampled when doing TSO */ 2970 if (tx_flags & I40E_TX_FLAGS_TSO) 2971 return 0; 2972 2973 /* only timestamp the outbound packet if the user has requested it and 2974 * we are not already transmitting a packet to be timestamped 2975 */ 2976 pf = i40e_netdev_to_pf(tx_ring->netdev); 2977 if (!(pf->flags & I40E_FLAG_PTP)) 2978 return 0; 2979 2980 if (pf->ptp_tx && 2981 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) { 2982 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2983 pf->ptp_tx_start = jiffies; 2984 pf->ptp_tx_skb = skb_get(skb); 2985 } else { 2986 pf->tx_hwtstamp_skipped++; 2987 return 0; 2988 } 2989 2990 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN << 2991 I40E_TXD_CTX_QW1_CMD_SHIFT; 2992 2993 return 1; 2994 } 2995 2996 /** 2997 * i40e_tx_enable_csum - Enable Tx checksum offloads 2998 * @skb: send buffer 2999 * @tx_flags: pointer to Tx flags currently set 3000 * @td_cmd: Tx descriptor command bits to set 3001 * @td_offset: Tx descriptor header offsets to set 3002 * @tx_ring: Tx descriptor ring 3003 * 
@cd_tunneling: ptr to context desc bits 3004 **/ 3005 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, 3006 u32 *td_cmd, u32 *td_offset, 3007 struct i40e_ring *tx_ring, 3008 u32 *cd_tunneling) 3009 { 3010 union { 3011 struct iphdr *v4; 3012 struct ipv6hdr *v6; 3013 unsigned char *hdr; 3014 } ip; 3015 union { 3016 struct tcphdr *tcp; 3017 struct udphdr *udp; 3018 unsigned char *hdr; 3019 } l4; 3020 unsigned char *exthdr; 3021 u32 offset, cmd = 0; 3022 __be16 frag_off; 3023 u8 l4_proto = 0; 3024 3025 if (skb->ip_summed != CHECKSUM_PARTIAL) 3026 return 0; 3027 3028 ip.hdr = skb_network_header(skb); 3029 l4.hdr = skb_transport_header(skb); 3030 3031 /* compute outer L2 header size */ 3032 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; 3033 3034 if (skb->encapsulation) { 3035 u32 tunnel = 0; 3036 /* define outer network header type */ 3037 if (*tx_flags & I40E_TX_FLAGS_IPV4) { 3038 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 3039 I40E_TX_CTX_EXT_IP_IPV4 : 3040 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; 3041 3042 l4_proto = ip.v4->protocol; 3043 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { 3044 tunnel |= I40E_TX_CTX_EXT_IP_IPV6; 3045 3046 exthdr = ip.hdr + sizeof(*ip.v6); 3047 l4_proto = ip.v6->nexthdr; 3048 if (l4.hdr != exthdr) 3049 ipv6_skip_exthdr(skb, exthdr - skb->data, 3050 &l4_proto, &frag_off); 3051 } 3052 3053 /* define outer transport */ 3054 switch (l4_proto) { 3055 case IPPROTO_UDP: 3056 tunnel |= I40E_TXD_CTX_UDP_TUNNELING; 3057 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; 3058 break; 3059 case IPPROTO_GRE: 3060 tunnel |= I40E_TXD_CTX_GRE_TUNNELING; 3061 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; 3062 break; 3063 case IPPROTO_IPIP: 3064 case IPPROTO_IPV6: 3065 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; 3066 l4.hdr = skb_inner_network_header(skb); 3067 break; 3068 default: 3069 if (*tx_flags & I40E_TX_FLAGS_TSO) 3070 return -1; 3071 3072 skb_checksum_help(skb); 3073 return 0; 3074 } 3075 3076 /* compute outer L3 header size */ 3077 tunnel |= ((l4.hdr - ip.hdr) / 4) << 3078 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; 3079 3080 /* switch IP header pointer from outer to inner header */ 3081 ip.hdr = skb_inner_network_header(skb); 3082 3083 /* compute tunnel header size */ 3084 tunnel |= ((ip.hdr - l4.hdr) / 2) << 3085 I40E_TXD_CTX_QW0_NATLEN_SHIFT; 3086 3087 /* indicate if we need to offload outer UDP header */ 3088 if ((*tx_flags & I40E_TX_FLAGS_TSO) && 3089 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 3090 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) 3091 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; 3092 3093 /* record tunnel offload values */ 3094 *cd_tunneling |= tunnel; 3095 3096 /* switch L4 header pointer from outer to inner */ 3097 l4.hdr = skb_inner_transport_header(skb); 3098 l4_proto = 0; 3099 3100 /* reset type as we transition from outer to inner headers */ 3101 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); 3102 if (ip.v4->version == 4) 3103 *tx_flags |= I40E_TX_FLAGS_IPV4; 3104 if (ip.v6->version == 6) 3105 *tx_flags |= I40E_TX_FLAGS_IPV6; 3106 } 3107 3108 /* Enable IP checksum offloads */ 3109 if (*tx_flags & I40E_TX_FLAGS_IPV4) { 3110 l4_proto = ip.v4->protocol; 3111 /* the stack computes the IP header already, the only time we 3112 * need the hardware to recompute it is in the case of TSO. 3113 */ 3114 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
3115 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : 3116 I40E_TX_DESC_CMD_IIPT_IPV4; 3117 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { 3118 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; 3119 3120 exthdr = ip.hdr + sizeof(*ip.v6); 3121 l4_proto = ip.v6->nexthdr; 3122 if (l4.hdr != exthdr) 3123 ipv6_skip_exthdr(skb, exthdr - skb->data, 3124 &l4_proto, &frag_off); 3125 } 3126 3127 /* compute inner L3 header size */ 3128 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; 3129 3130 /* Enable L4 checksum offloads */ 3131 switch (l4_proto) { 3132 case IPPROTO_TCP: 3133 /* enable checksum offloads */ 3134 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; 3135 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 3136 break; 3137 case IPPROTO_SCTP: 3138 /* enable SCTP checksum offload */ 3139 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; 3140 offset |= (sizeof(struct sctphdr) >> 2) << 3141 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 3142 break; 3143 case IPPROTO_UDP: 3144 /* enable UDP checksum offload */ 3145 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; 3146 offset |= (sizeof(struct udphdr) >> 2) << 3147 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; 3148 break; 3149 default: 3150 if (*tx_flags & I40E_TX_FLAGS_TSO) 3151 return -1; 3152 skb_checksum_help(skb); 3153 return 0; 3154 } 3155 3156 *td_cmd |= cmd; 3157 *td_offset |= offset; 3158 3159 return 1; 3160 } 3161 3162 /** 3163 * i40e_create_tx_ctx Build the Tx context descriptor 3164 * @tx_ring: ring to create the descriptor on 3165 * @cd_type_cmd_tso_mss: Quad Word 1 3166 * @cd_tunneling: Quad Word 0 - bits 0-31 3167 * @cd_l2tag2: Quad Word 0 - bits 32-63 3168 **/ 3169 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, 3170 const u64 cd_type_cmd_tso_mss, 3171 const u32 cd_tunneling, const u32 cd_l2tag2) 3172 { 3173 struct i40e_tx_context_desc *context_desc; 3174 int i = tx_ring->next_to_use; 3175 3176 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && 3177 !cd_tunneling && !cd_l2tag2) 3178 return; 3179 3180 /* grab the next descriptor */ 3181 context_desc = I40E_TX_CTXTDESC(tx_ring, i); 3182 3183 i++; 3184 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 3185 3186 /* cpu_to_le32 and assign to struct fields */ 3187 context_desc->tunneling_params = cpu_to_le32(cd_tunneling); 3188 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); 3189 context_desc->rsvd = cpu_to_le16(0); 3190 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); 3191 } 3192 3193 /** 3194 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions 3195 * @tx_ring: the ring to be checked 3196 * @size: the size buffer we want to assure is available 3197 * 3198 * Returns -EBUSY if a stop is needed, else 0 3199 **/ 3200 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) 3201 { 3202 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 3203 /* Memory barrier before checking head and tail */ 3204 smp_mb(); 3205 3206 /* Check again in a case another CPU has just made room available. */ 3207 if (likely(I40E_DESC_UNUSED(tx_ring) < size)) 3208 return -EBUSY; 3209 3210 /* A reprieve! - use start_queue because it doesn't call schedule */ 3211 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 3212 ++tx_ring->tx_stats.restart_queue; 3213 return 0; 3214 } 3215 3216 /** 3217 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet 3218 * @skb: send buffer 3219 * 3220 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire 3221 * and so we need to figure out the cases where we need to linearize the skb. 
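 *
 * As a concrete example, a TSO skb with a gso_size of 1500 whose payload is
 * spread over a long run of 128-byte fragments would force a single wire
 * segment to pull data from far more than six fragments; together with the
 * TSO header and the tail of the preceding fragment that exceeds the
 * 8-buffer limit, so such an skb has to be linearized first.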
3222 * 3223 * For TSO we need to count the TSO header and segment payload separately. 3224 * As such we need to check cases where we have 7 fragments or more as we 3225 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 3226 * the segment payload in the first descriptor, and another 7 for the 3227 * fragments. 3228 **/ 3229 bool __i40e_chk_linearize(struct sk_buff *skb) 3230 { 3231 const struct skb_frag_struct *frag, *stale; 3232 int nr_frags, sum; 3233 3234 /* no need to check if number of frags is less than 7 */ 3235 nr_frags = skb_shinfo(skb)->nr_frags; 3236 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) 3237 return false; 3238 3239 /* We need to walk through the list and validate that each group 3240 * of 6 fragments totals at least gso_size. 3241 */ 3242 nr_frags -= I40E_MAX_BUFFER_TXD - 2; 3243 frag = &skb_shinfo(skb)->frags[0]; 3244 3245 /* Initialize sum to the negative of (gso_size - 1). We use this 3246 * as the worst case scenario in which the frag ahead of us only 3247 * provides one byte, which is why we are limited to 6 descriptors 3248 * for a single transmit as the header and previous fragment are 3249 * already consuming 2 descriptors. 3250 */ 3251 sum = 1 - skb_shinfo(skb)->gso_size; 3252 3253 /* Add size of frags 0 through 4 to create our initial sum */ 3254 sum += skb_frag_size(frag++); 3255 sum += skb_frag_size(frag++); 3256 sum += skb_frag_size(frag++); 3257 sum += skb_frag_size(frag++); 3258 sum += skb_frag_size(frag++); 3259 3260 /* Walk through fragments adding latest fragment, testing it, and 3261 * then removing stale fragments from the sum. 3262 */ 3263 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { 3264 int stale_size = skb_frag_size(stale); 3265 3266 sum += skb_frag_size(frag++); 3267 3268 /* The stale fragment may present us with a smaller 3269 * descriptor than the actual fragment size. To account 3270 * for that we need to remove all the data on the front and 3271 * figure out what the remainder would be in the last 3272 * descriptor associated with the fragment.
3273 */ 3274 if (stale_size > I40E_MAX_DATA_PER_TXD) { 3275 int align_pad = -(stale->page_offset) & 3276 (I40E_MAX_READ_REQ_SIZE - 1); 3277 3278 sum -= align_pad; 3279 stale_size -= align_pad; 3280 3281 do { 3282 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; 3283 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; 3284 } while (stale_size > I40E_MAX_DATA_PER_TXD); 3285 } 3286 3287 /* if sum is negative we failed to make sufficient progress */ 3288 if (sum < 0) 3289 return true; 3290 3291 if (!nr_frags--) 3292 break; 3293 3294 sum -= stale_size; 3295 } 3296 3297 return false; 3298 } 3299 3300 /** 3301 * i40e_tx_map - Build the Tx descriptor 3302 * @tx_ring: ring to send buffer on 3303 * @skb: send buffer 3304 * @first: first buffer info buffer to use 3305 * @tx_flags: collected send information 3306 * @hdr_len: size of the packet header 3307 * @td_cmd: the command field in the descriptor 3308 * @td_offset: offset for checksum or crc 3309 * 3310 * Returns 0 on success, -1 on failure to DMA 3311 **/ 3312 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, 3313 struct i40e_tx_buffer *first, u32 tx_flags, 3314 const u8 hdr_len, u32 td_cmd, u32 td_offset) 3315 { 3316 unsigned int data_len = skb->data_len; 3317 unsigned int size = skb_headlen(skb); 3318 struct skb_frag_struct *frag; 3319 struct i40e_tx_buffer *tx_bi; 3320 struct i40e_tx_desc *tx_desc; 3321 u16 i = tx_ring->next_to_use; 3322 u32 td_tag = 0; 3323 dma_addr_t dma; 3324 u16 desc_count = 1; 3325 3326 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { 3327 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; 3328 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> 3329 I40E_TX_FLAGS_VLAN_SHIFT; 3330 } 3331 3332 first->tx_flags = tx_flags; 3333 3334 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 3335 3336 tx_desc = I40E_TX_DESC(tx_ring, i); 3337 tx_bi = first; 3338 3339 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 3340 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; 3341 3342 if (dma_mapping_error(tx_ring->dev, dma)) 3343 goto dma_error; 3344 3345 /* record length, and DMA address */ 3346 dma_unmap_len_set(tx_bi, len, size); 3347 dma_unmap_addr_set(tx_bi, dma, dma); 3348 3349 /* align size to end of page */ 3350 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); 3351 tx_desc->buffer_addr = cpu_to_le64(dma); 3352 3353 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { 3354 tx_desc->cmd_type_offset_bsz = 3355 build_ctob(td_cmd, td_offset, 3356 max_data, td_tag); 3357 3358 tx_desc++; 3359 i++; 3360 desc_count++; 3361 3362 if (i == tx_ring->count) { 3363 tx_desc = I40E_TX_DESC(tx_ring, 0); 3364 i = 0; 3365 } 3366 3367 dma += max_data; 3368 size -= max_data; 3369 3370 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; 3371 tx_desc->buffer_addr = cpu_to_le64(dma); 3372 } 3373 3374 if (likely(!data_len)) 3375 break; 3376 3377 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, 3378 size, td_tag); 3379 3380 tx_desc++; 3381 i++; 3382 desc_count++; 3383 3384 if (i == tx_ring->count) { 3385 tx_desc = I40E_TX_DESC(tx_ring, 0); 3386 i = 0; 3387 } 3388 3389 size = skb_frag_size(frag); 3390 data_len -= size; 3391 3392 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 3393 DMA_TO_DEVICE); 3394 3395 tx_bi = &tx_ring->tx_bi[i]; 3396 } 3397 3398 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 3399 3400 i++; 3401 if (i == tx_ring->count) 3402 i = 0; 3403 3404 tx_ring->next_to_use = i; 3405 3406 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); 3407 3408 /* write last descriptor with EOP bit */ 3409 td_cmd |= I40E_TX_DESC_CMD_EOP; 3410 3411 
/* We OR these values together to check both against 4 (WB_STRIDE) 3412 * below. This is safe since we don't re-use desc_count afterwards. 3413 */ 3414 desc_count |= ++tx_ring->packet_stride; 3415 3416 if (desc_count >= WB_STRIDE) { 3417 /* write last descriptor with RS bit set */ 3418 td_cmd |= I40E_TX_DESC_CMD_RS; 3419 tx_ring->packet_stride = 0; 3420 } 3421 3422 tx_desc->cmd_type_offset_bsz = 3423 build_ctob(td_cmd, td_offset, size, td_tag); 3424 3425 /* Force memory writes to complete before letting h/w know there 3426 * are new descriptors to fetch. 3427 * 3428 * We also use this memory barrier to make certain all of the 3429 * status bits have been updated before next_to_watch is written. 3430 */ 3431 wmb(); 3432 3433 /* set next_to_watch value indicating a packet is present */ 3434 first->next_to_watch = tx_desc; 3435 3436 /* notify HW of packet */ 3437 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { 3438 writel(i, tx_ring->tail); 3439 3440 /* we need this if more than one processor can write to our tail 3441 * at a time, it synchronizes IO on IA64/Altix systems 3442 */ 3443 mmiowb(); 3444 } 3445 3446 return 0; 3447 3448 dma_error: 3449 dev_info(tx_ring->dev, "TX DMA map failed\n"); 3450 3451 /* clear dma mappings for failed tx_bi map */ 3452 for (;;) { 3453 tx_bi = &tx_ring->tx_bi[i]; 3454 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); 3455 if (tx_bi == first) 3456 break; 3457 if (i == 0) 3458 i = tx_ring->count; 3459 i--; 3460 } 3461 3462 tx_ring->next_to_use = i; 3463 3464 return -1; 3465 } 3466 3467 /** 3468 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring 3469 * @xdp: data to transmit 3470 * @xdp_ring: XDP Tx ring 3471 **/ 3472 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, 3473 struct i40e_ring *xdp_ring) 3474 { 3475 u16 i = xdp_ring->next_to_use; 3476 struct i40e_tx_buffer *tx_bi; 3477 struct i40e_tx_desc *tx_desc; 3478 u32 size = xdpf->len; 3479 dma_addr_t dma; 3480 3481 if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) { 3482 xdp_ring->tx_stats.tx_busy++; 3483 return I40E_XDP_CONSUMED; 3484 } 3485 3486 dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE); 3487 if (dma_mapping_error(xdp_ring->dev, dma)) 3488 return I40E_XDP_CONSUMED; 3489 3490 tx_bi = &xdp_ring->tx_bi[i]; 3491 tx_bi->bytecount = size; 3492 tx_bi->gso_segs = 1; 3493 tx_bi->xdpf = xdpf; 3494 3495 /* record length, and DMA address */ 3496 dma_unmap_len_set(tx_bi, len, size); 3497 dma_unmap_addr_set(tx_bi, dma, dma); 3498 3499 tx_desc = I40E_TX_DESC(xdp_ring, i); 3500 tx_desc->buffer_addr = cpu_to_le64(dma); 3501 tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC 3502 | I40E_TXD_CMD, 3503 0, size, 0); 3504 3505 /* Make certain all of the status bits have been updated 3506 * before next_to_watch is written. 
3507 */ 3508 smp_wmb(); 3509 3510 i++; 3511 if (i == xdp_ring->count) 3512 i = 0; 3513 3514 tx_bi->next_to_watch = tx_desc; 3515 xdp_ring->next_to_use = i; 3516 3517 return I40E_XDP_TX; 3518 } 3519 3520 /** 3521 * i40e_xmit_frame_ring - Sends buffer on Tx ring 3522 * @skb: send buffer 3523 * @tx_ring: ring to send buffer on 3524 * 3525 * Returns NETDEV_TX_OK if sent, else an error code 3526 **/ 3527 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, 3528 struct i40e_ring *tx_ring) 3529 { 3530 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT; 3531 u32 cd_tunneling = 0, cd_l2tag2 = 0; 3532 struct i40e_tx_buffer *first; 3533 u32 td_offset = 0; 3534 u32 tx_flags = 0; 3535 __be16 protocol; 3536 u32 td_cmd = 0; 3537 u8 hdr_len = 0; 3538 int tso, count; 3539 int tsyn; 3540 3541 /* prefetch the data, we'll need it later */ 3542 prefetch(skb->data); 3543 3544 i40e_trace(xmit_frame_ring, skb, tx_ring); 3545 3546 count = i40e_xmit_descriptor_count(skb); 3547 if (i40e_chk_linearize(skb, count)) { 3548 if (__skb_linearize(skb)) { 3549 dev_kfree_skb_any(skb); 3550 return NETDEV_TX_OK; 3551 } 3552 count = i40e_txd_use_count(skb->len); 3553 tx_ring->tx_stats.tx_linearize++; 3554 } 3555 3556 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, 3557 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, 3558 * + 4 desc gap to avoid the cache line where head is, 3559 * + 1 desc for context descriptor, 3560 * otherwise try next time 3561 */ 3562 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { 3563 tx_ring->tx_stats.tx_busy++; 3564 return NETDEV_TX_BUSY; 3565 } 3566 3567 /* record the location of the first descriptor for this packet */ 3568 first = &tx_ring->tx_bi[tx_ring->next_to_use]; 3569 first->skb = skb; 3570 first->bytecount = skb->len; 3571 first->gso_segs = 1; 3572 3573 /* prepare the xmit flags */ 3574 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) 3575 goto out_drop; 3576 3577 /* obtain protocol of skb */ 3578 protocol = vlan_get_protocol(skb); 3579 3580 /* setup IPv4/IPv6 offloads */ 3581 if (protocol == htons(ETH_P_IP)) 3582 tx_flags |= I40E_TX_FLAGS_IPV4; 3583 else if (protocol == htons(ETH_P_IPV6)) 3584 tx_flags |= I40E_TX_FLAGS_IPV6; 3585 3586 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); 3587 3588 if (tso < 0) 3589 goto out_drop; 3590 else if (tso) 3591 tx_flags |= I40E_TX_FLAGS_TSO; 3592 3593 /* Always offload the checksum, since it's in the data descriptor */ 3594 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, 3595 tx_ring, &cd_tunneling); 3596 if (tso < 0) 3597 goto out_drop; 3598 3599 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); 3600 3601 if (tsyn) 3602 tx_flags |= I40E_TX_FLAGS_TSYN; 3603 3604 skb_tx_timestamp(skb); 3605 3606 /* always enable CRC insertion offload */ 3607 td_cmd |= I40E_TX_DESC_CMD_ICRC; 3608 3609 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, 3610 cd_tunneling, cd_l2tag2); 3611 3612 /* Add Flow Director ATR if it's enabled. 3613 * 3614 * NOTE: this must always be directly before the data descriptor. 
3615 */ 3616 i40e_atr(tx_ring, skb, tx_flags); 3617 3618 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, 3619 td_cmd, td_offset)) 3620 goto cleanup_tx_tstamp; 3621 3622 return NETDEV_TX_OK; 3623 3624 out_drop: 3625 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); 3626 dev_kfree_skb_any(first->skb); 3627 first->skb = NULL; 3628 cleanup_tx_tstamp: 3629 if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) { 3630 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev); 3631 3632 dev_kfree_skb_any(pf->ptp_tx_skb); 3633 pf->ptp_tx_skb = NULL; 3634 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); 3635 } 3636 3637 return NETDEV_TX_OK; 3638 } 3639 3640 /** 3641 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer 3642 * @skb: send buffer 3643 * @netdev: network interface device structure 3644 * 3645 * Returns NETDEV_TX_OK if sent, else an error code 3646 **/ 3647 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 3648 { 3649 struct i40e_netdev_priv *np = netdev_priv(netdev); 3650 struct i40e_vsi *vsi = np->vsi; 3651 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; 3652 3653 /* hardware can't handle really short frames, hardware padding works 3654 * beyond this point 3655 */ 3656 if (skb_put_padto(skb, I40E_MIN_TX_LEN)) 3657 return NETDEV_TX_OK; 3658 3659 return i40e_xmit_frame_ring(skb, tx_ring); 3660 } 3661 3662 /** 3663 * i40e_xdp_xmit - Implements ndo_xdp_xmit 3664 * @dev: netdev 3665 * @xdp: XDP buffer 3666 * 3667 * Returns number of frames successfully sent. Frames that fail are 3668 * free'ed via XDP return API. 3669 * 3670 * For error cases, a negative errno code is returned and no-frames 3671 * are transmitted (caller must handle freeing frames). 3672 **/ 3673 int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames) 3674 { 3675 struct i40e_netdev_priv *np = netdev_priv(dev); 3676 unsigned int queue_index = smp_processor_id(); 3677 struct i40e_vsi *vsi = np->vsi; 3678 int drops = 0; 3679 int i; 3680 3681 if (test_bit(__I40E_VSI_DOWN, vsi->state)) 3682 return -ENETDOWN; 3683 3684 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) 3685 return -ENXIO; 3686 3687 for (i = 0; i < n; i++) { 3688 struct xdp_frame *xdpf = frames[i]; 3689 int err; 3690 3691 err = i40e_xmit_xdp_ring(xdpf, vsi->xdp_rings[queue_index]); 3692 if (err != I40E_XDP_TX) { 3693 xdp_return_frame_rx_napi(xdpf); 3694 drops++; 3695 } 3696 } 3697 3698 return n - drops; 3699 } 3700 3701 /** 3702 * i40e_xdp_flush - Implements ndo_xdp_flush 3703 * @dev: netdev 3704 **/ 3705 void i40e_xdp_flush(struct net_device *dev) 3706 { 3707 struct i40e_netdev_priv *np = netdev_priv(dev); 3708 unsigned int queue_index = smp_processor_id(); 3709 struct i40e_vsi *vsi = np->vsi; 3710 3711 if (test_bit(__I40E_VSI_DOWN, vsi->state)) 3712 return; 3713 3714 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) 3715 return; 3716 3717 i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]); 3718 } 3719
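/* Purely illustrative sketch, not part of this file: the entry points above
 * are exported through the net_device_ops table that i40e_main.c registers,
 * roughly along these lines:
 *
 *	static const struct net_device_ops i40e_netdev_ops = {
 *		...
 *		.ndo_start_xmit	= i40e_lan_xmit_frame,
 *		.ndo_xdp_xmit	= i40e_xdp_xmit,
 *		.ndo_xdp_flush	= i40e_xdp_flush,
 *		...
 *	};
 */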