/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC4000) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EF4_MAX_INT_ERRORS internal errors occur within
 * EF4_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EF4_INT_ERROR_EXPIRE 3600
#define EF4_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EF4_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EF4_CHANNEL_MAGIC_TEST		0x000101
#define _EF4_CHANNEL_MAGIC_FILL		0x000102
#define _EF4_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EF4_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EF4_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EF4_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EF4_CHANNEL_MAGIC_TEST(_channel)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EF4_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_FILL,			\
			   ef4_rx_queue_index(_rx_queue))
#define EF4_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_RX_DRAIN,			\
			   ef4_rx_queue_index(_rx_queue))
#define EF4_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
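/* For example (purely illustrative): EF4_CHANNEL_MAGIC_TEST() on channel 3
 * encodes as (_EF4_CHANNEL_MAGIC_TEST << 8) | 3 = 0x00010103, and
 * _EF4_CHANNEL_MAGIC_CODE(0x00010103) recovers _EF4_CHANNEL_MAGIC_TEST.
 */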

static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void ef4_write_buf_tbl(struct ef4_nic *efx, ef4_qword_t *value,
				     unsigned int index)
{
	ef4_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

static bool ef4_masked_compare_oword(const ef4_oword_t *a, const ef4_oword_t *b,
				     const ef4_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int ef4_farch_test_registers(struct ef4_nic *efx,
			     const struct ef4_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0;
	int i, j;
	ef4_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EF4_INVERT_OWORD(imask);

		ef4_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EF4_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EF4_AND_OWORD(reg, original, mask);
			EF4_SET_OWORD32(reg, j, j, 1);

			ef4_writeo(efx, &reg, address);
			ef4_reado(efx, &buf, address);

			if (ef4_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EF4_OR_OWORD(reg, original, mask);
			EF4_SET_OWORD32(reg, j, j, 0);

			ef4_writeo(efx, &reg, address);
			ef4_reado(efx, &buf, address);

			if (ef4_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		ef4_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EF4_OWORD_FMT" read "EF4_OWORD_FMT
		  " at address 0x%x mask "EF4_OWORD_FMT"\n", EF4_OWORD_VAL(reg),
		  EF4_OWORD_VAL(buf), address, EF4_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * ef4_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
ef4_init_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	ef4_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EF4_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EF4_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EF4_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		ef4_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
ef4_fini_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	ef4_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EF4_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	ef4_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int ef4_alloc_special_buffer(struct ef4_nic *efx,
				    struct ef4_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EF4_BUF_SIZE);

	if (ef4_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EF4_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EF4_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}

static void
ef4_free_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	ef4_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue)
{
	unsigned write_ptr;
	ef4_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EF4_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	ef4_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void ef4_farch_push_tx_desc(struct ef4_tx_queue *tx_queue,
					  const ef4_qword_t *txd)
{
	unsigned write_ptr;
	ef4_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EF4_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	ef4_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

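/* Note: ef4_farch_push_tx_desc() writes the first new descriptor into the
 * TX_DESC_UPD register together with the push command, so the NIC can
 * process it without first fetching it from host memory;
 * ef4_farch_notify_tx_desc() only updates the write pointer.
 * ef4_farch_tx_write() below picks whichever is appropriate.
 */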

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;
	ef4_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	tx_queue->xmit_more_available = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = ef4_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EF4_BUG_ON_PARANOID(buffer->flags & EF4_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EF4_TX_BUF_CONT != 1);
		EF4_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EF4_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (ef4_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = ef4_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		ef4_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		ef4_farch_notify_tx_desc(tx_queue);
	}
}

unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len)
{
	/* Don't cross 4K boundaries with descriptors. */
	unsigned int limit = (~dma_addr & (EF4_PAGE_SIZE - 1)) + 1;

	len = min(limit, len);

	if (EF4_WORKAROUND_5391(tx_queue->efx) && (dma_addr & 0xf))
		len = min_t(unsigned int, len, 512 - (dma_addr & 0xf));

	return len;
}


/* Allocate hardware resources for a TX queue */
int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return ef4_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(ef4_qword_t));
}

void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t reg;

	/* Pin TX descriptor ring */
	ef4_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EF4_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		int csum = tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD;
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	ef4_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EF4_MAX_TX_QUEUES > 128);

		ef4_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		ef4_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		ef4_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void ef4_farch_flush_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EF4_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	ef4_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EF4_ZERO_OWORD(tx_desc_ptr);
	ef4_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	ef4_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue)
{
	ef4_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
ef4_farch_build_rx_desc(struct ef4_rx_queue *rx_queue, unsigned index)
{
	struct ef4_rx_buffer *rx_buf;
	ef4_qword_t *rxd;

	rxd = ef4_rx_desc(rx_queue, index);
	rx_buf = ef4_rx_buffer(rx_queue, index);
	EF4_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	ef4_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		ef4_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EF4_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	ef4_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			ef4_rx_queue_index(rx_queue));
}

int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return ef4_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(ef4_qword_t));
}

void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue)
{
	ef4_oword_t rx_desc_ptr;
	struct ef4_nic *efx = rx_queue->efx;
	bool is_b0 = ef4_nic_rev(efx) >= EF4_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible). In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  ef4_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	ef4_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EF4_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      ef4_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      ef4_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 ef4_rx_queue_index(rx_queue));
}

static void ef4_farch_flush_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	ef4_oword_t rx_flush_descq;

	EF4_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     ef4_rx_queue_index(rx_queue));
	ef4_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue)
{
	ef4_oword_t rx_desc_ptr;
	struct ef4_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EF4_ZERO_OWORD(rx_desc_ptr);
	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 ef4_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	ef4_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue)
{
	ef4_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

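/* Flush sequencing, as implemented below: all TX queue flushes are
 * requested up front, while RX flushes are limited to EF4_RX_FLUSH_COUNT
 * outstanding requests at a time. Completions arrive as driver events;
 * for TX queues whose flush completed without such an event, a TX_DRAIN
 * magic event is generated instead.
 */
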
/* ef4_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool ef4_farch_flush_wake(struct ef4_nic *efx)
{
	/* Ensure that all updates are visible to ef4_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EF4_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool ef4_check_tx_flush_complete(struct ef4_nic *efx)
{
	bool i = true;
	ef4_oword_t txd_ptr_tbl;
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EF4_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EF4_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				ef4_farch_magic_event(channel,
						      EF4_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int ef4_farch_do_flush(struct ef4_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	struct ef4_tx_queue *tx_queue;
	int rc = 0;

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_farch_flush_tx_queue(tx_queue);
		}
		ef4_for_each_channel_rx_queue(rx_queue, channel) {
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		ef4_for_each_channel(channel, efx) {
			ef4_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EF4_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					ef4_farch_flush_rx_queue(rx_queue);
				}
			}
		}

		timeout = wait_event_timeout(efx->flush_wq,
					     ef4_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->active_queues) &&
	    !ef4_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}

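/* Flush and then tear down every RX and TX queue. Nothing is written to
 * the NIC during EEH recovery, and the flush itself is only attempted
 * while bus mastering (DMA) is still enabled.
 */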
int ef4_farch_fini_dmaq(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = ef4_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		ef4_for_each_channel(channel, efx) {
			ef4_for_each_channel_rx_queue(rx_queue, channel)
				ef4_farch_rx_fini(rx_queue);
			ef4_for_each_channel_tx_queue(tx_queue, channel)
				ef4_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}

/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events. This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through ef4_check_tx_flush_complete())
 * If we don't fix this up, on the next call to ef4_realloc_channels() we won't
 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
 * for batched flush requests; and the efx->active_queues gets messed up because
 * we keep incrementing for the newly initialised queues, but it never went to
 * zero previously. Then we get a timeout every time we try to restart the
 * queues, as it doesn't go back to zero when we should be flushing the queues.
 */
void ef4_farch_finish_flr(struct ef4_nic *efx)
{
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
}


/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void ef4_farch_ev_read_ack(struct ef4_channel *channel)
{
	ef4_dword_t reg;
	struct ef4_nic *efx = channel->efx;

	EF4_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	ef4_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

/* Use HW to insert a SW defined event */
void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
			      ef4_qword_t *event)
{
	ef4_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EF4_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	ef4_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic)
{
	ef4_qword_t event;

	EF4_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	ef4_farch_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct ef4_tx_queue *tx_queue;
	struct ef4_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(READ_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = ef4_channel_get_tx_queue(
			channel, tx_ev_q_label % EF4_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		ef4_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = ef4_channel_get_tx_queue(
			channel, tx_ev_q_label % EF4_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		ef4_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EF4_QWORD_FMT"\n", channel->channel,
			  EF4_QWORD_VAL(*event));
	}

	return tx_packets;
}

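/* Worked example of the completion arithmetic above (illustrative): with
 * ptr_mask == 1023, read_count == 1020 and an event naming descriptor 2,
 * (2 - 1020) & 1023 == 6, so the count wraps correctly around the ring.
 */
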
/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
				      const ef4_qword_t *event)
{
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EF4_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EF4_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EF4_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) ?
			  0 : EF4_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message. FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EF4_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  ef4_rx_queue_index(rx_queue), EF4_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EF4_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
949 */ 950 static bool 951 ef4_farch_handle_rx_bad_index(struct ef4_rx_queue *rx_queue, unsigned index) 952 { 953 struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue); 954 struct ef4_nic *efx = rx_queue->efx; 955 unsigned expected, dropped; 956 957 if (rx_queue->scatter_n && 958 index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & 959 rx_queue->ptr_mask)) { 960 ++channel->n_rx_nodesc_trunc; 961 return true; 962 } 963 964 expected = rx_queue->removed_count & rx_queue->ptr_mask; 965 dropped = (index - expected) & rx_queue->ptr_mask; 966 netif_info(efx, rx_err, efx->net_dev, 967 "dropped %d events (index=%d expected=%d)\n", 968 dropped, index, expected); 969 970 ef4_schedule_reset(efx, EF4_WORKAROUND_5676(efx) ? 971 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); 972 return false; 973 } 974 975 /* Handle a packet received event 976 * 977 * The NIC gives a "discard" flag if it's a unicast packet with the 978 * wrong destination address 979 * Also "is multicast" and "matches multicast filter" flags can be used to 980 * discard non-matching multicast packets. 981 */ 982 static void 983 ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event) 984 { 985 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; 986 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; 987 unsigned expected_ptr; 988 bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; 989 u16 flags; 990 struct ef4_rx_queue *rx_queue; 991 struct ef4_nic *efx = channel->efx; 992 993 if (unlikely(READ_ONCE(efx->reset_pending))) 994 return; 995 996 rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); 997 rx_ev_sop = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); 998 WARN_ON(EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != 999 channel->channel); 1000 1001 rx_queue = ef4_channel_get_rx_queue(channel); 1002 1003 rx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); 1004 expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & 1005 rx_queue->ptr_mask); 1006 1007 /* Check for partial drops and other errors */ 1008 if (unlikely(rx_ev_desc_ptr != expected_ptr) || 1009 unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { 1010 if (rx_ev_desc_ptr != expected_ptr && 1011 !ef4_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) 1012 return; 1013 1014 /* Discard all pending fragments */ 1015 if (rx_queue->scatter_n) { 1016 ef4_rx_packet( 1017 rx_queue, 1018 rx_queue->removed_count & rx_queue->ptr_mask, 1019 rx_queue->scatter_n, 0, EF4_RX_PKT_DISCARD); 1020 rx_queue->removed_count += rx_queue->scatter_n; 1021 rx_queue->scatter_n = 0; 1022 } 1023 1024 /* Return if there is no new fragment */ 1025 if (rx_ev_desc_ptr != expected_ptr) 1026 return; 1027 1028 /* Discard new fragment if not SOP */ 1029 if (!rx_ev_sop) { 1030 ef4_rx_packet( 1031 rx_queue, 1032 rx_queue->removed_count & rx_queue->ptr_mask, 1033 1, 0, EF4_RX_PKT_DISCARD); 1034 ++rx_queue->removed_count; 1035 return; 1036 } 1037 } 1038 1039 ++rx_queue->scatter_n; 1040 if (rx_ev_cont) 1041 return; 1042 1043 rx_ev_byte_cnt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 1044 rx_ev_pkt_ok = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); 1045 rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 1046 1047 if (likely(rx_ev_pkt_ok)) { 1048 /* If packet is marked as OK then we can rely on the 1049 * hardware checksum and classification. 
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EF4_RX_PKT_TCP;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EF4_RX_PKT_CSUMMED;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = ef4_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EF4_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	ef4_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}

/* If this flush done event corresponds to a &struct ef4_tx_queue, then
 * send an %EF4_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
{
	struct ef4_tx_queue *tx_queue;
	int qid;

	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EF4_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = ef4_get_tx_queue(efx, qid / EF4_TXQ_TYPES,
					    qid % EF4_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			ef4_farch_magic_event(tx_queue->channel,
					      EF4_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}

/* If this flush done event corresponds to a &struct ef4_rx_queue: If the flush
 * was successful then send an %EF4_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
1113 */ 1114 static void 1115 ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event) 1116 { 1117 struct ef4_channel *channel; 1118 struct ef4_rx_queue *rx_queue; 1119 int qid; 1120 bool failed; 1121 1122 qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); 1123 failed = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 1124 if (qid >= efx->n_channels) 1125 return; 1126 channel = ef4_get_channel(efx, qid); 1127 if (!ef4_channel_has_rx_queue(channel)) 1128 return; 1129 rx_queue = ef4_channel_get_rx_queue(channel); 1130 1131 if (failed) { 1132 netif_info(efx, hw, efx->net_dev, 1133 "RXQ %d flush retry\n", qid); 1134 rx_queue->flush_pending = true; 1135 atomic_inc(&efx->rxq_flush_pending); 1136 } else { 1137 ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue), 1138 EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); 1139 } 1140 atomic_dec(&efx->rxq_flush_outstanding); 1141 if (ef4_farch_flush_wake(efx)) 1142 wake_up(&efx->flush_wq); 1143 } 1144 1145 static void 1146 ef4_farch_handle_drain_event(struct ef4_channel *channel) 1147 { 1148 struct ef4_nic *efx = channel->efx; 1149 1150 WARN_ON(atomic_read(&efx->active_queues) == 0); 1151 atomic_dec(&efx->active_queues); 1152 if (ef4_farch_flush_wake(efx)) 1153 wake_up(&efx->flush_wq); 1154 } 1155 1156 static void ef4_farch_handle_generated_event(struct ef4_channel *channel, 1157 ef4_qword_t *event) 1158 { 1159 struct ef4_nic *efx = channel->efx; 1160 struct ef4_rx_queue *rx_queue = 1161 ef4_channel_has_rx_queue(channel) ? 1162 ef4_channel_get_rx_queue(channel) : NULL; 1163 unsigned magic, code; 1164 1165 magic = EF4_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 1166 code = _EF4_CHANNEL_MAGIC_CODE(magic); 1167 1168 if (magic == EF4_CHANNEL_MAGIC_TEST(channel)) { 1169 channel->event_test_cpu = raw_smp_processor_id(); 1170 } else if (rx_queue && magic == EF4_CHANNEL_MAGIC_FILL(rx_queue)) { 1171 /* The queue must be empty, so we won't receive any rx 1172 * events, so ef4_process_channel() won't refill the 1173 * queue. 
		ef4_fast_push_rx_descriptors(rx_queue, true);
	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		ef4_farch_handle_drain_event(channel);
	} else if (code == _EF4_CHANNEL_MAGIC_TX_DRAIN) {
		ef4_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EF4_QWORD_FMT"\n",
			  channel->channel, EF4_QWORD_VAL(*event));
	}
}

static void
ef4_farch_handle_driver_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		ef4_farch_handle_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		ef4_farch_handle_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		ef4_schedule_reset(efx,
				   EF4_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data,
			  ev_sub_data);
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
1246 " TX Q %d is disabled.\n", ev_sub_data, 1247 ev_sub_data); 1248 ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR); 1249 break; 1250 default: 1251 netif_vdbg(efx, hw, efx->net_dev, 1252 "channel %d unknown driver event code %d " 1253 "data %04x\n", channel->channel, ev_sub_code, 1254 ev_sub_data); 1255 break; 1256 } 1257 } 1258 1259 int ef4_farch_ev_process(struct ef4_channel *channel, int budget) 1260 { 1261 struct ef4_nic *efx = channel->efx; 1262 unsigned int read_ptr; 1263 ef4_qword_t event, *p_event; 1264 int ev_code; 1265 int tx_packets = 0; 1266 int spent = 0; 1267 1268 if (budget <= 0) 1269 return spent; 1270 1271 read_ptr = channel->eventq_read_ptr; 1272 1273 for (;;) { 1274 p_event = ef4_event(channel, read_ptr); 1275 event = *p_event; 1276 1277 if (!ef4_event_present(&event)) 1278 /* End of events */ 1279 break; 1280 1281 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 1282 "channel %d event is "EF4_QWORD_FMT"\n", 1283 channel->channel, EF4_QWORD_VAL(event)); 1284 1285 /* Clear this event by marking it all ones */ 1286 EF4_SET_QWORD(*p_event); 1287 1288 ++read_ptr; 1289 1290 ev_code = EF4_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1291 1292 switch (ev_code) { 1293 case FSE_AZ_EV_CODE_RX_EV: 1294 ef4_farch_handle_rx_event(channel, &event); 1295 if (++spent == budget) 1296 goto out; 1297 break; 1298 case FSE_AZ_EV_CODE_TX_EV: 1299 tx_packets += ef4_farch_handle_tx_event(channel, 1300 &event); 1301 if (tx_packets > efx->txq_entries) { 1302 spent = budget; 1303 goto out; 1304 } 1305 break; 1306 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1307 ef4_farch_handle_generated_event(channel, &event); 1308 break; 1309 case FSE_AZ_EV_CODE_DRIVER_EV: 1310 ef4_farch_handle_driver_event(channel, &event); 1311 break; 1312 case FSE_AZ_EV_CODE_GLOBAL_EV: 1313 if (efx->type->handle_global_event && 1314 efx->type->handle_global_event(channel, &event)) 1315 break; 1316 /* else fall through */ 1317 default: 1318 netif_err(channel->efx, hw, channel->efx->net_dev, 1319 "channel %d unknown event type %d (data " 1320 EF4_QWORD_FMT ")\n", channel->channel, 1321 ev_code, EF4_QWORD_VAL(event)); 1322 } 1323 } 1324 1325 out: 1326 channel->eventq_read_ptr = read_ptr; 1327 return spent; 1328 } 1329 1330 /* Allocate buffer table entries for event queue */ 1331 int ef4_farch_ev_probe(struct ef4_channel *channel) 1332 { 1333 struct ef4_nic *efx = channel->efx; 1334 unsigned entries; 1335 1336 entries = channel->eventq_mask + 1; 1337 return ef4_alloc_special_buffer(efx, &channel->eventq, 1338 entries * sizeof(ef4_qword_t)); 1339 } 1340 1341 int ef4_farch_ev_init(struct ef4_channel *channel) 1342 { 1343 ef4_oword_t reg; 1344 struct ef4_nic *efx = channel->efx; 1345 1346 netif_dbg(efx, hw, efx->net_dev, 1347 "channel %d event queue in special buffers %d-%d\n", 1348 channel->channel, channel->eventq.index, 1349 channel->eventq.index + channel->eventq.entries - 1); 1350 1351 /* Pin event queue buffer */ 1352 ef4_init_special_buffer(efx, &channel->eventq); 1353 1354 /* Fill event queue with all ones (i.e. 
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EF4_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	return 0;
}

void ef4_farch_ev_fini(struct ef4_channel *channel)
{
	ef4_oword_t reg;
	struct ef4_nic *efx = channel->efx;

	/* Remove event queue from card */
	EF4_ZERO_OWORD(reg);
	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	/* Unpin event queue */
	ef4_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void ef4_farch_ev_remove(struct ef4_channel *channel)
{
	ef4_free_special_buffer(channel->efx, &channel->eventq);
}


void ef4_farch_ev_test_generate(struct ef4_channel *channel)
{
	ef4_farch_magic_event(channel, EF4_CHANNEL_MAGIC_TEST(channel));
}

void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue)
{
	ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
			      EF4_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void ef4_farch_interrupts(struct ef4_nic *efx,
					bool enabled, bool force)
{
	ef4_oword_t int_en_reg_ker;

	EF4_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	ef4_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void ef4_farch_irq_enable_master(struct ef4_nic *efx)
{
	EF4_ZERO_OWORD(*((ef4_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	ef4_farch_interrupts(efx, true, false);
}

void ef4_farch_irq_disable_master(struct ef4_nic *efx)
{
	/* Disable interrupts */
	ef4_farch_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
int ef4_farch_irq_test_generate(struct ef4_nic *efx)
{
	ef4_farch_interrupts(efx, true, true);
	return 0;
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	ef4_oword_t fatal_intr;
	int error, mem_perr;

	ef4_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EF4_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EF4_OWORD_FMT" status "
		  EF4_OWORD_FMT ": %s\n", EF4_OWORD_VAL(*int_ker),
		  EF4_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");
"disabling bus mastering" : "no recognised error"); 1462 1463 /* If this is a memory parity error dump which blocks are offending */ 1464 mem_perr = (EF4_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || 1465 EF4_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); 1466 if (mem_perr) { 1467 ef4_oword_t reg; 1468 ef4_reado(efx, ®, FR_AZ_MEM_STAT); 1469 netif_err(efx, hw, efx->net_dev, 1470 "SYSTEM ERROR: memory parity error "EF4_OWORD_FMT"\n", 1471 EF4_OWORD_VAL(reg)); 1472 } 1473 1474 /* Disable both devices */ 1475 pci_clear_master(efx->pci_dev); 1476 if (ef4_nic_is_dual_func(efx)) 1477 pci_clear_master(nic_data->pci_dev2); 1478 ef4_farch_irq_disable_master(efx); 1479 1480 /* Count errors and reset or disable the NIC accordingly */ 1481 if (efx->int_error_count == 0 || 1482 time_after(jiffies, efx->int_error_expire)) { 1483 efx->int_error_count = 0; 1484 efx->int_error_expire = 1485 jiffies + EF4_INT_ERROR_EXPIRE * HZ; 1486 } 1487 if (++efx->int_error_count < EF4_MAX_INT_ERRORS) { 1488 netif_err(efx, hw, efx->net_dev, 1489 "SYSTEM ERROR - reset scheduled\n"); 1490 ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1491 } else { 1492 netif_err(efx, hw, efx->net_dev, 1493 "SYSTEM ERROR - max number of errors seen." 1494 "NIC will be disabled\n"); 1495 ef4_schedule_reset(efx, RESET_TYPE_DISABLE); 1496 } 1497 1498 return IRQ_HANDLED; 1499 } 1500 1501 /* Handle a legacy interrupt 1502 * Acknowledges the interrupt and schedule event queue processing. 1503 */ 1504 irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id) 1505 { 1506 struct ef4_nic *efx = dev_id; 1507 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); 1508 ef4_oword_t *int_ker = efx->irq_status.addr; 1509 irqreturn_t result = IRQ_NONE; 1510 struct ef4_channel *channel; 1511 ef4_dword_t reg; 1512 u32 queues; 1513 int syserr; 1514 1515 /* Read the ISR which also ACKs the interrupts */ 1516 ef4_readd(efx, ®, FR_BZ_INT_ISR0); 1517 queues = EF4_EXTRACT_DWORD(reg, 0, 31); 1518 1519 /* Legacy interrupts are disabled too late by the EEH kernel 1520 * code. Disable them earlier. 1521 * If an EEH error occurred, the read will have returned all ones. 1522 */ 1523 if (EF4_DWORD_IS_ALL_ONES(reg) && ef4_try_recovery(efx) && 1524 !efx->eeh_disabled_legacy_irq) { 1525 disable_irq_nosync(efx->legacy_irq); 1526 efx->eeh_disabled_legacy_irq = true; 1527 } 1528 1529 /* Handle non-event-queue sources */ 1530 if (queues & (1U << efx->irq_level) && soft_enabled) { 1531 syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1532 if (unlikely(syserr)) 1533 return ef4_farch_fatal_interrupt(efx); 1534 efx->last_irq_cpu = raw_smp_processor_id(); 1535 } 1536 1537 if (queues != 0) { 1538 efx->irq_zero_count = 0; 1539 1540 /* Schedule processing of any interrupting queues */ 1541 if (likely(soft_enabled)) { 1542 ef4_for_each_channel(channel, efx) { 1543 if (queues & 1) 1544 ef4_schedule_channel_irq(channel); 1545 queues >>= 1; 1546 } 1547 } 1548 result = IRQ_HANDLED; 1549 1550 } else { 1551 ef4_qword_t *event; 1552 1553 /* Legacy ISR read can return zero once (SF bug 15783) */ 1554 1555 /* We can't return IRQ_HANDLED more than once on seeing ISR=0 1556 * because this might be a shared interrupt. 
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		if (likely(soft_enabled)) {
			ef4_for_each_channel(channel, efx) {
				event = ef4_event(channel,
						  channel->eventq_read_ptr);
				if (ef4_event_present(event))
					ef4_schedule_channel_irq(channel);
				else
					ef4_farch_ev_read_ack(channel);
			}
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EF4_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EF4_DWORD_VAL(reg));

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt. This routine schedules event
 * queue processing. No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
{
	struct ef4_msi_context *context = dev_id;
	struct ef4_nic *efx = context->efx;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));

	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Handle non-event-queue sources */
	if (context->index == efx->irq_level) {
		syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return ef4_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	ef4_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}

/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
{
	size_t i = 0;
	ef4_dword_t dword;

	BUG_ON(ef4_nic_rev(efx) < EF4_REV_FALCON_B0);

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EF4_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		ef4_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}

/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
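/* Worked example (illustrative): with vi_count == 8, the TX descriptor
 * caches occupy the top 8 * TX_DC_ENTRIES = 128 entries of SRAM below
 * sram_lim_qw, and the RX caches the 8 * RX_DC_ENTRIES = 512 entries
 * directly below those; everything below rx_dc_base is left for the
 * buffer table.
 */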
1653 */ 1654 buftbl_min = ((efx->n_rx_channels * EF4_MAX_DMAQ_SIZE + 1655 efx->n_tx_channels * EF4_TXQ_TYPES * EF4_MAX_DMAQ_SIZE + 1656 efx->n_channels * EF4_MAX_EVQ_SIZE) 1657 * sizeof(ef4_qword_t) / EF4_BUF_SIZE); 1658 vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES); 1659 1660 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; 1661 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; 1662 } 1663 1664 u32 ef4_farch_fpga_ver(struct ef4_nic *efx) 1665 { 1666 ef4_oword_t altera_build; 1667 ef4_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); 1668 return EF4_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); 1669 } 1670 1671 void ef4_farch_init_common(struct ef4_nic *efx) 1672 { 1673 ef4_oword_t temp; 1674 1675 /* Set positions of descriptor caches in SRAM. */ 1676 EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); 1677 ef4_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); 1678 EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); 1679 ef4_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); 1680 1681 /* Set TX descriptor cache size. */ 1682 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); 1683 EF4_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); 1684 ef4_writeo(efx, &temp, FR_AZ_TX_DC_CFG); 1685 1686 /* Set RX descriptor cache size. Set low watermark to size-8, as 1687 * this allows most efficient prefetching. 1688 */ 1689 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); 1690 EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); 1691 ef4_writeo(efx, &temp, FR_AZ_RX_DC_CFG); 1692 EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); 1693 ef4_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); 1694 1695 /* Program INT_KER address */ 1696 EF4_POPULATE_OWORD_2(temp, 1697 FRF_AZ_NORM_INT_VEC_DIS_KER, 1698 EF4_INT_MODE_USE_MSI(efx), 1699 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); 1700 ef4_writeo(efx, &temp, FR_AZ_INT_ADR_KER); 1701 1702 /* Use a valid MSI-X vector */ 1703 efx->irq_level = 0; 1704 1705 /* Enable all the genuinely fatal interrupts. (They are still 1706 * masked by the overall interrupt mask, controlled by 1707 * falcon_interrupts()). 1708 * 1709 * Note: All other fatal interrupts are enabled 1710 */ 1711 EF4_POPULATE_OWORD_3(temp, 1712 FRF_AZ_ILL_ADR_INT_KER_EN, 1, 1713 FRF_AZ_RBUF_OWN_INT_KER_EN, 1, 1714 FRF_AZ_TBUF_OWN_INT_KER_EN, 1); 1715 EF4_INVERT_OWORD(temp); 1716 ef4_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); 1717 1718 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be 1719 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 
1720 */ 1721 ef4_reado(efx, &temp, FR_AZ_TX_RESERVED); 1722 EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); 1723 EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); 1724 EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); 1725 EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); 1726 EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); 1727 /* Enable SW_EV to inherit in char driver - assume harmless here */ 1728 EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); 1729 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 1730 EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); 1731 /* Disable hardware watchdog which can misfire */ 1732 EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); 1733 /* Squash TX of packets of 16 bytes or less */ 1734 if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) 1735 EF4_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1736 ef4_writeo(efx, &temp, FR_AZ_TX_RESERVED); 1737 1738 if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { 1739 EF4_POPULATE_OWORD_4(temp, 1740 /* Default values */ 1741 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, 1742 FRF_BZ_TX_PACE_SB_AF, 0xb, 1743 FRF_BZ_TX_PACE_FB_BASE, 0, 1744 /* Allow large pace values in the 1745 * fast bin. */ 1746 FRF_BZ_TX_PACE_BIN_TH, 1747 FFE_BZ_TX_PACE_RESERVED); 1748 ef4_writeo(efx, &temp, FR_BZ_TX_PACE); 1749 } 1750 } 1751 1752 /************************************************************************** 1753 * 1754 * Filter tables 1755 * 1756 ************************************************************************** 1757 */ 1758 1759 /* "Fudge factors" - difference between programmed value and actual depth. 1760 * Due to pipelined implementation we need to program H/W with a value that 1761 * is larger than the hop limit we want. 1762 */ 1763 #define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3 1764 #define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1 1765 1766 /* Hard maximum search limit. Hardware will time-out beyond 200-something. 1767 * We also need to avoid infinite loops in ef4_farch_filter_search() when the 1768 * table is full. 1769 */ 1770 #define EF4_FARCH_FILTER_CTL_SRCH_MAX 200 1771 1772 /* Don't try very hard to find space for performance hints, as this is 1773 * counter-productive. 
*/ 1774 #define EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX 5 1775 1776 enum ef4_farch_filter_type { 1777 EF4_FARCH_FILTER_TCP_FULL = 0, 1778 EF4_FARCH_FILTER_TCP_WILD, 1779 EF4_FARCH_FILTER_UDP_FULL, 1780 EF4_FARCH_FILTER_UDP_WILD, 1781 EF4_FARCH_FILTER_MAC_FULL = 4, 1782 EF4_FARCH_FILTER_MAC_WILD, 1783 EF4_FARCH_FILTER_UC_DEF = 8, 1784 EF4_FARCH_FILTER_MC_DEF, 1785 EF4_FARCH_FILTER_TYPE_COUNT, /* number of specific types */ 1786 }; 1787 1788 enum ef4_farch_filter_table_id { 1789 EF4_FARCH_FILTER_TABLE_RX_IP = 0, 1790 EF4_FARCH_FILTER_TABLE_RX_MAC, 1791 EF4_FARCH_FILTER_TABLE_RX_DEF, 1792 EF4_FARCH_FILTER_TABLE_TX_MAC, 1793 EF4_FARCH_FILTER_TABLE_COUNT, 1794 }; 1795 1796 enum ef4_farch_filter_index { 1797 EF4_FARCH_FILTER_INDEX_UC_DEF, 1798 EF4_FARCH_FILTER_INDEX_MC_DEF, 1799 EF4_FARCH_FILTER_SIZE_RX_DEF, 1800 }; 1801 1802 struct ef4_farch_filter_spec { 1803 u8 type:4; 1804 u8 priority:4; 1805 u8 flags; 1806 u16 dmaq_id; 1807 u32 data[3]; 1808 }; 1809 1810 struct ef4_farch_filter_table { 1811 enum ef4_farch_filter_table_id id; 1812 u32 offset; /* address of table relative to BAR */ 1813 unsigned size; /* number of entries */ 1814 unsigned step; /* step between entries */ 1815 unsigned used; /* number currently used */ 1816 unsigned long *used_bitmap; 1817 struct ef4_farch_filter_spec *spec; 1818 unsigned search_limit[EF4_FARCH_FILTER_TYPE_COUNT]; 1819 }; 1820 1821 struct ef4_farch_filter_state { 1822 struct ef4_farch_filter_table table[EF4_FARCH_FILTER_TABLE_COUNT]; 1823 }; 1824 1825 static void 1826 ef4_farch_filter_table_clear_entry(struct ef4_nic *efx, 1827 struct ef4_farch_filter_table *table, 1828 unsigned int filter_idx); 1829 1830 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit 1831 * key derived from the n-tuple. The initial LFSR state is 0xffff. */ 1832 static u16 ef4_farch_filter_hash(u32 key) 1833 { 1834 u16 tmp; 1835 1836 /* First 16 rounds */ 1837 tmp = 0x1fff ^ key >> 16; 1838 tmp = tmp ^ tmp >> 3 ^ tmp >> 6; 1839 tmp = tmp ^ tmp >> 9; 1840 /* Last 16 rounds */ 1841 tmp = tmp ^ tmp << 13 ^ key; 1842 tmp = tmp ^ tmp >> 3 ^ tmp >> 6; 1843 return tmp ^ tmp >> 9; 1844 } 1845 1846 /* To allow for hash collisions, filter search continues at these 1847 * increments from the first possible entry selected by the hash. */ 1848 static u16 ef4_farch_filter_increment(u32 key) 1849 { 1850 return key * 2 - 1; 1851 } 1852 1853 static enum ef4_farch_filter_table_id 1854 ef4_farch_filter_spec_table_id(const struct ef4_farch_filter_spec *spec) 1855 { 1856 BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP != 1857 (EF4_FARCH_FILTER_TCP_FULL >> 2)); 1858 BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP != 1859 (EF4_FARCH_FILTER_TCP_WILD >> 2)); 1860 BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP != 1861 (EF4_FARCH_FILTER_UDP_FULL >> 2)); 1862 BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP != 1863 (EF4_FARCH_FILTER_UDP_WILD >> 2)); 1864 BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC != 1865 (EF4_FARCH_FILTER_MAC_FULL >> 2)); 1866 BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC != 1867 (EF4_FARCH_FILTER_MAC_WILD >> 2)); 1868 BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_TX_MAC != 1869 EF4_FARCH_FILTER_TABLE_RX_MAC + 2); 1870 return (spec->type >> 2) + ((spec->flags & EF4_FILTER_FLAG_TX) ? 
2 : 0); 1871 } 1872 1873 static void ef4_farch_filter_push_rx_config(struct ef4_nic *efx) 1874 { 1875 struct ef4_farch_filter_state *state = efx->filter_state; 1876 struct ef4_farch_filter_table *table; 1877 ef4_oword_t filter_ctl; 1878 1879 ef4_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 1880 1881 table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP]; 1882 EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, 1883 table->search_limit[EF4_FARCH_FILTER_TCP_FULL] + 1884 EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); 1885 EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, 1886 table->search_limit[EF4_FARCH_FILTER_TCP_WILD] + 1887 EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); 1888 EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, 1889 table->search_limit[EF4_FARCH_FILTER_UDP_FULL] + 1890 EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); 1891 EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, 1892 table->search_limit[EF4_FARCH_FILTER_UDP_WILD] + 1893 EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); 1894 1895 table = &state->table[EF4_FARCH_FILTER_TABLE_RX_MAC]; 1896 if (table->size) { 1897 EF4_SET_OWORD_FIELD( 1898 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, 1899 table->search_limit[EF4_FARCH_FILTER_MAC_FULL] + 1900 EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); 1901 EF4_SET_OWORD_FIELD( 1902 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, 1903 table->search_limit[EF4_FARCH_FILTER_MAC_WILD] + 1904 EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); 1905 } 1906 1907 table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF]; 1908 if (table->size) { 1909 EF4_SET_OWORD_FIELD( 1910 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID, 1911 table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); 1912 EF4_SET_OWORD_FIELD( 1913 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED, 1914 !!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags & 1915 EF4_FILTER_FLAG_RX_RSS)); 1916 EF4_SET_OWORD_FIELD( 1917 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, 1918 table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); 1919 EF4_SET_OWORD_FIELD( 1920 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, 1921 !!(table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags & 1922 EF4_FILTER_FLAG_RX_RSS)); 1923 1924 /* There is a single bit to enable RX scatter for all 1925 * unmatched packets. Only set it if scatter is 1926 * enabled in both filter specs. 1927 */ 1928 EF4_SET_OWORD_FIELD( 1929 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1930 !!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags & 1931 table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags & 1932 EF4_FILTER_FLAG_RX_SCATTER)); 1933 } else if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { 1934 /* We don't expose 'default' filters because unmatched 1935 * packets always go to the queue number found in the 1936 * RSS table. But we still need to set the RX scatter 1937 * bit here. 
1938 */ 1939 EF4_SET_OWORD_FIELD( 1940 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1941 efx->rx_scatter); 1942 } 1943 1944 ef4_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 1945 } 1946 1947 static void ef4_farch_filter_push_tx_limits(struct ef4_nic *efx) 1948 { 1949 struct ef4_farch_filter_state *state = efx->filter_state; 1950 struct ef4_farch_filter_table *table; 1951 ef4_oword_t tx_cfg; 1952 1953 ef4_reado(efx, &tx_cfg, FR_AZ_TX_CFG); 1954 1955 table = &state->table[EF4_FARCH_FILTER_TABLE_TX_MAC]; 1956 if (table->size) { 1957 EF4_SET_OWORD_FIELD( 1958 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, 1959 table->search_limit[EF4_FARCH_FILTER_MAC_FULL] + 1960 EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); 1961 EF4_SET_OWORD_FIELD( 1962 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, 1963 table->search_limit[EF4_FARCH_FILTER_MAC_WILD] + 1964 EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); 1965 } 1966 1967 ef4_writeo(efx, &tx_cfg, FR_AZ_TX_CFG); 1968 } 1969 1970 static int 1971 ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec, 1972 const struct ef4_filter_spec *gen_spec) 1973 { 1974 bool is_full = false; 1975 1976 if ((gen_spec->flags & EF4_FILTER_FLAG_RX_RSS) && 1977 gen_spec->rss_context != EF4_FILTER_RSS_CONTEXT_DEFAULT) 1978 return -EINVAL; 1979 1980 spec->priority = gen_spec->priority; 1981 spec->flags = gen_spec->flags; 1982 spec->dmaq_id = gen_spec->dmaq_id; 1983 1984 switch (gen_spec->match_flags) { 1985 case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO | 1986 EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT | 1987 EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT): 1988 is_full = true; 1989 /* fall through */ 1990 case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO | 1991 EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): { 1992 __be32 rhost, host1, host2; 1993 __be16 rport, port1, port2; 1994 1995 EF4_BUG_ON_PARANOID(!(gen_spec->flags & EF4_FILTER_FLAG_RX)); 1996 1997 if (gen_spec->ether_type != htons(ETH_P_IP)) 1998 return -EPROTONOSUPPORT; 1999 if (gen_spec->loc_port == 0 || 2000 (is_full && gen_spec->rem_port == 0)) 2001 return -EADDRNOTAVAIL; 2002 switch (gen_spec->ip_proto) { 2003 case IPPROTO_TCP: 2004 spec->type = (is_full ? EF4_FARCH_FILTER_TCP_FULL : 2005 EF4_FARCH_FILTER_TCP_WILD); 2006 break; 2007 case IPPROTO_UDP: 2008 spec->type = (is_full ? EF4_FARCH_FILTER_UDP_FULL : 2009 EF4_FARCH_FILTER_UDP_WILD); 2010 break; 2011 default: 2012 return -EPROTONOSUPPORT; 2013 } 2014 2015 /* Filter is constructed in terms of source and destination, 2016 * with the odd wrinkle that the ports are swapped in a UDP 2017 * wildcard filter. We need to convert from local and remote 2018 * (= zero for wildcard) addresses. 2019 */ 2020 rhost = is_full ? gen_spec->rem_host[0] : 0; 2021 rport = is_full ? gen_spec->rem_port : 0; 2022 host1 = rhost; 2023 host2 = gen_spec->loc_host[0]; 2024 if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { 2025 port1 = gen_spec->loc_port; 2026 port2 = rport; 2027 } else { 2028 port1 = rport; 2029 port2 = gen_spec->loc_port; 2030 } 2031 spec->data[0] = ntohl(host1) << 16 | ntohs(port1); 2032 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; 2033 spec->data[2] = ntohl(host2); 2034 2035 break; 2036 } 2037 2038 case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID: 2039 is_full = true; 2040 /* fall through */ 2041 case EF4_FILTER_MATCH_LOC_MAC: 2042 spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL : 2043 EF4_FARCH_FILTER_MAC_WILD); 2044 spec->data[0] = is_full ? 
ntohs(gen_spec->outer_vid) : 0; 2045 spec->data[1] = (gen_spec->loc_mac[2] << 24 | 2046 gen_spec->loc_mac[3] << 16 | 2047 gen_spec->loc_mac[4] << 8 | 2048 gen_spec->loc_mac[5]); 2049 spec->data[2] = (gen_spec->loc_mac[0] << 8 | 2050 gen_spec->loc_mac[1]); 2051 break; 2052 2053 case EF4_FILTER_MATCH_LOC_MAC_IG: 2054 spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ? 2055 EF4_FARCH_FILTER_MC_DEF : 2056 EF4_FARCH_FILTER_UC_DEF); 2057 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ 2058 break; 2059 2060 default: 2061 return -EPROTONOSUPPORT; 2062 } 2063 2064 return 0; 2065 } 2066 2067 static void 2068 ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec, 2069 const struct ef4_farch_filter_spec *spec) 2070 { 2071 bool is_full = false; 2072 2073 /* *gen_spec should be completely initialised, to be consistent 2074 * with ef4_filter_init_{rx,tx}() and in case we want to copy 2075 * it back to userland. 2076 */ 2077 memset(gen_spec, 0, sizeof(*gen_spec)); 2078 2079 gen_spec->priority = spec->priority; 2080 gen_spec->flags = spec->flags; 2081 gen_spec->dmaq_id = spec->dmaq_id; 2082 2083 switch (spec->type) { 2084 case EF4_FARCH_FILTER_TCP_FULL: 2085 case EF4_FARCH_FILTER_UDP_FULL: 2086 is_full = true; 2087 /* fall through */ 2088 case EF4_FARCH_FILTER_TCP_WILD: 2089 case EF4_FARCH_FILTER_UDP_WILD: { 2090 __be32 host1, host2; 2091 __be16 port1, port2; 2092 2093 gen_spec->match_flags = 2094 EF4_FILTER_MATCH_ETHER_TYPE | 2095 EF4_FILTER_MATCH_IP_PROTO | 2096 EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT; 2097 if (is_full) 2098 gen_spec->match_flags |= (EF4_FILTER_MATCH_REM_HOST | 2099 EF4_FILTER_MATCH_REM_PORT); 2100 gen_spec->ether_type = htons(ETH_P_IP); 2101 gen_spec->ip_proto = 2102 (spec->type == EF4_FARCH_FILTER_TCP_FULL || 2103 spec->type == EF4_FARCH_FILTER_TCP_WILD) ? 
2104 IPPROTO_TCP : IPPROTO_UDP; 2105 2106 host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); 2107 port1 = htons(spec->data[0]); 2108 host2 = htonl(spec->data[2]); 2109 port2 = htons(spec->data[1] >> 16); 2110 if (spec->flags & EF4_FILTER_FLAG_TX) { 2111 gen_spec->loc_host[0] = host1; 2112 gen_spec->rem_host[0] = host2; 2113 } else { 2114 gen_spec->loc_host[0] = host2; 2115 gen_spec->rem_host[0] = host1; 2116 } 2117 if (!!(gen_spec->flags & EF4_FILTER_FLAG_TX) ^ 2118 (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { 2119 gen_spec->loc_port = port1; 2120 gen_spec->rem_port = port2; 2121 } else { 2122 gen_spec->loc_port = port2; 2123 gen_spec->rem_port = port1; 2124 } 2125 2126 break; 2127 } 2128 2129 case EF4_FARCH_FILTER_MAC_FULL: 2130 is_full = true; 2131 /* fall through */ 2132 case EF4_FARCH_FILTER_MAC_WILD: 2133 gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC; 2134 if (is_full) 2135 gen_spec->match_flags |= EF4_FILTER_MATCH_OUTER_VID; 2136 gen_spec->loc_mac[0] = spec->data[2] >> 8; 2137 gen_spec->loc_mac[1] = spec->data[2]; 2138 gen_spec->loc_mac[2] = spec->data[1] >> 24; 2139 gen_spec->loc_mac[3] = spec->data[1] >> 16; 2140 gen_spec->loc_mac[4] = spec->data[1] >> 8; 2141 gen_spec->loc_mac[5] = spec->data[1]; 2142 gen_spec->outer_vid = htons(spec->data[0]); 2143 break; 2144 2145 case EF4_FARCH_FILTER_UC_DEF: 2146 case EF4_FARCH_FILTER_MC_DEF: 2147 gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC_IG; 2148 gen_spec->loc_mac[0] = spec->type == EF4_FARCH_FILTER_MC_DEF; 2149 break; 2150 2151 default: 2152 WARN_ON(1); 2153 break; 2154 } 2155 } 2156 2157 static void 2158 ef4_farch_filter_init_rx_auto(struct ef4_nic *efx, 2159 struct ef4_farch_filter_spec *spec) 2160 { 2161 /* If there's only one channel then disable RSS for non VF 2162 * traffic, thereby allowing VFs to use RSS when the PF can't. 2163 */ 2164 spec->priority = EF4_FILTER_PRI_AUTO; 2165 spec->flags = (EF4_FILTER_FLAG_RX | 2166 (ef4_rss_enabled(efx) ? EF4_FILTER_FLAG_RX_RSS : 0) | 2167 (efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0)); 2168 spec->dmaq_id = 0; 2169 } 2170 2171 /* Build a filter entry and return its n-tuple key. 
*/ 2172 static u32 ef4_farch_filter_build(ef4_oword_t *filter, 2173 struct ef4_farch_filter_spec *spec) 2174 { 2175 u32 data3; 2176 2177 switch (ef4_farch_filter_spec_table_id(spec)) { 2178 case EF4_FARCH_FILTER_TABLE_RX_IP: { 2179 bool is_udp = (spec->type == EF4_FARCH_FILTER_UDP_FULL || 2180 spec->type == EF4_FARCH_FILTER_UDP_WILD); 2181 EF4_POPULATE_OWORD_7( 2182 *filter, 2183 FRF_BZ_RSS_EN, 2184 !!(spec->flags & EF4_FILTER_FLAG_RX_RSS), 2185 FRF_BZ_SCATTER_EN, 2186 !!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER), 2187 FRF_BZ_TCP_UDP, is_udp, 2188 FRF_BZ_RXQ_ID, spec->dmaq_id, 2189 EF4_DWORD_2, spec->data[2], 2190 EF4_DWORD_1, spec->data[1], 2191 EF4_DWORD_0, spec->data[0]); 2192 data3 = is_udp; 2193 break; 2194 } 2195 2196 case EF4_FARCH_FILTER_TABLE_RX_MAC: { 2197 bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD; 2198 EF4_POPULATE_OWORD_7( 2199 *filter, 2200 FRF_CZ_RMFT_RSS_EN, 2201 !!(spec->flags & EF4_FILTER_FLAG_RX_RSS), 2202 FRF_CZ_RMFT_SCATTER_EN, 2203 !!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER), 2204 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, 2205 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, 2206 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], 2207 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], 2208 FRF_CZ_RMFT_VLAN_ID, spec->data[0]); 2209 data3 = is_wild; 2210 break; 2211 } 2212 2213 case EF4_FARCH_FILTER_TABLE_TX_MAC: { 2214 bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD; 2215 EF4_POPULATE_OWORD_5(*filter, 2216 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, 2217 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, 2218 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2], 2219 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1], 2220 FRF_CZ_TMFT_VLAN_ID, spec->data[0]); 2221 data3 = is_wild | spec->dmaq_id << 1; 2222 break; 2223 } 2224 2225 default: 2226 BUG(); 2227 } 2228 2229 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; 2230 } 2231 2232 static bool ef4_farch_filter_equal(const struct ef4_farch_filter_spec *left, 2233 const struct ef4_farch_filter_spec *right) 2234 { 2235 if (left->type != right->type || 2236 memcmp(left->data, right->data, sizeof(left->data))) 2237 return false; 2238 2239 if (left->flags & EF4_FILTER_FLAG_TX && 2240 left->dmaq_id != right->dmaq_id) 2241 return false; 2242 2243 return true; 2244 } 2245 2246 /* 2247 * Construct/deconstruct external filter IDs. At least the RX filter 2248 * IDs must be ordered by matching priority, for RX NFC semantics. 2249 * 2250 * Deconstruction needs to be robust against invalid IDs so that 2251 * ef4_filter_remove_id_safe() and ef4_filter_get_filter_safe() can 2252 * accept user-provided IDs. 
2253 */ 2254 2255 #define EF4_FARCH_FILTER_MATCH_PRI_COUNT 5 2256 2257 static const u8 ef4_farch_filter_type_match_pri[EF4_FARCH_FILTER_TYPE_COUNT] = { 2258 [EF4_FARCH_FILTER_TCP_FULL] = 0, 2259 [EF4_FARCH_FILTER_UDP_FULL] = 0, 2260 [EF4_FARCH_FILTER_TCP_WILD] = 1, 2261 [EF4_FARCH_FILTER_UDP_WILD] = 1, 2262 [EF4_FARCH_FILTER_MAC_FULL] = 2, 2263 [EF4_FARCH_FILTER_MAC_WILD] = 3, 2264 [EF4_FARCH_FILTER_UC_DEF] = 4, 2265 [EF4_FARCH_FILTER_MC_DEF] = 4, 2266 }; 2267 2268 static const enum ef4_farch_filter_table_id ef4_farch_filter_range_table[] = { 2269 EF4_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */ 2270 EF4_FARCH_FILTER_TABLE_RX_IP, 2271 EF4_FARCH_FILTER_TABLE_RX_MAC, 2272 EF4_FARCH_FILTER_TABLE_RX_MAC, 2273 EF4_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ 2274 EF4_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */ 2275 EF4_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */ 2276 }; 2277 2278 #define EF4_FARCH_FILTER_INDEX_WIDTH 13 2279 #define EF4_FARCH_FILTER_INDEX_MASK ((1 << EF4_FARCH_FILTER_INDEX_WIDTH) - 1) 2280 2281 static inline u32 2282 ef4_farch_filter_make_id(const struct ef4_farch_filter_spec *spec, 2283 unsigned int index) 2284 { 2285 unsigned int range; 2286 2287 range = ef4_farch_filter_type_match_pri[spec->type]; 2288 if (!(spec->flags & EF4_FILTER_FLAG_RX)) 2289 range += EF4_FARCH_FILTER_MATCH_PRI_COUNT; 2290 2291 return range << EF4_FARCH_FILTER_INDEX_WIDTH | index; 2292 } 2293 2294 static inline enum ef4_farch_filter_table_id 2295 ef4_farch_filter_id_table_id(u32 id) 2296 { 2297 unsigned int range = id >> EF4_FARCH_FILTER_INDEX_WIDTH; 2298 2299 if (range < ARRAY_SIZE(ef4_farch_filter_range_table)) 2300 return ef4_farch_filter_range_table[range]; 2301 else 2302 return EF4_FARCH_FILTER_TABLE_COUNT; /* invalid */ 2303 } 2304 2305 static inline unsigned int ef4_farch_filter_id_index(u32 id) 2306 { 2307 return id & EF4_FARCH_FILTER_INDEX_MASK; 2308 } 2309 2310 u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx) 2311 { 2312 struct ef4_farch_filter_state *state = efx->filter_state; 2313 unsigned int range = EF4_FARCH_FILTER_MATCH_PRI_COUNT - 1; 2314 enum ef4_farch_filter_table_id table_id; 2315 2316 do { 2317 table_id = ef4_farch_filter_range_table[range]; 2318 if (state->table[table_id].size != 0) 2319 return range << EF4_FARCH_FILTER_INDEX_WIDTH | 2320 state->table[table_id].size; 2321 } while (range--); 2322 2323 return 0; 2324 } 2325 2326 s32 ef4_farch_filter_insert(struct ef4_nic *efx, 2327 struct ef4_filter_spec *gen_spec, 2328 bool replace_equal) 2329 { 2330 struct ef4_farch_filter_state *state = efx->filter_state; 2331 struct ef4_farch_filter_table *table; 2332 struct ef4_farch_filter_spec spec; 2333 ef4_oword_t filter; 2334 int rep_index, ins_index; 2335 unsigned int depth = 0; 2336 int rc; 2337 2338 rc = ef4_farch_filter_from_gen_spec(&spec, gen_spec); 2339 if (rc) 2340 return rc; 2341 2342 table = &state->table[ef4_farch_filter_spec_table_id(&spec)]; 2343 if (table->size == 0) 2344 return -EINVAL; 2345 2346 netif_vdbg(efx, hw, efx->net_dev, 2347 "%s: type %d search_limit=%d", __func__, spec.type, 2348 table->search_limit[spec.type]); 2349 2350 if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) { 2351 /* One filter spec per type */ 2352 BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_UC_DEF != 0); 2353 BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_MC_DEF != 2354 EF4_FARCH_FILTER_MC_DEF - EF4_FARCH_FILTER_UC_DEF); 2355 rep_index = spec.type - EF4_FARCH_FILTER_UC_DEF; 2356 ins_index = rep_index; 2357 2358 spin_lock_bh(&efx->filter_lock); 2359 } else { 2360 /* Search concurrently for 2361 * (1) 
a filter to be replaced (rep_index): any filter 2362 * with the same match values, up to the current 2363 * search depth for this type, and 2364 * (2) the insertion point (ins_index): (1) or any 2365 * free slot before it or up to the maximum search 2366 * depth for this priority 2367 * We fail if we cannot find (2). 2368 * 2369 * We can stop once either 2370 * (a) we find (1), in which case we have definitely 2371 * found (2) as well; or 2372 * (b) we have searched exhaustively for (1), and have 2373 * either found (2) or searched exhaustively for it 2374 */ 2375 u32 key = ef4_farch_filter_build(&filter, &spec); 2376 unsigned int hash = ef4_farch_filter_hash(key); 2377 unsigned int incr = ef4_farch_filter_increment(key); 2378 unsigned int max_rep_depth = table->search_limit[spec.type]; 2379 unsigned int max_ins_depth = 2380 spec.priority <= EF4_FILTER_PRI_HINT ? 2381 EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX : 2382 EF4_FARCH_FILTER_CTL_SRCH_MAX; 2383 unsigned int i = hash & (table->size - 1); 2384 2385 ins_index = -1; 2386 depth = 1; 2387 2388 spin_lock_bh(&efx->filter_lock); 2389 2390 for (;;) { 2391 if (!test_bit(i, table->used_bitmap)) { 2392 if (ins_index < 0) 2393 ins_index = i; 2394 } else if (ef4_farch_filter_equal(&spec, 2395 &table->spec[i])) { 2396 /* Case (a) */ 2397 if (ins_index < 0) 2398 ins_index = i; 2399 rep_index = i; 2400 break; 2401 } 2402 2403 if (depth >= max_rep_depth && 2404 (ins_index >= 0 || depth >= max_ins_depth)) { 2405 /* Case (b) */ 2406 if (ins_index < 0) { 2407 rc = -EBUSY; 2408 goto out; 2409 } 2410 rep_index = -1; 2411 break; 2412 } 2413 2414 i = (i + incr) & (table->size - 1); 2415 ++depth; 2416 } 2417 } 2418 2419 /* If we found a filter to be replaced, check whether we 2420 * should do so 2421 */ 2422 if (rep_index >= 0) { 2423 struct ef4_farch_filter_spec *saved_spec = 2424 &table->spec[rep_index]; 2425 2426 if (spec.priority == saved_spec->priority && !replace_equal) { 2427 rc = -EEXIST; 2428 goto out; 2429 } 2430 if (spec.priority < saved_spec->priority) { 2431 rc = -EPERM; 2432 goto out; 2433 } 2434 if (saved_spec->priority == EF4_FILTER_PRI_AUTO || 2435 saved_spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO) 2436 spec.flags |= EF4_FILTER_FLAG_RX_OVER_AUTO; 2437 } 2438 2439 /* Insert the filter */ 2440 if (ins_index != rep_index) { 2441 __set_bit(ins_index, table->used_bitmap); 2442 ++table->used; 2443 } 2444 table->spec[ins_index] = spec; 2445 2446 if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) { 2447 ef4_farch_filter_push_rx_config(efx); 2448 } else { 2449 if (table->search_limit[spec.type] < depth) { 2450 table->search_limit[spec.type] = depth; 2451 if (spec.flags & EF4_FILTER_FLAG_TX) 2452 ef4_farch_filter_push_tx_limits(efx); 2453 else 2454 ef4_farch_filter_push_rx_config(efx); 2455 } 2456 2457 ef4_writeo(efx, &filter, 2458 table->offset + table->step * ins_index); 2459 2460 /* If we were able to replace a filter by inserting 2461 * at a lower depth, clear the replaced filter 2462 */ 2463 if (ins_index != rep_index && rep_index >= 0) 2464 ef4_farch_filter_table_clear_entry(efx, table, 2465 rep_index); 2466 } 2467 2468 netif_vdbg(efx, hw, efx->net_dev, 2469 "%s: filter type %d index %d rxq %u set", 2470 __func__, spec.type, ins_index, spec.dmaq_id); 2471 rc = ef4_farch_filter_make_id(&spec, ins_index); 2472 2473 out: 2474 spin_unlock_bh(&efx->filter_lock); 2475 return rc; 2476 } 2477 2478 static void 2479 ef4_farch_filter_table_clear_entry(struct ef4_nic *efx, 2480 struct ef4_farch_filter_table *table, 2481 unsigned int filter_idx) 2482 { 2483 static 
ef4_oword_t filter; 2484 2485 EF4_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap)); 2486 BUG_ON(table->offset == 0); /* can't clear MAC default filters */ 2487 2488 __clear_bit(filter_idx, table->used_bitmap); 2489 --table->used; 2490 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); 2491 2492 ef4_writeo(efx, &filter, table->offset + table->step * filter_idx); 2493 2494 /* If this filter required a greater search depth than 2495 * any other, the search limit for its type can now be 2496 * decreased. However, it is hard to determine that 2497 * unless the table has become completely empty - in 2498 * which case, all its search limits can be set to 0. 2499 */ 2500 if (unlikely(table->used == 0)) { 2501 memset(table->search_limit, 0, sizeof(table->search_limit)); 2502 if (table->id == EF4_FARCH_FILTER_TABLE_TX_MAC) 2503 ef4_farch_filter_push_tx_limits(efx); 2504 else 2505 ef4_farch_filter_push_rx_config(efx); 2506 } 2507 } 2508 2509 static int ef4_farch_filter_remove(struct ef4_nic *efx, 2510 struct ef4_farch_filter_table *table, 2511 unsigned int filter_idx, 2512 enum ef4_filter_priority priority) 2513 { 2514 struct ef4_farch_filter_spec *spec = &table->spec[filter_idx]; 2515 2516 if (!test_bit(filter_idx, table->used_bitmap) || 2517 spec->priority != priority) 2518 return -ENOENT; 2519 2520 if (spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO) { 2521 ef4_farch_filter_init_rx_auto(efx, spec); 2522 ef4_farch_filter_push_rx_config(efx); 2523 } else { 2524 ef4_farch_filter_table_clear_entry(efx, table, filter_idx); 2525 } 2526 2527 return 0; 2528 } 2529 2530 int ef4_farch_filter_remove_safe(struct ef4_nic *efx, 2531 enum ef4_filter_priority priority, 2532 u32 filter_id) 2533 { 2534 struct ef4_farch_filter_state *state = efx->filter_state; 2535 enum ef4_farch_filter_table_id table_id; 2536 struct ef4_farch_filter_table *table; 2537 unsigned int filter_idx; 2538 struct ef4_farch_filter_spec *spec; 2539 int rc; 2540 2541 table_id = ef4_farch_filter_id_table_id(filter_id); 2542 if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT) 2543 return -ENOENT; 2544 table = &state->table[table_id]; 2545 2546 filter_idx = ef4_farch_filter_id_index(filter_id); 2547 if (filter_idx >= table->size) 2548 return -ENOENT; 2549 spec = &table->spec[filter_idx]; 2550 2551 spin_lock_bh(&efx->filter_lock); 2552 rc = ef4_farch_filter_remove(efx, table, filter_idx, priority); 2553 spin_unlock_bh(&efx->filter_lock); 2554 2555 return rc; 2556 } 2557 2558 int ef4_farch_filter_get_safe(struct ef4_nic *efx, 2559 enum ef4_filter_priority priority, 2560 u32 filter_id, struct ef4_filter_spec *spec_buf) 2561 { 2562 struct ef4_farch_filter_state *state = efx->filter_state; 2563 enum ef4_farch_filter_table_id table_id; 2564 struct ef4_farch_filter_table *table; 2565 struct ef4_farch_filter_spec *spec; 2566 unsigned int filter_idx; 2567 int rc; 2568 2569 table_id = ef4_farch_filter_id_table_id(filter_id); 2570 if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT) 2571 return -ENOENT; 2572 table = &state->table[table_id]; 2573 2574 filter_idx = ef4_farch_filter_id_index(filter_id); 2575 if (filter_idx >= table->size) 2576 return -ENOENT; 2577 spec = &table->spec[filter_idx]; 2578 2579 spin_lock_bh(&efx->filter_lock); 2580 2581 if (test_bit(filter_idx, table->used_bitmap) && 2582 spec->priority == priority) { 2583 ef4_farch_filter_to_gen_spec(spec_buf, spec); 2584 rc = 0; 2585 } else { 2586 rc = -ENOENT; 2587 } 2588 2589 spin_unlock_bh(&efx->filter_lock); 2590 2591 return rc; 2592 } 2593 2594 static void 2595 
ef4_farch_filter_table_clear(struct ef4_nic *efx, 2596 enum ef4_farch_filter_table_id table_id, 2597 enum ef4_filter_priority priority) 2598 { 2599 struct ef4_farch_filter_state *state = efx->filter_state; 2600 struct ef4_farch_filter_table *table = &state->table[table_id]; 2601 unsigned int filter_idx; 2602 2603 spin_lock_bh(&efx->filter_lock); 2604 for (filter_idx = 0; filter_idx < table->size; ++filter_idx) { 2605 if (table->spec[filter_idx].priority != EF4_FILTER_PRI_AUTO) 2606 ef4_farch_filter_remove(efx, table, 2607 filter_idx, priority); 2608 } 2609 spin_unlock_bh(&efx->filter_lock); 2610 } 2611 2612 int ef4_farch_filter_clear_rx(struct ef4_nic *efx, 2613 enum ef4_filter_priority priority) 2614 { 2615 ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_IP, 2616 priority); 2617 ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_MAC, 2618 priority); 2619 ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_DEF, 2620 priority); 2621 return 0; 2622 } 2623 2624 u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx, 2625 enum ef4_filter_priority priority) 2626 { 2627 struct ef4_farch_filter_state *state = efx->filter_state; 2628 enum ef4_farch_filter_table_id table_id; 2629 struct ef4_farch_filter_table *table; 2630 unsigned int filter_idx; 2631 u32 count = 0; 2632 2633 spin_lock_bh(&efx->filter_lock); 2634 2635 for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP; 2636 table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF; 2637 table_id++) { 2638 table = &state->table[table_id]; 2639 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { 2640 if (test_bit(filter_idx, table->used_bitmap) && 2641 table->spec[filter_idx].priority == priority) 2642 ++count; 2643 } 2644 } 2645 2646 spin_unlock_bh(&efx->filter_lock); 2647 2648 return count; 2649 } 2650 2651 s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx, 2652 enum ef4_filter_priority priority, 2653 u32 *buf, u32 size) 2654 { 2655 struct ef4_farch_filter_state *state = efx->filter_state; 2656 enum ef4_farch_filter_table_id table_id; 2657 struct ef4_farch_filter_table *table; 2658 unsigned int filter_idx; 2659 s32 count = 0; 2660 2661 spin_lock_bh(&efx->filter_lock); 2662 2663 for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP; 2664 table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF; 2665 table_id++) { 2666 table = &state->table[table_id]; 2667 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { 2668 if (test_bit(filter_idx, table->used_bitmap) && 2669 table->spec[filter_idx].priority == priority) { 2670 if (count == size) { 2671 count = -EMSGSIZE; 2672 goto out; 2673 } 2674 buf[count++] = ef4_farch_filter_make_id( 2675 &table->spec[filter_idx], filter_idx); 2676 } 2677 } 2678 } 2679 out: 2680 spin_unlock_bh(&efx->filter_lock); 2681 2682 return count; 2683 } 2684 2685 /* Restore filter state after reset */ 2686 void ef4_farch_filter_table_restore(struct ef4_nic *efx) 2687 { 2688 struct ef4_farch_filter_state *state = efx->filter_state; 2689 enum ef4_farch_filter_table_id table_id; 2690 struct ef4_farch_filter_table *table; 2691 ef4_oword_t filter; 2692 unsigned int filter_idx; 2693 2694 spin_lock_bh(&efx->filter_lock); 2695 2696 for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) { 2697 table = &state->table[table_id]; 2698 2699 /* Check whether this is a regular register table */ 2700 if (table->step == 0) 2701 continue; 2702 2703 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { 2704 if (!test_bit(filter_idx, table->used_bitmap)) 2705 continue; 2706 ef4_farch_filter_build(&filter,
&table->spec[filter_idx]); 2707 ef4_writeo(efx, &filter, 2708 table->offset + table->step * filter_idx); 2709 } 2710 } 2711 2712 ef4_farch_filter_push_rx_config(efx); 2713 ef4_farch_filter_push_tx_limits(efx); 2714 2715 spin_unlock_bh(&efx->filter_lock); 2716 } 2717 2718 void ef4_farch_filter_table_remove(struct ef4_nic *efx) 2719 { 2720 struct ef4_farch_filter_state *state = efx->filter_state; 2721 enum ef4_farch_filter_table_id table_id; 2722 2723 for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) { 2724 kfree(state->table[table_id].used_bitmap); 2725 vfree(state->table[table_id].spec); 2726 } 2727 kfree(state); 2728 } 2729 2730 int ef4_farch_filter_table_probe(struct ef4_nic *efx) 2731 { 2732 struct ef4_farch_filter_state *state; 2733 struct ef4_farch_filter_table *table; 2734 unsigned table_id; 2735 2736 state = kzalloc(sizeof(struct ef4_farch_filter_state), GFP_KERNEL); 2737 if (!state) 2738 return -ENOMEM; 2739 efx->filter_state = state; 2740 2741 if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { 2742 table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP]; 2743 table->id = EF4_FARCH_FILTER_TABLE_RX_IP; 2744 table->offset = FR_BZ_RX_FILTER_TBL0; 2745 table->size = FR_BZ_RX_FILTER_TBL0_ROWS; 2746 table->step = FR_BZ_RX_FILTER_TBL0_STEP; 2747 } 2748 2749 for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) { 2750 table = &state->table[table_id]; 2751 if (table->size == 0) 2752 continue; 2753 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size), 2754 sizeof(unsigned long), 2755 GFP_KERNEL); 2756 if (!table->used_bitmap) 2757 goto fail; 2758 table->spec = vzalloc(table->size * sizeof(*table->spec)); 2759 if (!table->spec) 2760 goto fail; 2761 } 2762 2763 table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF]; 2764 if (table->size) { 2765 /* RX default filters must always exist */ 2766 struct ef4_farch_filter_spec *spec; 2767 unsigned i; 2768 2769 for (i = 0; i < EF4_FARCH_FILTER_SIZE_RX_DEF; i++) { 2770 spec = &table->spec[i]; 2771 spec->type = EF4_FARCH_FILTER_UC_DEF + i; 2772 ef4_farch_filter_init_rx_auto(efx, spec); 2773 __set_bit(i, table->used_bitmap); 2774 } 2775 } 2776 2777 ef4_farch_filter_push_rx_config(efx); 2778 2779 return 0; 2780 2781 fail: 2782 ef4_farch_filter_table_remove(efx); 2783 return -ENOMEM; 2784 } 2785 2786 /* Update scatter enable flags for filters pointing to our own RX queues */ 2787 void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx) 2788 { 2789 struct ef4_farch_filter_state *state = efx->filter_state; 2790 enum ef4_farch_filter_table_id table_id; 2791 struct ef4_farch_filter_table *table; 2792 ef4_oword_t filter; 2793 unsigned int filter_idx; 2794 2795 spin_lock_bh(&efx->filter_lock); 2796 2797 for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP; 2798 table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF; 2799 table_id++) { 2800 table = &state->table[table_id]; 2801 2802 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { 2803 if (!test_bit(filter_idx, table->used_bitmap) || 2804 table->spec[filter_idx].dmaq_id >= 2805 efx->n_rx_channels) 2806 continue; 2807 2808 if (efx->rx_scatter) 2809 table->spec[filter_idx].flags |= 2810 EF4_FILTER_FLAG_RX_SCATTER; 2811 else 2812 table->spec[filter_idx].flags &= 2813 ~EF4_FILTER_FLAG_RX_SCATTER; 2814 2815 if (table_id == EF4_FARCH_FILTER_TABLE_RX_DEF) 2816 /* Pushed by ef4_farch_filter_push_rx_config() */ 2817 continue; 2818 2819 ef4_farch_filter_build(&filter, &table->spec[filter_idx]); 2820 ef4_writeo(efx, &filter, 2821 table->offset + table->step * filter_idx); 2822 } 2823 } 2824 2825 
ef4_farch_filter_push_rx_config(efx); 2826 2827 spin_unlock_bh(&efx->filter_lock); 2828 } 2829 2830 #ifdef CONFIG_RFS_ACCEL 2831 2832 s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx, 2833 struct ef4_filter_spec *gen_spec) 2834 { 2835 return ef4_farch_filter_insert(efx, gen_spec, true); 2836 } 2837 2838 bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id, 2839 unsigned int index) 2840 { 2841 struct ef4_farch_filter_state *state = efx->filter_state; 2842 struct ef4_farch_filter_table *table = 2843 &state->table[EF4_FARCH_FILTER_TABLE_RX_IP]; 2844 2845 if (test_bit(index, table->used_bitmap) && 2846 table->spec[index].priority == EF4_FILTER_PRI_HINT && 2847 rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, 2848 flow_id, index)) { 2849 ef4_farch_filter_table_clear_entry(efx, table, index); 2850 return true; 2851 } 2852 2853 return false; 2854 } 2855 2856 #endif /* CONFIG_RFS_ACCEL */ 2857 2858 void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx) 2859 { 2860 struct net_device *net_dev = efx->net_dev; 2861 struct netdev_hw_addr *ha; 2862 union ef4_multicast_hash *mc_hash = &efx->multicast_hash; 2863 u32 crc; 2864 int bit; 2865 2866 if (!ef4_dev_registered(efx)) 2867 return; 2868 2869 netif_addr_lock_bh(net_dev); 2870 2871 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); 2872 2873 /* Build multicast hash table */ 2874 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { 2875 memset(mc_hash, 0xff, sizeof(*mc_hash)); 2876 } else { 2877 memset(mc_hash, 0x00, sizeof(*mc_hash)); 2878 netdev_for_each_mc_addr(ha, net_dev) { 2879 crc = ether_crc_le(ETH_ALEN, ha->addr); 2880 bit = crc & (EF4_MCAST_HASH_ENTRIES - 1); 2881 __set_bit_le(bit, mc_hash); 2882 } 2883 2884 /* Broadcast packets go through the multicast hash filter. 2885 * ether_crc_le() of the broadcast address is 0xbe2612ff 2886 * so we always add bit 0xff to the mask. 2887 */ 2888 __set_bit_le(0xff, mc_hash); 2889 } 2890 2891 netif_addr_unlock_bh(net_dev); 2892 } 2893
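/* Illustrative sketch, not part of the original driver: the helper below
 * (its name is hypothetical) only demonstrates the multicast hash mapping
 * used by ef4_farch_filter_sync_rx_mode() above: the low bits of the
 * little-endian CRC32 of the MAC address select one of
 * EF4_MCAST_HASH_ENTRIES bits.  For the broadcast address the CRC is
 * 0xbe2612ff, so the selected bit index is 0xff, matching the bit that the
 * function always sets in the hash table.
 */
static inline unsigned int ef4_farch_example_mcast_hash_bit(const u8 *addr)
{
	/* Same computation as the netdev_for_each_mc_addr() loop above */
	return ether_crc_le(ETH_ALEN, addr) & (EF4_MCAST_HASH_ENTRIES - 1);
}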