// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/pci.h>

/* Falcon-architecture (SFC9000-family) support */

	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
	(_tx_queue)->queue)

/* in efx_write_buf_tbl() */
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,

/* in efx_masked_compare_oword() */
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
	       ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
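/*
 * Example (standalone, not from the driver): the masked compare above
 * treats a 128-bit octword as two u64 halves and reports whether any
 * masked-in bit differs. Types and values below are illustrative
 * assumptions, sketched for clarity.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t u64[2]; } oword_t;

static bool masked_compare(const oword_t *a, const oword_t *b,
			   const oword_t *mask)
{
	/* Non-zero iff any bit selected by mask differs between a and b */
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
	       ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int main(void)
{
	oword_t a = { { 0x00ff, 0 } }, b = { { 0xffff, 0 } };
	oword_t mask = { { 0x00ff, 0 } };	/* low byte only */

	printf("differs: %d\n", masked_compare(&a, &b, &mask));	/* 0 */
	return 0;
}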
/* in efx_farch_test_registers() */
	netif_err(efx, hw, efx->net_dev,
	return -EIO;

/* in efx_init_special_buffer() */
	EFX_WARN_ON_PARANOID(!buffer->buf.addr);
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,

/* in efx_fini_special_buffer() */
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);
	if (!buffer->entries)
	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

/* in efx_alloc_special_buffer() */
	struct siena_nic_data *nic_data = efx->nic_data;
	if (efx_siena_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
		   nic_data->vf_buftbl_base < efx->next_buffer_table);
	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
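/*
 * Sketch (assumed constants, not driver code) of the buffer-table
 * bookkeeping above: a special buffer of len bytes occupies
 * len / BUF_SIZE consecutive buffer-table entries, and its DMA address
 * must be BUF_SIZE-aligned, which the power-of-two test
 * addr & (BUF_SIZE - 1) checks, as the BUG_ON() does.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BUF_SIZE 4096u			/* assumed EFX_BUF_SIZE */

static unsigned int next_buffer_table;	/* mimics efx->next_buffer_table */

static unsigned int alloc_entries(uint64_t dma_addr, size_t len)
{
	unsigned int index = next_buffer_table;

	assert((dma_addr & (BUF_SIZE - 1)) == 0);	/* alignment check */
	next_buffer_table += len / BUF_SIZE;		/* reserve entries */
	return index;
}

int main(void)
{
	unsigned int idx = alloc_entries(0x10000, 4 * BUF_SIZE);

	printf("entries %u-%u\n", idx, idx + 3);	/* prints 0-3 */
	return 0;
}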
/* in efx_free_special_buffer() */
	if (!buffer->buf.addr)
	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
	efx_siena_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;

/* in efx_farch_notify_tx_desc() */
	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);

/* in efx_farch_push_tx_desc() */
	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);

/* in efx_farch_tx_write() */
	unsigned old_write_count = tx_queue->write_count;
	tx_queue->xmit_pending = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		++tx_queue->write_count;
		EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);
			    old_write_count & tx_queue->ptr_mask);
		++tx_queue->pushes;
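/*
 * Illustration (assumed values, not driver code) of the free-running
 * ring counters used above: write_count and insert_count increment
 * without bound, and the hardware ring index is recovered with
 * count & ptr_mask, relying on the ring size being a power of two.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int ptr_mask = 512 - 1;	/* 512-entry ring (assumed) */
	unsigned int insert_count = 1000;	/* descriptors queued */
	unsigned int write_count = 997;		/* descriptors pushed to NIC */

	while (write_count != insert_count) {
		unsigned int write_ptr = write_count & ptr_mask;

		printf("push descriptor at ring slot %u\n", write_ptr);
		++write_count;
	}
	return 0;
}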
/* in efx_farch_tx_limit_len() */
	unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
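/*
 * Why (~dma_addr & (PAGE - 1)) + 1 is the distance to the next PAGE
 * boundary: for an in-page offset o, ~addr & (PAGE - 1) == PAGE - 1 - o,
 * so the expression yields PAGE - o. Standalone demo with an assumed
 * 4 KiB page size.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t page = 4096;
	uint64_t dma_addr = 0x12345ab0;
	uint64_t limit = (~dma_addr & (page - 1)) + 1;

	/* offset 0xab0 = 2736, so limit = 4096 - 2736 = 1360 */
	printf("%#llx: %llu bytes to next boundary\n",
	       (unsigned long long)dma_addr, (unsigned long long)limit);
	return 0;
}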
/* in efx_farch_tx_probe() */
	struct efx_nic *efx = tx_queue->efx;
	tx_queue->type = ((tx_queue->label & 1) ? EFX_TXQ_TYPE_OUTER_CSUM : 0) |
			 ((tx_queue->label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0);
	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,

/* in efx_farch_tx_init() */
	int csum = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
	struct efx_nic *efx = tx_queue->efx;
	efx_init_special_buffer(efx, &tx_queue->txd);
			 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			 tx_queue->channel->channel,
			 FRF_AZ_TX_DESCQ_LABEL, tx_queue->label,
			 __ffs(tx_queue->txd.entries),
	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);
			     (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
	efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);
	tx_queue->tso_version = 1;

/* in efx_farch_flush_tx_queue() */
	struct efx_nic *efx = tx_queue->efx;
	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);

/* in efx_farch_tx_fini() */
	struct efx_nic *efx = tx_queue->efx;
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);
	efx_fini_special_buffer(efx, &tx_queue->txd);

/* in efx_farch_tx_remove() */
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);

/* in efx_farch_build_rx_desc() */
			   rx_buf->len -
			   rx_queue->efx->type->rx_buffer_padding,
			   FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);

/* in efx_farch_rx_write() */
	struct efx_nic *efx = rx_queue->efx;
	while (rx_queue->notified_count != rx_queue->added_count) {
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;

/* in efx_farch_rx_probe() */
	struct efx_nic *efx = rx_queue->efx;
	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,

/* in efx_farch_rx_init() */
	struct efx_nic *efx = rx_queue->efx;
	/* For kernel-mode queues in Siena, the JUMBO flag enables scatter. */
	jumbo_en = efx->rx_scatter;
	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);
	rx_queue->scatter_n = 0;
	efx_init_special_buffer(efx, &rx_queue->rxd);
			 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			 efx_rx_queue_channel(rx_queue)->channel,
			 __ffs(rx_queue->rxd.entries),
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,

/* in efx_farch_flush_rx_queue() */
	struct efx_nic *efx = rx_queue->efx;

/* in efx_farch_rx_fini() */
	struct efx_nic *efx = rx_queue->efx;
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
	efx_fini_special_buffer(efx, &rx_queue->rxd);

/* in efx_farch_rx_remove() */
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);

/* in efx_farch_flush_wake() */
	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
/* in efx_check_tx_flush_complete() */
			     FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			netif_dbg(efx, hw, efx->net_dev,
				  tx_queue->queue);
		} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
			netif_dbg(efx, hw, efx->net_dev,
				  "the queue\n", tx_queue->queue);

/* in efx_farch_do_flush() */
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	while (timeout && atomic_read(&efx->active_queues) > 0) {
			if (atomic_read(&efx->rxq_flush_outstanding) >=
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				atomic_inc(&efx->rxq_flush_outstanding);
		timeout = wait_event_timeout(efx->flush_wq,
	if (atomic_read(&efx->active_queues) &&
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;
		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);

/* in efx_farch_fini_dmaq() */
	if (efx->state != STATE_RECOVERY) {
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			efx->type->finish_flush(efx);

/*
 * completion events. This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * won't flush any RX queues because efx->rxq_flush_outstanding is at the limit
 * of 4 for batched flush requests; and the efx->active_queues gets messed up
 */

/* in efx_farch_finish_flr() */
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
/*
 * Event queues are processed by per-channel tasklets.
 */

/* in efx_farch_ev_read_ack() */
	struct efx_nic *efx = channel->efx;
			     channel->eventq_read_ptr & channel->eventq_mask);
			 efx->type->evq_rptr_tbl_base +
			 FR_BZ_EVQ_RPTR_STEP * channel->channel);

/* in efx_farch_generate_event() */
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];

/* in efx_farch_magic_event() */
	efx_farch_generate_event(channel->efx, channel->channel, &event);

/* in efx_farch_handle_tx_event() */
	struct efx_nic *efx = channel->efx;
	if (unlikely(READ_ONCE(efx->reset_pending)))
		tx_queue = channel->tx_queue +
		tx_queue = channel->tx_queue +
		netif_tx_lock(efx->net_dev);
		netif_tx_unlock(efx->net_dev);
		netif_err(efx, tx_err, efx->net_dev,
			  EFX_QWORD_FMT"\n", channel->channel,

/* in efx_farch_handle_rx_not_ok() */
	struct efx_nic *efx = rx_queue->efx;
	 * checksum errors during self-test. */
		++channel->n_rx_frm_trunc;
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
			++channel->n_rx_ip_hdr_chksum_err;
			++channel->n_rx_tcp_udp_chksum_err;
	netif_dbg(efx, rx_err, efx->net_dev,
	if (efx->net_dev->features & NETIF_F_RXALL)

/* Handle receive events that are not in-order. Return true if this

/* in efx_farch_handle_rx_bad_index() */
	struct efx_nic *efx = rx_queue->efx;
	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,

/*
 * discard non-matching multicast packets.
 */

/* in efx_farch_handle_rx_event() */
	struct efx_nic *efx = channel->efx;
	if (unlikely(READ_ONCE(efx->reset_pending)))
				channel->channel);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_queue->scatter_n) {
				   rx_queue->removed_count & rx_queue->ptr_mask,
				   rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
			   rx_queue->removed_count & rx_queue->ptr_mask,
		++rx_queue->removed_count;
	++rx_queue->scatter_n;
		++channel->n_rx_mcast_mismatch;
		channel->irq_mod_score += 2;
		   rx_queue->removed_count & rx_queue->ptr_mask,
		   rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
/* in efx_farch_handle_tx_flush_done() */
	if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
		tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
			efx_farch_magic_event(tx_queue->channel,

/* in efx_farch_handle_rx_flush_done() */
	if (qid >= efx->n_channels)
		netif_info(efx, hw, efx->net_dev,
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	atomic_dec(&efx->rxq_flush_outstanding);
	wake_up(&efx->flush_wq);

/* in efx_farch_handle_drain_event() */
	struct efx_nic *efx = channel->efx;
	WARN_ON(atomic_read(&efx->active_queues) == 0);
	atomic_dec(&efx->active_queues);
	wake_up(&efx->flush_wq);

/* in efx_farch_handle_generated_event() */
	struct efx_nic *efx = channel->efx;
		channel->event_test_cpu = raw_smp_processor_id();
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  channel->channel, EFX_QWORD_VAL(*event));

/* in efx_farch_handle_driver_event() */
	struct efx_nic *efx = channel->efx;
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		netif_dbg(efx, hw, efx->net_dev,
			  channel->channel, ev_sub_data);
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		netif_vdbg(efx, hw, efx->net_dev,
			   channel->channel, ev_sub_data);
		netif_vdbg(efx, hw, efx->net_dev,
			   channel->channel, ev_sub_data);
		netif_err(efx, rx_err, efx->net_dev,
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		netif_err(efx, rx_err, efx->net_dev,
		netif_err(efx, tx_err, efx->net_dev,
		netif_vdbg(efx, hw, efx->net_dev,
			   "data %04x\n", channel->channel, ev_sub_code,

/* in efx_farch_ev_process() */
	struct efx_nic *efx = channel->efx;
	read_ptr = channel->eventq_read_ptr;
		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   channel->channel, EFX_QWORD_VAL(event));
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  EFX_QWORD_FMT ")\n", channel->channel,
	channel->eventq_read_ptr = read_ptr;

/* in efx_farch_ev_probe() */
	struct efx_nic *efx = channel->efx;
	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,

/* in efx_farch_ev_init() */
	struct efx_nic *efx = channel->efx;
	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	efx_init_special_buffer(efx, &channel->eventq);
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
			 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

/* in efx_farch_ev_fini() */
	struct efx_nic *efx = channel->efx;
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	efx_fini_special_buffer(efx, &channel->eventq);

/* in efx_farch_ev_remove() */
	efx_free_special_buffer(channel->efx, &channel->eventq);

/*
 * queue processing is carried out by per-channel tasklets.
 */

/* in efx_farch_interrupts() */
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
/* in efx_farch_irq_enable_master() */
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));

/* in efx_farch_fatal_interrupt() */
	efx_oword_t *int_ker = efx->irq_status.addr;
	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		netif_err(efx, hw, efx->net_dev,
		pci_clear_master(efx->pci_dev);
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen."

/* in efx_farch_legacy_interrupt() */
	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
	efx_oword_t *int_ker = efx->irq_status.addr;
	    !efx->eeh_disabled_legacy_irq) {
		disable_irq_nosync(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = true;
	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level) && soft_enabled) {
		efx->last_irq_cpu = raw_smp_processor_id();
		efx->irq_zero_count = 0;
		if (efx->irq_zero_count++ == 0)
				       channel->eventq_read_ptr);
	netif_vdbg(efx, intr, efx->net_dev,

/* in efx_farch_msi_interrupt() */
	struct efx_nic *efx = context->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	netif_vdbg(efx, intr, efx->net_dev,
	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
	/* Handle non-event-queue sources */
	if (context->index == efx->irq_level) {
		efx->last_irq_cpu = raw_smp_processor_id();
	efx_schedule_channel_irq(efx->channel[context->index]);

/* in efx_farch_rx_push_indir_table() */
	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
			     efx->rss_context.rx_indir_table[i]);

/* in efx_farch_rx_pull_indir_table() */
	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
		efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE);
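/*
 * Minimal model (standalone, with an assumed 128-entry table size) of
 * the RSS indirection table pushed and pulled above: the low bits of a
 * packet's receive hash select one table entry, which names the RX
 * queue that packet is steered to.
 */
#include <stdint.h>
#include <stdio.h>

#define INDIR_ENTRIES 128		/* assumed table size */

int main(void)
{
	uint8_t indir[INDIR_ENTRIES];
	unsigned int n_rx_queues = 4, i;
	uint32_t rx_hash = 0x9e3779b9;	/* example hash value */

	for (i = 0; i < INDIR_ENTRIES; i++)	/* spread queues evenly */
		indir[i] = i % n_rx_queues;

	printf("hash %#x -> RX queue %u\n", rx_hash,
	       indir[rx_hash & (INDIR_ENTRIES - 1)]);
	return 0;
}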
/*
 * efx->vf_buftbl_base	buftbl entries for SR-IOV
 * efx->rx_dc_base	RX descriptor caches
 * efx->tx_dc_base	TX descriptor caches
 */

/* in efx_farch_dimension_resources() */
	total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
	vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL);
	nic_data = efx->nic_data;
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx)) {
			nic_data->vf_buftbl_base = buftbl_min;
			buftbl_free = (sram_lim_qw - buftbl_min -
				   (1024U - EFX_VI_BASE) >> efx->vi_scale);
			if (efx->vf_count > vf_limit) {
				netif_err(efx, probe, efx->net_dev,
					  efx->vf_count, vf_limit);
				efx->vf_count = vf_limit;
			vi_count += efx->vf_count * efx_vf_size(efx);
	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
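/*
 * Sketch of the arithmetic above: the descriptor caches are carved
 * from the top of SRAM, TX caches ending at sram_lim_qw and RX caches
 * sitting directly below them. All constants here are assumptions for
 * illustration only, not the driver's actual values.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int sram_lim_qw = 0x10000;	/* assumed SRAM limit */
	const unsigned int tx_dc_entries = 16;		/* assumed */
	const unsigned int rx_dc_entries = 64;		/* assumed */
	unsigned int vi_count = 32;			/* virtual interfaces */

	unsigned int tx_dc_base = sram_lim_qw - vi_count * tx_dc_entries;
	unsigned int rx_dc_base = tx_dc_base - vi_count * rx_dc_entries;

	printf("tx_dc_base=%#x rx_dc_base=%#x\n", tx_dc_base, rx_dc_base);
	return 0;
}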
/* in efx_farch_init_common() */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	/* Set RX descriptor cache size. Set low watermark to size-8, as
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
		efx->irq_level = 0x1f;
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;
	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	/* Enable SW_EV to inherit in char driver - assume harmless here */

/* "Fudge factors" - difference between programmed value and actual depth.

/* Hard maximum search limit. Hardware will time-out beyond 200-something.
 * counter-productive. */

/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple. The initial LFSR state is 0xffff. */

/* in efx_farch_filter_increment() */
	return key * 2 - 1;
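/*
 * Naive bit-serial sketch of the LFSR described in the comment above
 * (polynomial x^16 + x^3 + 1, state seeded with 0xffff, fed the 32-bit
 * n-tuple key). This illustrates the maths only; it is not the
 * driver's optimised hash implementation. The matching probe
 * increment, key * 2 - 1, is always odd, so double hashing visits
 * every slot of a power-of-two table.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t lfsr_hash(uint32_t key)
{
	uint16_t state = 0xffff;		/* initial LFSR state */
	int i;

	for (i = 31; i >= 0; i--) {
		unsigned int feedback = ((state >> 15) ^ (key >> i)) & 1;

		state <<= 1;
		if (feedback)
			state ^= 0x0009;	/* taps for x^3 + 1 */
	}
	return state;
}

static uint16_t increment(uint32_t key)
{
	return key * 2 - 1;			/* always odd */
}

int main(void)
{
	uint32_t key = 0xc0a80001;		/* example n-tuple key */

	printf("hash=%#06x incr=%#06x\n", lfsr_hash(key), increment(key));
	return 0;
}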
/* in efx_farch_filter_spec_table_id() */
	return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);

/* in efx_farch_filter_push_rx_config() */
	struct efx_farch_filter_state *state = efx->filter_state;
	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
		table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
		table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
		table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
		table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
	if (table->size) {
			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
			table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
			table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
			!!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
			   table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
			efx->rx_scatter);

/* in efx_farch_filter_push_tx_limits() */
	struct efx_farch_filter_state *state = efx->filter_state;
	table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
	if (table->size) {
			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +

/* in efx_farch_filter_from_gen_spec() */
	if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context)
		return -EINVAL;
	spec->priority = gen_spec->priority;
	spec->flags = gen_spec->flags;
	spec->dmaq_id = gen_spec->dmaq_id;
	switch (gen_spec->match_flags) {
		__be32 rhost, host1, host2;
		EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
		if (gen_spec->ether_type != htons(ETH_P_IP))
			return -EPROTONOSUPPORT;
		if (gen_spec->loc_port == 0 ||
		    (is_full && gen_spec->rem_port == 0))
			return -EADDRNOTAVAIL;
		switch (gen_spec->ip_proto) {
			spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
			spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
			return -EPROTONOSUPPORT;
		rhost = is_full ? gen_spec->rem_host[0] : 0;
		rport = is_full ? gen_spec->rem_port : 0;
		host2 = gen_spec->loc_host[0];
		if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
			port1 = gen_spec->loc_port;
			port2 = gen_spec->loc_port;
		spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
		spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
		spec->data[2] = ntohl(host2);
		spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
		spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
		spec->data[1] = (gen_spec->loc_mac[2] << 24 |
				 gen_spec->loc_mac[3] << 16 |
				 gen_spec->loc_mac[4] << 8 |
				 gen_spec->loc_mac[5]);
		spec->data[2] = (gen_spec->loc_mac[0] << 8 |
				 gen_spec->loc_mac[1]);
		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
		memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
		return -EPROTONOSUPPORT;

/* in efx_farch_filter_to_gen_spec() */
	gen_spec->priority = spec->priority;
	gen_spec->flags = spec->flags;
	gen_spec->dmaq_id = spec->dmaq_id;
	switch (spec->type) {
		__be32 host1, host2;
		gen_spec->match_flags =
			gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
		gen_spec->ether_type = htons(ETH_P_IP);
		gen_spec->ip_proto =
			(spec->type == EFX_FARCH_FILTER_TCP_FULL ||
			 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
		host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
		port1 = htons(spec->data[0]);
		host2 = htonl(spec->data[2]);
		port2 = htons(spec->data[1] >> 16);
		if (spec->flags & EFX_FILTER_FLAG_TX) {
			gen_spec->loc_host[0] = host1;
			gen_spec->rem_host[0] = host2;
			gen_spec->loc_host[0] = host2;
			gen_spec->rem_host[0] = host1;
		if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
		    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
			gen_spec->loc_port = port1;
			gen_spec->rem_port = port2;
			gen_spec->loc_port = port2;
			gen_spec->rem_port = port1;
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
			gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
		gen_spec->loc_mac[0] = spec->data[2] >> 8;
		gen_spec->loc_mac[1] = spec->data[2];
		gen_spec->loc_mac[2] = spec->data[1] >> 24;
		gen_spec->loc_mac[3] = spec->data[1] >> 16;
		gen_spec->loc_mac[4] = spec->data[1] >> 8;
		gen_spec->loc_mac[5] = spec->data[1];
		gen_spec->outer_vid = htons(spec->data[0]);
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
		gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
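/*
 * Round-trip demo (standalone, values in CPU byte order for
 * simplicity) of the data[] packing in efx_farch_filter_from_gen_spec()
 * and its inverse in efx_farch_filter_to_gen_spec() above: 96 bits of
 * n-tuple laid out as port1 | host1 | port2 | host2 across three
 * 32-bit words.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t host1 = 0xc0a80001, host2 = 0xc0a80002;	/* 192.168.0.x */
	uint16_t port1 = 80, port2 = 12345;
	uint32_t data[3];

	/* pack, as in from_gen_spec() */
	data[0] = host1 << 16 | port1;
	data[1] = (uint32_t)port2 << 16 | host1 >> 16;
	data[2] = host2;

	/* unpack, as in to_gen_spec(), and check the round trip */
	assert((uint16_t)data[0] == port1);
	assert((data[0] >> 16 | data[1] << 16) == host1);
	assert((uint16_t)(data[1] >> 16) == port2);
	assert(data[2] == host2);
	return 0;
}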
/* in efx_farch_filter_init_rx_auto() */
	spec->priority = EFX_FILTER_PRI_AUTO;
	spec->flags = (EFX_FILTER_FLAG_RX |
		       (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
	spec->dmaq_id = 0;

/* Build a filter entry and return its n-tuple key. */

/* in efx_farch_filter_build() */
	bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
		       spec->type == EFX_FARCH_FILTER_UDP_WILD);
		!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
		!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
		FRF_BZ_RXQ_ID, spec->dmaq_id,
		EFX_DWORD_2, spec->data[2],
		EFX_DWORD_1, spec->data[1],
		EFX_DWORD_0, spec->data[0]);
	bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
		!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
		!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
		FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
		FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
		FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
		FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
	bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
		FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
		FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
		FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
		FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
	data3 = is_wild | spec->dmaq_id << 1;
	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;

/* in efx_farch_filter_equal() */
	if (left->type != right->type ||
	    memcmp(left->data, right->data, sizeof(left->data)))
	if (left->flags & EFX_FILTER_FLAG_TX &&
	    left->dmaq_id != right->dmaq_id)

/*
 * accept user-provided IDs.
 */

#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)

/* in efx_farch_filter_make_id() */
	range = efx_farch_filter_type_match_pri[spec->type];
	if (!(spec->flags & EFX_FILTER_FLAG_RX))

/* in efx_farch_filter_get_rx_id_limit() */
	struct efx_farch_filter_state *state = efx->filter_state;
	unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
		if (state->table[table_id].size != 0)
				state->table[table_id].size;
	} while (range--);
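/*
 * Sketch of the filter-ID scheme used by efx_farch_filter_make_id():
 * an ID packs a match-priority range above a table index, and the
 * (1 << WIDTH) - 1 mask recovers the index. WIDTH = 13 here is an
 * assumption for illustration, not necessarily the driver's value.
 */
#include <assert.h>

#define INDEX_WIDTH 13
#define INDEX_MASK ((1 << INDEX_WIDTH) - 1)

static unsigned int make_id(unsigned int range, unsigned int index)
{
	return range << INDEX_WIDTH | index;
}

int main(void)
{
	unsigned int id = make_id(3, 4242);

	assert((id & INDEX_MASK) == 4242);	/* index recovered */
	assert((id >> INDEX_WIDTH) == 3);	/* range recovered */
	return 0;
}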
/* in efx_farch_filter_insert() */
	struct efx_farch_filter_state *state = efx->filter_state;
	down_write(&state->lock);
	table = &state->table[efx_farch_filter_spec_table_id(&spec)];
	if (table->size == 0) {
		rc = -EINVAL;
	netif_vdbg(efx, hw, efx->net_dev,
		   table->search_limit[spec.type]);
	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
			     EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
		rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
		unsigned int max_rep_depth = table->search_limit[spec.type];
		unsigned int i = hash & (table->size - 1);
		ins_index = -1;
			if (!test_bit(i, table->used_bitmap)) {
						&table->spec[i])) {
				rc = -EBUSY;
				rep_index = -1;
			i = (i + incr) & (table->size - 1);
			&table->spec[rep_index];
		if (spec.priority == saved_spec->priority && !replace_equal) {
			rc = -EEXIST;
		if (spec.priority < saved_spec->priority) {
			rc = -EPERM;
		if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
		    saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
		__set_bit(ins_index, table->used_bitmap);
		++table->used;
	table->spec[ins_index] = spec;
	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		if (table->search_limit[spec.type] < depth) {
			table->search_limit[spec.type] = depth;
			   table->offset + table->step * ins_index);
	netif_vdbg(efx, hw, efx->net_dev,
	up_write(&state->lock);
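/*
 * Standalone sketch of the probe loop shape in efx_farch_filter_insert()
 * above: start at hash & (size - 1), step by an odd increment, and give
 * up once a depth limit is reached (cf. the hard search limit noted
 * earlier). Table size and contents here are made up for the demo.
 */
#include <stdbool.h>
#include <stdio.h>

#define TABLE_SIZE 256			/* power of two (assumed) */
#define MAX_DEPTH 200			/* assumed search limit */

static bool used[TABLE_SIZE];

static int probe(unsigned int hash, unsigned int incr)
{
	unsigned int i = hash & (TABLE_SIZE - 1);
	unsigned int depth = 1;

	for (;;) {
		if (!used[i])
			return i;	/* free slot found */
		if (++depth > MAX_DEPTH)
			return -1;	/* give up: table full here */
		i = (i + incr) & (TABLE_SIZE - 1);
	}
}

int main(void)
{
	used[0x42] = true;		/* occupy the first candidate slot */
	printf("inserted at index %d\n", probe(0x42, 0x42 * 2 - 1));
	return 0;
}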
/* in efx_farch_filter_table_clear_entry() */
	EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
	BUG_ON(table->offset == 0); /* can't clear MAC default filters */
	__clear_bit(filter_idx, table->used_bitmap);
	--table->used;
	memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
	efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
	 * unless the table has become completely empty - in
	if (unlikely(table->used == 0)) {
		memset(table->search_limit, 0, sizeof(table->search_limit));
		if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)

/* in efx_farch_filter_remove() */
	struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
	if (!test_bit(filter_idx, table->used_bitmap) ||
	    spec->priority != priority)
		return -ENOENT;
	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {

/* in efx_farch_filter_remove_safe() */
	struct efx_farch_filter_state *state = efx->filter_state;
		return -ENOENT;
	table = &state->table[table_id];
	if (filter_idx >= table->size)
		return -ENOENT;
	down_write(&state->lock);
	up_write(&state->lock);

/* in efx_farch_filter_get_safe() */
	struct efx_farch_filter_state *state = efx->filter_state;
	int rc = -ENOENT;
	down_read(&state->lock);
	table = &state->table[table_id];
	if (filter_idx >= table->size)
	spec = &table->spec[filter_idx];
	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority) {
	up_read(&state->lock);

/* in efx_farch_filter_table_clear() */
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table = &state->table[table_id];
	down_write(&state->lock);
	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
		if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
	up_write(&state->lock);

/* in efx_farch_filter_count_rx_used() */
	struct efx_farch_filter_state *state = efx->filter_state;
	down_read(&state->lock);
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority)
	up_read(&state->lock);

/* in efx_farch_filter_get_rx_ids() */
	struct efx_farch_filter_state *state = efx->filter_state;
	down_read(&state->lock);
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority) {
					count = -EMSGSIZE;
					&table->spec[filter_idx], filter_idx);
	up_read(&state->lock);

/* in efx_farch_filter_table_restore() */
	struct efx_farch_filter_state *state = efx->filter_state;
	down_write(&state->lock);
		table = &state->table[table_id];
		if (table->step == 0)
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap))
			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
				   table->offset + table->step * filter_idx);
	up_write(&state->lock);

/* in efx_farch_filter_table_remove() */
	struct efx_farch_filter_state *state = efx->filter_state;
		bitmap_free(state->table[table_id].used_bitmap);
		vfree(state->table[table_id].spec);

/* in efx_farch_filter_table_probe() */
		return -ENOMEM;
	efx->filter_state = state;
	init_rwsem(&state->lock);
	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
	table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
	table->offset = FR_BZ_RX_FILTER_TBL0;
	table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
	table->step = FR_BZ_RX_FILTER_TBL0_STEP;
	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
	table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
	table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
	table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
	table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
	table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
	table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
	table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
	table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
	table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
	table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
		table = &state->table[table_id];
		if (table->size == 0)
		table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
		if (!table->used_bitmap)
		table->spec = vzalloc(array_size(sizeof(*table->spec),
						 table->size));
		if (!table->spec)
	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
			spec = &table->spec[i];
			spec->type = EFX_FARCH_FILTER_UC_DEF + i;
			__set_bit(i, table->used_bitmap);
	return -ENOMEM;

/* in efx_farch_filter_update_rx_scatter() */
	struct efx_farch_filter_state *state = efx->filter_state;
	down_write(&state->lock);
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap) ||
			    table->spec[filter_idx].dmaq_id >=
			    efx->n_rx_channels)
			if (efx->rx_scatter)
				table->spec[filter_idx].flags |=
				table->spec[filter_idx].flags &=
			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
				   table->offset + table->step * filter_idx);
	up_write(&state->lock);

/* in efx_farch_filter_rfs_expire_one() */
	struct efx_farch_filter_state *state = efx->filter_state;
	down_write(&state->lock);
	spin_lock_bh(&efx->rps_hash_lock);
	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
	if (test_bit(index, table->used_bitmap) &&
	    table->spec[index].priority == EFX_FILTER_PRI_HINT) {
		efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
		if (!efx->rps_hash_table) {
			arfs_id = rule->arfs_id;
		if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
			rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
	spin_unlock_bh(&efx->rps_hash_lock);
	up_write(&state->lock);

/* in efx_farch_filter_sync_rx_mode() */
	struct net_device *net_dev = efx->net_dev;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		crc = ether_crc_le(ETH_ALEN, ha->addr);
		bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
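/*
 * Standalone sketch of the multicast hash computed above: a
 * little-endian (reflected) CRC-32 over the 6-byte MAC address, with
 * the low bits selecting a bit in the hardware hash table.
 * EFX_MCAST_HASH_ENTRIES is assumed to be 256 here, and this crc32_le
 * is a generic reflected CRC, written to mirror what ether_crc_le()
 * computes rather than being the kernel's implementation.
 */
#include <stdint.h>
#include <stdio.h>

#define MCAST_HASH_ENTRIES 256		/* assumed table size */

static uint32_t crc32_le(const uint8_t *data, int len)
{
	uint32_t crc = 0xffffffff;	/* standard CRC-32 init */
	int bit;

	while (len--) {
		crc ^= *data++;
		for (bit = 0; bit < 8; bit++)	/* reflected poly 0xedb88320 */
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = crc32_le(mac, 6);
	unsigned int bit = crc & (MCAST_HASH_ENTRIES - 1);

	printf("crc=%#010x -> hash bit %u\n", crc, bit);
	return 0;
}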