/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#ifndef __BNAD_H__
#define __BNAD_H__

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/ipv6.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>

/* Fix for IA64 */
#include <asm/checksum.h>
#include <net/ip6_checksum.h>

#include <net/ip.h>
#include <net/tcp.h>

#include "bna.h"

#define BNAD_TXQ_DEPTH		2048
#define BNAD_RXQ_DEPTH		2048

#define BNAD_MAX_TX		1
#define BNAD_MAX_TXQ_PER_TX	8	/* 8 priority queues */
#define BNAD_TXQ_NUM		1

#define BNAD_MAX_RX		1
#define BNAD_MAX_RXP_PER_RX	16
#define BNAD_MAX_RXQ_PER_RXP	2

/*
 * Control structure pointed to by ccb->ctrl, which
 * determines the NAPI / LRO behavior of the CCB.
 * There is a 1:1 correspondence between ccb & ctrl.
 */
struct bnad_rx_ctrl {
	struct bna_ccb		*ccb;
	struct bnad		*bnad;
	unsigned long		flags;
	struct napi_struct	napi;
	u64			rx_intr_ctr;
	u64			rx_poll_ctr;
	u64			rx_schedule;
	u64			rx_keep_poll;
	u64			rx_complete;
};

#define BNAD_RXMODE_PROMISC_DEFAULT	BNA_RXMODE_PROMISC

/*
 * GLOBAL #defines (CONSTANTS)
 */
#define BNAD_NAME			"bna"
#define BNAD_NAME_LEN			64

#define BNAD_VERSION			"3.2.25.1"

#define BNAD_MAILBOX_MSIX_INDEX		0
#define BNAD_MAILBOX_MSIX_VECTORS	1
#define BNAD_INTX_TX_IB_BITMASK		0x1
#define BNAD_INTX_RX_IB_BITMASK		0x2

#define BNAD_STATS_TIMER_FREQ		1000	/* in msecs */
#define BNAD_DIM_TIMER_FREQ		1000	/* in msecs */

#define BNAD_IOCETH_TIMEOUT		10000

#define BNAD_MIN_Q_DEPTH		512
#define BNAD_MAX_RXQ_DEPTH		16384
#define BNAD_MAX_TXQ_DEPTH		2048

#define BNAD_JUMBO_MTU			9000

#define BNAD_NETIF_WAKE_THRESHOLD	8

#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3

/* Bit positions for tcb->flags */
#define BNAD_TXQ_FREE_SENT		0
#define BNAD_TXQ_TX_STARTED		1

/* Bit positions for rcb->flags */
#define BNAD_RXQ_STARTED		0
#define BNAD_RXQ_POST_OK		1

/* Resource limits */
#define BNAD_NUM_TXQ	(bnad->num_tx * bnad->num_txq_per_tx)
#define BNAD_NUM_RXP	(bnad->num_rx * bnad->num_rxp_per_rx)

#define BNAD_FRAME_SIZE(_mtu) \
	(ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN)
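
/*
 * For example, with ETH_HLEN = 14, VLAN_HLEN = 4 and ETH_FCS_LEN = 4,
 * BNAD_FRAME_SIZE(1500) evaluates to 14 + 4 + 1500 + 4 = 1522 bytes,
 * and BNAD_FRAME_SIZE(BNAD_JUMBO_MTU) to 14 + 4 + 9000 + 4 = 9022 bytes.
 */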

/*
 * DATA STRUCTURES
 */

/* enums */
enum bnad_intr_source {
	BNAD_INTR_TX		= 1,
	BNAD_INTR_RX		= 2
};

enum bnad_link_state {
	BNAD_LS_DOWN		= 0,
	BNAD_LS_UP		= 1
};

struct bnad_iocmd_comp {
	struct bnad		*bnad;
	struct completion	comp;
	int			comp_status;
};

struct bnad_completion {
	struct completion	ioc_comp;
	struct completion	ucast_comp;
	struct completion	mcast_comp;
	struct completion	tx_comp;
	struct completion	rx_comp;
	struct completion	stats_comp;
	struct completion	enet_comp;
	struct completion	mtu_comp;

	u8			ioc_comp_status;
	u8			ucast_comp_status;
	u8			mcast_comp_status;
	u8			tx_comp_status;
	u8			rx_comp_status;
	u8			stats_comp_status;
	u8			port_comp_status;
	u8			mtu_comp_status;
};

/* Tx Rx Control Stats */
struct bnad_drv_stats {
	u64		netif_queue_stop;
	u64		netif_queue_wakeup;
	u64		netif_queue_stopped;
	u64		tso4;
	u64		tso6;
	u64		tso_err;
	u64		tcpcsum_offload;
	u64		udpcsum_offload;
	u64		csum_help;
	u64		tx_skb_too_short;
	u64		tx_skb_stopping;
	u64		tx_skb_max_vectors;
	u64		tx_skb_mss_too_long;
	u64		tx_skb_tso_too_short;
	u64		tx_skb_tso_prepare;
	u64		tx_skb_non_tso_too_long;
	u64		tx_skb_tcp_hdr;
	u64		tx_skb_udp_hdr;
	u64		tx_skb_csum_err;
	u64		tx_skb_headlen_too_long;
	u64		tx_skb_headlen_zero;
	u64		tx_skb_frag_zero;
	u64		tx_skb_len_mismatch;
	u64		tx_skb_map_failed;

	u64		hw_stats_updates;
	u64		netif_rx_dropped;

	u64		link_toggle;
	u64		cee_toggle;

	u64		rxp_info_alloc_failed;
	u64		mbox_intr_disabled;
	u64		mbox_intr_enabled;
	u64		tx_unmap_q_alloc_failed;
	u64		rx_unmap_q_alloc_failed;

	u64		rxbuf_alloc_failed;
	u64		rxbuf_map_failed;
};

/* Complete driver stats */
struct bnad_stats {
	struct bnad_drv_stats	drv_stats;
	struct bna_stats	*bna_stats;
};

/* Tx / Rx Resources */
struct bnad_tx_res_info {
	struct bna_res_info res_info[BNA_TX_RES_T_MAX];
};

struct bnad_rx_res_info {
	struct bna_res_info res_info[BNA_RX_RES_T_MAX];
};

struct bnad_tx_info {
	struct bna_tx *tx;	/* 1:1 between tx_info & tx */
	struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
	u32 tx_id;
	struct delayed_work tx_cleanup_work;
} ____cacheline_aligned;

struct bnad_rx_info {
	struct bna_rx *rx;	/* 1:1 between rx_info & rx */

	struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
	u32 rx_id;
	struct work_struct rx_cleanup_work;
} ____cacheline_aligned;

struct bnad_tx_vector {
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

struct bnad_tx_unmap {
	struct sk_buff		*skb;
	u32			nvecs;
	struct bnad_tx_vector	vectors[BFI_TX_MAX_VECTORS_PER_WI];
};

struct bnad_rx_vector {
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	u32			len;
};

struct bnad_rx_unmap {
	struct page		*page;
	struct sk_buff		*skb;
	struct bnad_rx_vector	vector;
	u32			page_offset;
};

enum bnad_rxbuf_type {
	BNAD_RXBUF_NONE		= 0,
	BNAD_RXBUF_SK_BUFF	= 1,
	BNAD_RXBUF_PAGE		= 2,
	BNAD_RXBUF_MULTI_BUFF	= 3
};

#define BNAD_RXBUF_IS_SK_BUFF(_type)	((_type) == BNAD_RXBUF_SK_BUFF)
#define BNAD_RXBUF_IS_MULTI_BUFF(_type)	((_type) == BNAD_RXBUF_MULTI_BUFF)

struct bnad_rx_unmap_q {
	int			reuse_pi;
	int			alloc_order;
	u32			map_size;
	enum bnad_rxbuf_type	type;
	struct bnad_rx_unmap	unmap[] ____cacheline_aligned;
};

#define BNAD_PCI_DEV_IS_CAT2(_bnad) \
	((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2)

/* Bit mask values for bnad->cfg_flags */
#define BNAD_CF_DIM_ENABLED		0x01	/* DIM */
#define BNAD_CF_PROMISC			0x02
#define BNAD_CF_ALLMULTI		0x04
#define BNAD_CF_DEFAULT			0x08
#define BNAD_CF_MSIX			0x10	/* If in MSI-X mode */

/* Defines for run_flags bit-mask */
/* Set, tested & cleared using xxx_bit() functions */
/* Values indicate bit positions */
#define BNAD_RF_CEE_RUNNING		0
#define BNAD_RF_MTU_SET			1
#define BNAD_RF_MBOX_IRQ_DISABLED	2
#define BNAD_RF_NETDEV_REGISTERED	3
#define BNAD_RF_DIM_TIMER_RUNNING	4
#define BNAD_RF_STATS_TIMER_RUNNING	5
#define BNAD_RF_TX_PRIO_SET		6
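
/*
 * Illustrative sketch of how a run_flags bit is used with the xxx_bit()
 * helpers (assuming a struct bnad *bnad as declared below):
 *
 *	set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags);
 *	if (test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
 *		clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags);
 */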

struct bnad {
	struct net_device	*netdev;
	u32			id;

	/* Data path */
	struct bnad_tx_info tx_info[BNAD_MAX_TX];
	struct bnad_rx_info rx_info[BNAD_MAX_RX];

	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/*
	 * These queue numbers are global only because they are
	 * used to calculate MSI-X vectors.
	 * The exact number of queues is per Tx/Rx object.
	 */
	u32			num_tx;
	u32			num_rx;
	u32			num_txq_per_tx;
	u32			num_rxp_per_rx;

	u32			txq_depth;
	u32			rxq_depth;

	u8			tx_coalescing_timeo;
	u8			rx_coalescing_timeo;

	struct bna_rx_config rx_config[BNAD_MAX_RX] ____cacheline_aligned;
	struct bna_tx_config tx_config[BNAD_MAX_TX] ____cacheline_aligned;

	void __iomem		*bar0;	/* BAR0 address */

	struct bna		bna;

	u32			cfg_flags;
	unsigned long		run_flags;

	struct pci_dev		*pcidev;
	u64			mmio_start;
	u64			mmio_len;

	u32			msix_num;
	struct msix_entry	*msix_table;

	struct mutex		conf_mutex;
	spinlock_t		bna_lock ____cacheline_aligned;

	/* Timers */
	struct timer_list	ioc_timer;
	struct timer_list	dim_timer;
	struct timer_list	stats_timer;

	/* Control path resources, memory & irq */
	struct bna_res_info res_info[BNA_RES_T_MAX];
	struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX];
	struct bnad_tx_res_info tx_res_info[BNAD_MAX_TX];
	struct bnad_rx_res_info rx_res_info[BNAD_MAX_RX];

	struct bnad_completion bnad_completions;

	/* Burnt-in MAC address */
	u8			perm_addr[ETH_ALEN];

	struct workqueue_struct *work_q;

	/* Statistics */
	struct bnad_stats	stats;

	struct bnad_diag	*diag;

	char			adapter_name[BNAD_NAME_LEN];
	char			port_name[BNAD_NAME_LEN];
	char			mbox_irq_name[BNAD_NAME_LEN];
	char			wq_name[BNAD_NAME_LEN];

	/* debugfs specific data */
	char			*regdata;
	u32			reglen;
	struct dentry		*bnad_dentry_files[5];
	struct dentry		*port_debugfs_root;
};

struct bnad_drvinfo {
	struct bfa_ioc_attr	ioc_attr;
	struct bfa_cee_attr	cee_attr;
	struct bfa_flash_attr	flash_attr;
	u32			cee_status;
	u32			flash_status;
};

/*
 * EXTERN VARIABLES
 */
extern const struct firmware *bfi_fw;

/*
 * EXTERN PROTOTYPES
 */
u32 *cna_get_firmware_buf(struct pci_dev *pdev);
/* Netdev entry point prototypes */
void bnad_set_rx_mode(struct net_device *netdev);
struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev);
int bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr);
int bnad_enable_default_bcast(struct bnad *bnad);
void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
void bnad_set_ethtool_ops(struct net_device *netdev);
void bnad_cb_completion(void *arg, enum bfa_status status);

/* Configuration & setup */
void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
void bnad_rx_coalescing_timeo_set(struct bnad *bnad);

int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);

/* Timer start/stop protos */
void bnad_dim_timer_start(struct bnad *bnad);

/* Statistics */
void bnad_netdev_qstats_fill(struct bnad *bnad,
			     struct rtnl_link_stats64 *stats);
void bnad_netdev_hwstats_fill(struct bnad *bnad,
			      struct rtnl_link_stats64 *stats);

/* Debugfs */
void bnad_debugfs_init(struct bnad *bnad);
void bnad_debugfs_uninit(struct bnad *bnad);

/* MACROS */
/* To set & get the stats counters */
#define BNAD_UPDATE_CTR(_bnad, _ctr)	\
	(((_bnad)->stats.drv_stats._ctr)++)

#define BNAD_GET_CTR(_bnad, _ctr)	((_bnad)->stats.drv_stats._ctr)
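
/*
 * Illustrative sketch of the counter macros above; the counter name must
 * be a field of struct bnad_drv_stats:
 *
 *	BNAD_UPDATE_CTR(bnad, tso4);
 *	u64 tso4_count = BNAD_GET_CTR(bnad, tso4);
 */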

#define bnad_enable_rx_irq_unsafe(_ccb)			\
{							\
	if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\
		bna_ib_coalescing_timer_set((_ccb)->i_dbell,	\
				(_ccb)->rx_coalescing_timeo);	\
		bna_ib_ack((_ccb)->i_dbell, 0);			\
	}							\
}

#endif /* __BNAD_H__ */