/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file should not be included directly.  Include t4vf_common.h instead.
 */

#ifndef __CXGB4VF_ADAPTER_H__
#define __CXGB4VF_ADAPTER_H__

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>

#include "../cxgb4/t4_hw.h"

/*
 * Constants of the implementation.
 */
enum {
	MAX_NPORTS = 1,			/* max # of "ports" */
	MAX_PORT_QSETS = 8,		/* max # of Queue Sets / "port" */
	MAX_ETH_QSETS = MAX_NPORTS*MAX_PORT_QSETS,

	/*
	 * MSI-X interrupt index usage.
	 */
	MSIX_FW = 0,			/* MSI-X index for firmware Q */
	MSIX_IQFLINT = 1,		/* MSI-X index base for Ingress Qs */
	MSIX_EXTRAS = 1,
	MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,

	/*
	 * The maximum number of Ingress and Egress Queues is determined by
	 * the maximum number of "Queue Sets" which we support plus any
	 * ancillary queues.  Each "Queue Set" requires one Ingress Queue
	 * for RX Packet Ingress Event notifications and two Egress Queues
	 * for a Free List and an Ethernet TX list.
	 */
	INGQ_EXTRAS = 2,		/* firmware event queue and */
					/*   forwarded interrupts */
	MAX_INGQ = MAX_ETH_QSETS+INGQ_EXTRAS,
	MAX_EGRQ = MAX_ETH_QSETS*2,
};
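/*
 * With the constants above, MAX_ETH_QSETS works out to 1 * 8 = 8 Queue Sets,
 * so MSIX_ENTRIES = 8 + 1 = 9 MSI-X vectors, MAX_INGQ = 8 + 2 = 10 Ingress
 * Queues and MAX_EGRQ = 8 * 2 = 16 Egress Queues.
 */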
/*
 * Forward structure definition references.
 */
struct adapter;
struct sge_eth_rxq;
struct sge_rspq;

/*
 * Per-"port" information.  This is really per-Virtual Interface information
 * but the use of the "port" nomenclature makes it easier to go back and forth
 * between the PF and VF drivers ...
 */
struct port_info {
	struct adapter *adapter;	/* our adapter */
	u16 viid;			/* virtual interface ID */
	s16 xact_addr_filt;		/* index of our MAC address filter */
	u16 rss_size;			/* size of VI's RSS table slice */
	u8 pidx;			/* index into adapter port[] */
	s8 mdio_addr;
	u8 port_type;			/* firmware port type */
	u8 mod_type;			/* firmware module type */
	u8 port_id;			/* physical port ID */
	u8 nqsets;			/* # of "Queue Sets" */
	u8 first_qset;			/* index of first "Queue Set" */
	struct link_config link_cfg;	/* physical port configuration */
};

/*
 * Scatter Gather Engine resources for the "adapter".  Our ingress and egress
 * queues are organized into "Queue Sets" with one ingress and one egress
 * queue per Queue Set.  These Queue Sets are apportionable between the
 * "ports" (Virtual Interfaces).  One extra ingress queue is used to receive
 * asynchronous messages from the firmware.  Note that the "Queue IDs" that we
 * use here are really "Relative Queue IDs" which are returned as part of the
 * firmware command to allocate queues.  These queue IDs are relative to the
 * absolute Queue ID base of the section of the Queue ID space allocated to
 * the PF/VF.
 */

/*
 * SGE free-list queue state.
 */
struct rx_sw_desc;
struct sge_fl {
	unsigned int avail;		/* # of available RX buffers */
	unsigned int pend_cred;		/* new buffers since last FL DB ring */
	unsigned int cidx;		/* consumer index */
	unsigned int pidx;		/* producer index */
	unsigned long alloc_failed;	/* # of buffer allocation failures */
	unsigned long large_alloc_failed;
	unsigned long starving;		/* # of times FL was found starving */

	/*
	 * Write-once/infrequently fields.
	 * -------------------------------
	 */

	unsigned int cntxt_id;		/* SGE relative QID for the free list */
	unsigned int abs_id;		/* SGE absolute QID for the free list */
	unsigned int size;		/* capacity of free list */
	struct rx_sw_desc *sdesc;	/* address of SW RX descriptor ring */
	__be64 *desc;			/* address of HW RX descriptor ring */
	dma_addr_t addr;		/* PCI bus address of hardware ring */
	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
};

/*
 * An ingress packet gather list.
 */
struct pkt_gl {
	struct page_frag frags[MAX_SKB_FRAGS];
	void *va;			/* virtual address of first byte */
	unsigned int nfrags;		/* # of fragments */
	unsigned int tot_len;		/* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *,
			      const struct pkt_gl *);
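/*
 * A response queue handler is called with the current response descriptor
 * and, for messages carrying Free List data, a packet gather list describing
 * that data.  As an example, t4vf_ethrx_handler (declared at the end of this
 * file) matches this signature and is the handler typically installed on
 * Ethernet RX response queues.
 */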
/*
 * State for an SGE Response Queue.
 */
struct sge_rspq {
	struct napi_struct napi;	/* NAPI scheduling control */
	const __be64 *cur_desc;		/* current descriptor in queue */
	unsigned int cidx;		/* consumer index */
	u8 gen;				/* current generation bit */
	u8 next_intr_params;		/* holdoff params for next interrupt */
	int offset;			/* offset into current FL buffer */

	unsigned int unhandled_irqs;	/* bogus interrupts */

	/*
	 * Write-once/infrequently fields.
	 * -------------------------------
	 */

	u8 intr_params;			/* interrupt holdoff parameters */
	u8 pktcnt_idx;			/* interrupt packet threshold */
	u8 idx;				/* queue index within its group */
	u16 cntxt_id;			/* SGE rel QID for the response Q */
	u16 abs_id;			/* SGE abs QID for the response Q */
	__be64 *desc;			/* address of hardware response ring */
	dma_addr_t phys_addr;		/* PCI bus address of ring */
	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
	unsigned int iqe_len;		/* entry size */
	unsigned int size;		/* capacity of response Q */
	struct adapter *adapter;	/* our adapter */
	struct net_device *netdev;	/* associated net device */
	rspq_handler_t handler;		/* the handler for this response Q */
};

/*
 * Ethernet queue statistics
 */
struct sge_eth_stats {
	unsigned long pkts;		/* # of ethernet packets */
	unsigned long lro_pkts;		/* # of LRO super packets */
	unsigned long lro_merged;	/* # of wire packets merged by LRO */
	unsigned long rx_cso;		/* # of Rx checksum offloads */
	unsigned long vlan_ex;		/* # of Rx VLAN extractions */
	unsigned long rx_drops;		/* # of packets dropped due to no mem */
};

/*
 * State for an Ethernet Receive Queue.
 */
struct sge_eth_rxq {
	struct sge_rspq rspq;		/* Response Queue */
	struct sge_fl fl;		/* Free List */
	struct sge_eth_stats stats;	/* receive statistics */
};

/*
 * SGE Transmit Queue state.  This contains all of the resources associated
 * with the hardware status of a TX Queue which is a circular ring of hardware
 * TX Descriptors.  For convenience, it also contains a pointer to a parallel
 * "Software Descriptor" array but we don't know anything about it here other
 * than its type name.
 */
struct tx_desc {
	/*
	 * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the
	 * hardware: Sizes, Producer and Consumer indices, etc.
	 */
	__be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)];
};
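/*
 * As a point of reference (assuming SGE_EQ_IDXSIZE is 64 bytes, its value in
 * t4_hw.h at the time of writing), each TX descriptor above holds
 * 64 / 8 = 8 64-bit flits.
 */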
struct tx_sw_desc;
struct sge_txq {
	unsigned int in_use;		/* # of in-use TX descriptors */
	unsigned int size;		/* # of descriptors */
	unsigned int cidx;		/* SW consumer index */
	unsigned int pidx;		/* producer index */
	unsigned long stops;		/* # of times queue has been stopped */
	unsigned long restarts;		/* # of queue restarts */

	/*
	 * Write-once/infrequently fields.
	 * -------------------------------
	 */

	unsigned int cntxt_id;		/* SGE relative QID for the TX Q */
	unsigned int abs_id;		/* SGE absolute QID for the TX Q */
	struct tx_desc *desc;		/* address of HW TX descriptor ring */
	struct tx_sw_desc *sdesc;	/* address of SW TX descriptor ring */
	struct sge_qstat *stat;		/* queue status entry */
	dma_addr_t phys_addr;		/* PCI bus address of hardware ring */
	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
};

/*
 * State for an Ethernet Transmit Queue.
 */
struct sge_eth_txq {
	struct sge_txq q;		/* SGE TX Queue */
	struct netdev_queue *txq;	/* associated netdev TX queue */
	unsigned long tso;		/* # of TSO requests */
	unsigned long tx_cso;		/* # of TX checksum offloads */
	unsigned long vlan_ins;		/* # of TX VLAN insertions */
	unsigned long mapping_err;	/* # of I/O MMU packet mapping errors */
};

/*
 * The complete set of Scatter/Gather Engine resources.
 */
struct sge {
	/*
	 * Our "Queue Sets" ...
	 */
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];

	/*
	 * Extra ingress queues for asynchronous firmware events and
	 * forwarded interrupts (when in MSI mode).
	 */
	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

	struct sge_rspq intrq ____cacheline_aligned_in_smp;
	spinlock_t intrq_lock;

	/*
	 * State for managing "starving Free Lists" -- Free Lists which have
	 * fallen below a certain threshold of buffers available to the
	 * hardware and attempts to refill them up to that threshold have
	 * failed.  We have a regular "slow tick" timer process which will
	 * make periodic attempts to refill these starving Free Lists ...
	 */
	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
	struct timer_list rx_timer;

	/*
	 * State for cleaning up completed TX descriptors.
	 */
	struct timer_list tx_timer;

	/*
	 * Write-once/infrequently fields.
	 * -------------------------------
	 */

	u16 max_ethqsets;		/* # of available Ethernet queue sets */
	u16 ethqsets;			/* # of active Ethernet queue sets */
	u16 ethtxq_rover;		/* Tx queue to clean up next */
	u16 timer_val[SGE_NTIMERS];	/* interrupt holdoff timer array */
	u8 counter_val[SGE_NCOUNTERS];	/* interrupt RX threshold array */

	/* Decoded Adapter Parameters.
	 */
	u32 fl_pg_order;		/* large page allocation size */
	u32 stat_len;			/* length of status page at ring end */
	u32 pktshift;			/* padding between CPL & packet data */
	u32 fl_align;			/* response queue message alignment */
	u32 fl_starve_thres;		/* Free List starvation threshold */

	/*
	 * Reverse maps from Absolute Queue IDs to associated queue pointers.
	 * The absolute Queue IDs are in a compact range which starts at a
	 * [potentially large] Base Queue ID.  We perform the reverse map by
	 * first converting the Absolute Queue ID into a Relative Queue ID by
	 * subtracting off the Base Queue ID and then use a Relative Queue ID
	 * indexed table to get the pointer to the corresponding software
	 * queue structure.
	 */
	unsigned int egr_base;
	unsigned int ingr_base;
	void *egr_map[MAX_EGRQ];
	struct sge_rspq *ingr_map[MAX_INGQ];
};

/*
 * Utility macros to convert Absolute Queue IDs into Relative Queue IDs for
 * Egress and Ingress Queues.  The EQ_MAP() and IQ_MAP() macros, which provide
 * pointers to Egress and Ingress Queues, can be used as both L- and R-values.
 */
#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base))
#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base))

#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)])
#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)])
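/*
 * Illustrative (hypothetical) use of the reverse maps above: given an
 * absolute Ingress Queue ID reported by the hardware, the corresponding
 * software queue can be found with something like
 *
 *	struct sge_rspq *rspq = IQ_MAP(&adapter->sge, iq_abs_id);
 *
 * Similarly, EQ_MAP() yields the software state for an Egress Queue (as a
 * void pointer which the caller must cast appropriately).
 */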
/*
 * Macro to iterate across Queue Sets ("rxq" is a historic misnomer).
 */
#define for_each_ethrxq(sge, iter) \
	for (iter = 0; iter < (sge)->ethqsets; iter++)

struct hash_mac_addr {
	struct list_head list;
	u8 addr[ETH_ALEN];
};

struct mbox_list {
	struct list_head list;
};

/*
 * Per-"adapter" (Virtual Function) information.
 */
struct adapter {
	/* PCI resources */
	void __iomem *regs;
	void __iomem *bar2;
	struct pci_dev *pdev;
	struct device *pdev_dev;

	/* "adapter" resources */
	unsigned long registered_device_map;
	unsigned long open_device_map;
	unsigned long flags;
	struct adapter_params params;

	/* queue and interrupt resources */
	struct {
		unsigned short vec;
		char desc[22];
	} msix_info[MSIX_ENTRIES];
	struct sge sge;

	/* Linux network device resources */
	struct net_device *port[MAX_NPORTS];
	const char *name;
	unsigned int msg_enable;

	/* debugfs resources */
	struct dentry *debugfs_root;

	/* various locks */
	spinlock_t stats_lock;

	/* lock for mailbox cmd list */
	spinlock_t mbox_lock;
	struct mbox_list mlist;

	/* support for mailbox command/reply logging */
#define T4VF_OS_LOG_MBOX_CMDS 256
	struct mbox_cmd_log *mbox_log;

	/* list of MAC addresses in MPS Hash */
	struct list_head mac_hlist;
};

enum { /* adapter flags */
	FULL_INIT_DONE = (1UL << 0),
	USING_MSI = (1UL << 1),
	USING_MSIX = (1UL << 2),
	QUEUES_BOUND = (1UL << 3),
	ROOT_NO_RELAXED_ORDERING = (1UL << 4),
};

/*
 * The following register read/write routine definitions are required by
 * the common code.
 */

/**
 * t4_read_reg - read a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 32-bit value of the given HW register.
 */
static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
{
	return readl(adapter->regs + reg_addr);
}

/**
 * t4_write_reg - write a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
	writel(val, adapter->regs + reg_addr);
}

#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
#endif
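/*
 * Note: on platforms without a native readq()/writeq(), the fallbacks above
 * compose each 64-bit register access out of two 32-bit accesses (low word
 * at addr, high word at addr + 4), so such accesses are not atomic.
 */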
/**
 * t4_read_reg64 - read a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 64-bit value of the given HW register.
 */
static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
{
	return readq(adapter->regs + reg_addr);
}

/**
 * t4_write_reg64 - write a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 64-bit value into the given HW register.
 */
static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
				  u64 val)
{
	writeq(val, adapter->regs + reg_addr);
}

/**
 * port_name - return the string name of a port
 * @adapter: the adapter
 * @pidx: the port index
 *
 * Return the string name of the selected port.
 */
static inline const char *port_name(struct adapter *adapter, int pidx)
{
	return adapter->port[pidx]->name;
}

/**
 * t4_os_set_hw_addr - store a port's MAC address in SW
 * @adapter: the adapter
 * @pidx: the port index
 * @hw_addr: the Ethernet address
 *
 * Store the Ethernet address of the given port in SW.  Called by the
 * common code when it retrieves a port's Ethernet address from EEPROM.
 */
static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
				     u8 hw_addr[])
{
	memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
}

/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adapter: the adapter
 * @pidx: the port index
 *
 * Return the port_info structure for the selected port.
 */
static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx)
{
	return netdev_priv(adapter->port[pidx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
	return netdev2pinfo(dev)->adapter;
}

/*
 * OS "Callback" function declarations.  These are functions that the OS code
 * is "contracted" to provide for the common code.
 */
void t4vf_os_link_changed(struct adapter *, int, int);
void t4vf_os_portmod_changed(struct adapter *, int);

/*
 * SGE function prototype declarations.
 */
int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool,
		       struct net_device *, int,
		       struct sge_fl *, rspq_handler_t);
int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
			   struct net_device *, struct netdev_queue *,
			   unsigned int);
void t4vf_free_sge_resources(struct adapter *);

int t4vf_eth_xmit(struct sk_buff *, struct net_device *);
int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
		       const struct pkt_gl *);

irq_handler_t t4vf_intr_handler(struct adapter *);
irqreturn_t t4vf_sge_intr_msix(int, void *);

int t4vf_sge_init(struct adapter *);
void t4vf_sge_start(struct adapter *);
void t4vf_sge_stop(struct adapter *);

#endif /* __CXGB4VF_ADAPTER_H__ */