/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CXGB4_H__
#define __CXGB4_H__

#include "t4_hw.h"

#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include "cxgb4_uld.h"

#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x09
#define T4FW_VERSION_MICRO 0x17
#define T4FW_VERSION_BUILD 0x00

#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x09
#define T5FW_VERSION_MICRO 0x17
#define T5FW_VERSION_BUILD 0x00

#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
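/* Usage sketch (hypothetical call site): CH_WARN routes driver warnings to
 * the underlying PCI device's struct device, e.g.:
 *
 *	CH_WARN(adap, "port %d module unplugged\n", port_id);
 */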
enum {
	MAX_NPORTS = 4,     /* max # of ports */
	SERNUM_LEN = 24,    /* Serial # length */
	EC_LEN     = 16,    /* E/C length */
	ID_LEN     = 16,    /* ID length */
};

enum {
	MEM_EDC0,
	MEM_EDC1,
	MEM_MC,
	MEM_MC0 = MEM_MC,
	MEM_MC1
};

enum {
	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE     = 0x1b800,
	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,
	MEMWIN1_BASE_T5  = 0x52000,
	MEMWIN2_APERTURE = 65536,
	MEMWIN2_BASE     = 0x30000,
	MEMWIN2_BASE_T5  = 0x54000,
};

enum dev_master {
	MASTER_CANT,
	MASTER_MAY,
	MASTER_MUST
};

enum dev_state {
	DEV_STATE_UNINIT,
	DEV_STATE_INIT,
	DEV_STATE_ERR
};

enum {
	PAUSE_RX      = 1 << 0,
	PAUSE_TX      = 1 << 1,
	PAUSE_AUTONEG = 1 << 2
};

struct port_stats {
	u64 tx_octets;            /* total # of octets in good frames */
	u64 tx_frames;            /* all good frames */
	u64 tx_bcast_frames;      /* all broadcast frames */
	u64 tx_mcast_frames;      /* all multicast frames */
	u64 tx_ucast_frames;      /* all unicast frames */
	u64 tx_error_frames;      /* all error frames */

	u64 tx_frames_64;         /* # of Tx frames in a particular range */
	u64 tx_frames_65_127;
	u64 tx_frames_128_255;
	u64 tx_frames_256_511;
	u64 tx_frames_512_1023;
	u64 tx_frames_1024_1518;
	u64 tx_frames_1519_max;

	u64 tx_drop;              /* # of dropped Tx frames */
	u64 tx_pause;             /* # of transmitted pause frames */
	u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
	u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
	u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
	u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
	u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
	u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
	u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
	u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */

	u64 rx_octets;            /* total # of octets in good frames */
	u64 rx_frames;            /* all good frames */
	u64 rx_bcast_frames;      /* all broadcast frames */
	u64 rx_mcast_frames;      /* all multicast frames */
	u64 rx_ucast_frames;      /* all unicast frames */
	u64 rx_too_long;          /* # of frames exceeding MTU */
	u64 rx_jabber;            /* # of jabber frames */
	u64 rx_fcs_err;           /* # of received frames with bad FCS */
	u64 rx_len_err;           /* # of received frames with length error */
	u64 rx_symbol_err;        /* symbol errors */
	u64 rx_runt;              /* # of short frames */

	u64 rx_frames_64;         /* # of Rx frames in a particular range */
	u64 rx_frames_65_127;
	u64 rx_frames_128_255;
	u64 rx_frames_256_511;
	u64 rx_frames_512_1023;
	u64 rx_frames_1024_1518;
	u64 rx_frames_1519_max;

	u64 rx_pause;             /* # of received pause frames */
	u64 rx_ppp0;              /* # of received PPP prio 0 frames */
	u64 rx_ppp1;              /* # of received PPP prio 1 frames */
	u64 rx_ppp2;              /* # of received PPP prio 2 frames */
	u64 rx_ppp3;              /* # of received PPP prio 3 frames */
	u64 rx_ppp4;              /* # of received PPP prio 4 frames */
	u64 rx_ppp5;              /* # of received PPP prio 5 frames */
	u64 rx_ppp6;              /* # of received PPP prio 6 frames */
	u64 rx_ppp7;              /* # of received PPP prio 7 frames */

	u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
	u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
	u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
	u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
	u64 rx_trunc0;            /* buffer-group 0 truncated packets */
	u64 rx_trunc1;            /* buffer-group 1 truncated packets */
	u64 rx_trunc2;            /* buffer-group 2 truncated packets */
	u64 rx_trunc3;            /* buffer-group 3 truncated packets */
};
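/* Usage sketch (hypothetical caller): the MPS counters behind this
 * structure are snapshotted with t4_get_port_stats(), declared later in
 * this header, typically under the adapter's stats_lock:
 *
 *	struct port_stats s;
 *
 *	spin_lock(&adap->stats_lock);
 *	t4_get_port_stats(adap, pi->tx_chan, &s);
 *	spin_unlock(&adap->stats_lock);
 */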
struct lb_port_stats {
	u64 octets;
	u64 frames;
	u64 bcast_frames;
	u64 mcast_frames;
	u64 ucast_frames;
	u64 error_frames;

	u64 frames_64;
	u64 frames_65_127;
	u64 frames_128_255;
	u64 frames_256_511;
	u64 frames_512_1023;
	u64 frames_1024_1518;
	u64 frames_1519_max;

	u64 drop;

	u64 ovflow0;
	u64 ovflow1;
	u64 ovflow2;
	u64 ovflow3;
	u64 trunc0;
	u64 trunc1;
	u64 trunc2;
	u64 trunc3;
};

struct tp_tcp_stats {
	u32 tcpOutRsts;
	u64 tcpInSegs;
	u64 tcpOutSegs;
	u64 tcpRetransSegs;
};

struct tp_err_stats {
	u32 macInErrs[4];
	u32 hdrInErrs[4];
	u32 tcpInErrs[4];
	u32 tnlCongDrops[4];
	u32 ofldChanDrops[4];
	u32 tnlTxDrops[4];
	u32 ofldVlanDrops[4];
	u32 tcp6InErrs[4];
	u32 ofldNoNeigh;
	u32 ofldCongDefer;
};

struct tp_params {
	unsigned int ntxchan;        /* # of Tx channels */
	unsigned int tre;            /* log2 of core clocks per TP tick */
	unsigned short tx_modq_map;  /* TX modulation scheduler queue to */
				     /* channel map */

	uint32_t dack_re;            /* DACK timer resolution */
	unsigned short tx_modq[NCHAN];	/* channel to modulation queue map */

	u32 vlan_pri_map;            /* cached TP_VLAN_PRI_MAP */
	u32 ingress_config;          /* cached TP_INGRESS_CONFIG */

	/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
	 * subset of the set of fields which may be present in the Compressed
	 * Filter Tuple portion of filters and TCP TCB connections.  The
	 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
	 * Since a variable number of fields may or may not be present, their
	 * shifted field positions within the Compressed Filter Tuple may
	 * vary, or not even be present if the field isn't selected in
	 * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
	 * places we store their offsets here, or a -1 if the field isn't
	 * present.
	 */
	int vlan_shift;
	int vnic_shift;
	int port_shift;
	int protocol_shift;
};
struct vpd_params {
	unsigned int cclk;
	u8 ec[EC_LEN + 1];
	u8 sn[SERNUM_LEN + 1];
	u8 id[ID_LEN + 1];
};

struct pci_params {
	unsigned char speed;
	unsigned char width;
};

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_FPGA          0x100
#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

#define CHELSIO_T4	0x4
#define CHELSIO_T5	0x5

enum chip_type {
	T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
	T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
	T4_FIRST_REV	= T4_A1,
	T4_LAST_REV	= T4_A2,

	T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
	T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
	T5_FIRST_REV	= T5_A0,
	T5_LAST_REV	= T5_A1,
};

struct adapter_params {
	struct tp_params  tp;
	struct vpd_params vpd;
	struct pci_params pci;

	unsigned int sf_size;             /* serial flash size in bytes */
	unsigned int sf_nsec;             /* # of flash sectors */
	unsigned int sf_fw_start;         /* start of FW image in flash */

	unsigned int fw_vers;
	unsigned int tp_vers;
	u8 api_vers[7];

	unsigned short mtus[NMTUS];
	unsigned short a_wnd[NCCTRL_WIN];
	unsigned short b_wnd[NCCTRL_WIN];

	unsigned char nports;             /* # of ethernet ports */
	unsigned char portvec;
	enum chip_type chip;              /* chip code */
	unsigned char offload;

	unsigned char bypass;

	unsigned int ofldq_wr_cred;
};

#include "t4fw_api.h"

#define FW_VERSION(chip) ( \
		FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \
		FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \
		FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \
		FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD))
#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)

struct fw_info {
	u8 chip;
	char *fs_name;
	char *fw_mod_name;
	struct fw_hdr fw_hdr;
};


struct trace_params {
	u32 data[TRACE_LEN / 4];
	u32 mask[TRACE_LEN / 4];
	unsigned short snap_len;
	unsigned short min_len;
	unsigned char skip_ofst;
	unsigned char skip_len;
	unsigned char invert;
	unsigned char port;
};

struct link_config {
	unsigned short supported;        /* link capabilities */
	unsigned short advertising;      /* advertised capabilities */
	unsigned short requested_speed;  /* speed user has requested */
	unsigned short speed;            /* actual link speed */
	unsigned char  requested_fc;     /* flow control user has requested */
	unsigned char  fc;               /* actual link flow control */
	unsigned char  autoneg;          /* autonegotiating? */
	unsigned char  link_ok;          /* link up? */
};
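/* Sketch: after updating requested_speed/requested_fc above, the new
 * settings are pushed to the firmware with t4_link_start() (declared later
 * in this header), e.g. for a struct port_info *pi (hypothetical caller):
 *
 *	err = t4_link_start(pi->adapter, pi->adapter->fn, pi->tx_chan,
 *			    &pi->link_cfg);
 */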
#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)

enum {
	MAX_ETH_QSETS = 32,           /* # of Ethernet Tx/Rx queue sets */
	MAX_OFLD_QSETS = 16,          /* # of offload Tx/Rx queue sets */
	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
	MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
};

enum {
	MAX_EGRQ = 128,         /* max # of egress queues, including FLs */
	MAX_INGQ = 64           /* max # of interrupt-capable ingress queues */
};

struct adapter;
struct sge_rspq;

struct port_info {
	struct adapter *adapter;
	u16    viid;
	s16    xact_addr_filt;        /* index of exact MAC address filter */
	u16    rss_size;              /* size of VI's RSS table slice */
	s8     mdio_addr;
	u8     port_type;
	u8     mod_type;
	u8     port_id;
	u8     tx_chan;
	u8     lport;                 /* associated offload logical port */
	u8     nqsets;                /* # of qsets */
	u8     first_qset;            /* index of first qset */
	u8     rss_mode;
	struct link_config link_cfg;
	u16   *rss;
};

struct dentry;
struct work_struct;

enum {                                 /* adapter flags */
	FULL_INIT_DONE     = (1 << 0),
	DEV_ENABLED        = (1 << 1),
	USING_MSI          = (1 << 2),
	USING_MSIX         = (1 << 3),
	FW_OK              = (1 << 4),
	RSS_TNLALLLOOKUP   = (1 << 5),
	USING_SOFT_PARAMS  = (1 << 6),
	MASTER_PF          = (1 << 7),
	FW_OFLD_CONN       = (1 << 9),
};

struct rx_sw_desc;

struct sge_fl {                     /* SGE free-buffer queue state */
	unsigned int avail;         /* # of available Rx buffers */
	unsigned int pend_cred;     /* new buffers since last FL DB ring */
	unsigned int cidx;          /* consumer index */
	unsigned int pidx;          /* producer index */
	unsigned long alloc_failed; /* # of times buffer allocation failed */
	unsigned long large_alloc_failed;
	unsigned long starving;
	/* RO fields */
	unsigned int cntxt_id;      /* SGE context id for the free list */
	unsigned int size;          /* capacity of free list */
	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
	__be64 *desc;               /* address of HW Rx descriptor ring */
	dma_addr_t addr;            /* bus address of HW ring start */
};

/* A packet gather list */
struct pkt_gl {
	struct page_frag frags[MAX_SKB_FRAGS];
	void *va;                         /* virtual address of first byte */
	unsigned int nfrags;              /* # of fragments */
	unsigned int tot_len;             /* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
			      const struct pkt_gl *gl);

struct sge_rspq {                   /* state for an SGE response queue */
	struct napi_struct napi;
	const __be64 *cur_desc;     /* current descriptor in queue */
	unsigned int cidx;          /* consumer index */
	u8 gen;                     /* current generation bit */
	u8 intr_params;             /* interrupt holdoff parameters */
	u8 next_intr_params;        /* holdoff params for next interrupt */
	u8 pktcnt_idx;              /* interrupt packet threshold */
	u8 uld;                     /* ULD handling this queue */
	u8 idx;                     /* queue index within its group */
	int offset;                 /* offset into current Rx buffer */
	u16 cntxt_id;               /* SGE context id for the response q */
	u16 abs_id;                 /* absolute SGE id for the response q */
	__be64 *desc;               /* address of HW response ring */
	dma_addr_t phys_addr;       /* physical address of the ring */
	unsigned int iqe_len;       /* entry size */
	unsigned int size;          /* capacity of response queue */
	struct adapter *adap;
	struct net_device *netdev;  /* associated net device */
	rspq_handler_t handler;
};
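/* Sketch of a response-queue handler matching rspq_handler_t above
 * (hypothetical; real handlers such as t4_ethrx_handler() are declared
 * later in this header):
 *
 *	static int my_handler(struct sge_rspq *q, const __be64 *rsp,
 *			      const struct pkt_gl *gl)
 *	{
 *		if (!gl)	// response carries no packet data
 *			return 0;
 *		// ... consume gl->va / gl->frags[0..gl->nfrags - 1] ...
 *		return 0;
 *	}
 */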
struct sge_eth_stats {              /* Ethernet queue statistics */
	unsigned long pkts;         /* # of ethernet packets */
	unsigned long lro_pkts;     /* # of LRO super packets */
	unsigned long lro_merged;   /* # of wire packets merged by LRO */
	unsigned long rx_cso;       /* # of Rx checksum offloads */
	unsigned long vlan_ex;      /* # of Rx VLAN extractions */
	unsigned long rx_drops;     /* # of packets dropped due to no mem */
};

struct sge_eth_rxq {                /* SW Ethernet Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_eth_stats stats;
} ____cacheline_aligned_in_smp;

struct sge_ofld_stats {             /* offload queue statistics */
	unsigned long pkts;         /* # of packets */
	unsigned long imm;          /* # of immediate-data packets */
	unsigned long an;           /* # of asynchronous notifications */
	unsigned long nomem;        /* # of responses deferred due to no mem */
};

struct sge_ofld_rxq {               /* SW offload Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_ofld_stats stats;
} ____cacheline_aligned_in_smp;

struct tx_desc {
	__be64 flit[8];
};

struct tx_sw_desc;

struct sge_txq {
	unsigned int  in_use;       /* # of in-use Tx descriptors */
	unsigned int  size;         /* # of descriptors */
	unsigned int  cidx;         /* SW consumer index */
	unsigned int  pidx;         /* producer index */
	unsigned long stops;        /* # of times q has been stopped */
	unsigned long restarts;     /* # of queue restarts */
	unsigned int  cntxt_id;     /* SGE context id for the Tx q */
	struct tx_desc *desc;       /* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
	struct sge_qstat *stat;     /* queue status entry */
	dma_addr_t    phys_addr;    /* physical address of the ring */
	spinlock_t db_lock;
	int db_disabled;
	unsigned short db_pidx;
	u64 udb;
};

struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
	struct sge_txq q;
	struct netdev_queue *txq;   /* associated netdev TX queue */
	unsigned long tso;          /* # of TSO requests */
	unsigned long tx_cso;       /* # of Tx checksum offloads */
	unsigned long vlan_ins;     /* # of Tx VLAN insertions */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
} ____cacheline_aligned_in_smp;

struct sge {
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
	struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
	struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

	struct sge_rspq intrq ____cacheline_aligned_in_smp;
	spinlock_t intrq_lock;

	u16 max_ethqsets;           /* # of available Ethernet queue sets */
	u16 ethqsets;               /* # of active Ethernet queue sets */
	u16 ethtxq_rover;           /* Tx queue to clean up next */
	u16 ofldqsets;              /* # of active offload queue sets */
	u16 rdmaqs;                 /* # of available RDMA Rx queues */
	u16 ofld_rxq[MAX_OFLD_QSETS];
	u16 rdma_rxq[NCHAN];
	u16 timer_val[SGE_NTIMERS];
	u8 counter_val[SGE_NCOUNTERS];
	u32 fl_pg_order;            /* large page allocation size */
	u32 stat_len;               /* length of status page at ring end */
	u32 pktshift;               /* padding between CPL & packet data */
	u32 fl_align;               /* response queue message alignment */
	u32 fl_starve_thres;        /* Free List starvation threshold */
	unsigned int starve_thres;
	u8 idma_state[2];
	unsigned int egr_start;
	unsigned int ingr_start;
	void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
	struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
	DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
	struct timer_list rx_timer; /* refills starving FLs */
	struct timer_list tx_timer; /* checks Tx queues */
};

#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
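/* The iteration macros above walk only the active queue sets.  Usage
 * sketch (hypothetical caller):
 *
 *	int i;
 *	struct sge *s = &adap->sge;
 *
 *	for_each_ethrxq(s, i)
 *		napi_disable(&s->ethrxq[i].rspq.napi);
 */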
struct l2t_data;

#ifdef CONFIG_PCI_IOV

/* T4 supports SRIOV on PF0-3 and T5 on PF0-7.  However, the Serial
 * Configuration initialization for T5 only has SR-IOV functionality enabled
 * on PF0-3 in order to simplify everything.
 */
#define NUM_OF_PF_WITH_SRIOV 4

#endif

struct adapter {
	void __iomem *regs;
	void __iomem *bar2;
	struct pci_dev *pdev;
	struct device *pdev_dev;
	unsigned int mbox;
	unsigned int fn;
	unsigned int flags;
	enum chip_type chip;

	int msg_enable;

	struct adapter_params params;
	struct cxgb4_virt_res vres;
	unsigned int swintr;

	unsigned int wol;

	struct {
		unsigned short vec;
		char desc[IFNAMSIZ + 10];
	} msix_info[MAX_INGQ + 1];

	struct sge sge;

	struct net_device *port[MAX_NPORTS];
	u8 chan_map[NCHAN];                   /* channel -> port map */

	u32 filter_mode;
	unsigned int l2t_start;
	unsigned int l2t_end;
	struct l2t_data *l2t;
	void *uld_handle[CXGB4_ULD_MAX];
	struct list_head list_node;
	struct list_head rcu_node;

	struct tid_info tids;
	void **tid_release_head;
	spinlock_t tid_release_lock;
	struct work_struct tid_release_task;
	struct work_struct db_full_task;
	struct work_struct db_drop_task;
	bool tid_release_task_busy;

	struct dentry *debugfs_root;

	spinlock_t stats_lock;
};
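/* Layout note: each entry of adap->port[] above is a net_device whose
 * private area holds the corresponding struct port_info, so netdev_priv()
 * (wrapped by the netdev2pinfo()/adap2pinfo() helpers later in this header)
 * recovers the per-port state from either direction.
 */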
/* Defined bit width of user definable filter tuples
 */
#define ETHTYPE_BITWIDTH 16
#define FRAG_BITWIDTH 1
#define MACIDX_BITWIDTH 9
#define FCOE_BITWIDTH 1
#define IPORT_BITWIDTH 3
#define MATCHTYPE_BITWIDTH 3
#define PROTO_BITWIDTH 8
#define TOS_BITWIDTH 8
#define PF_BITWIDTH 8
#define VF_BITWIDTH 8
#define IVLAN_BITWIDTH 16
#define OVLAN_BITWIDTH 16

/* Filter matching rules.  These consist of a set of ingress packet field
 * (value, mask) tuples.  The associated ingress packet field matches the
 * tuple when ((field & mask) == value).  (Thus a wildcard "don't care" field
 * rule can be constructed by specifying a tuple of (0, 0).)  A filter rule
 * matches an ingress packet when all of the individual field matching rules
 * are true.
 *
 * Partial field masks are always valid, however, while it may be easy to
 * understand their meanings for some fields (e.g. IP address to match a
 * subnet), for others making sensible partial masks is less intuitive (e.g.
 * MPS match type) ...
 *
 * Most of the following data structures are modeled on T4 capabilities.
 * Drivers for earlier chips use the subsets which make sense for those chips.
 * We really need to come up with a hardware-independent mechanism to
 * represent hardware filter capabilities ...
 */
struct ch_filter_tuple {
	/* Compressed header matching field rules.  The TP_VLAN_PRI_MAP
	 * register selects which of these fields will participate in the
	 * filter match rules -- up to a maximum of 36 bits.  Because
	 * TP_VLAN_PRI_MAP is a global register, all filters must use the same
	 * set of fields.
	 */
	uint32_t ethtype:ETHTYPE_BITWIDTH;      /* Ethernet type */
	uint32_t frag:FRAG_BITWIDTH;            /* IP fragmentation header */
	uint32_t ivlan_vld:1;                   /* inner VLAN valid */
	uint32_t ovlan_vld:1;                   /* outer VLAN valid */
	uint32_t pfvf_vld:1;                    /* PF/VF valid */
	uint32_t macidx:MACIDX_BITWIDTH;        /* exact match MAC index */
	uint32_t fcoe:FCOE_BITWIDTH;            /* FCoE packet */
	uint32_t iport:IPORT_BITWIDTH;          /* ingress port */
	uint32_t matchtype:MATCHTYPE_BITWIDTH;  /* MPS match type */
	uint32_t proto:PROTO_BITWIDTH;          /* protocol type */
	uint32_t tos:TOS_BITWIDTH;              /* TOS/Traffic Type */
	uint32_t pf:PF_BITWIDTH;                /* PCI-E PF ID */
	uint32_t vf:VF_BITWIDTH;                /* PCI-E VF ID */
	uint32_t ivlan:IVLAN_BITWIDTH;          /* inner VLAN */
	uint32_t ovlan:OVLAN_BITWIDTH;          /* outer VLAN */

	/* Uncompressed header matching field rules.  These are always
	 * available for field rules.
	 */
	uint8_t lip[16];        /* local IP address (IPv4 in [3:0]) */
	uint8_t fip[16];        /* foreign IP address (IPv4 in [3:0]) */
	uint16_t lport;         /* local port */
	uint16_t fport;         /* foreign port */
};
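/* Sketch: matching all TCP traffic while wildcarding every other field
 * (hypothetical values; any (0, 0) value/mask pair means "don't care"):
 *
 *	struct ch_filter_specification fs = { 0 };
 *
 *	fs.val.proto  = IPPROTO_TCP;
 *	fs.mask.proto = 0xff;		// full-width protocol mask
 *
 * The val/mask pair lives in struct ch_filter_specification, defined next.
 */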
/* A filter ioctl command.
 */
struct ch_filter_specification {
	/* Administrative fields for filter.
	 */
	uint32_t hitcnts:1;     /* count filter hits in TCB */
	uint32_t prio:1;        /* filter has priority over active/server */

	/* Fundamental filter typing.  This is the one element of filter
	 * matching that doesn't exist as a (value, mask) tuple.
	 */
	uint32_t type:1;        /* 0 => IPv4, 1 => IPv6 */

	/* Packet dispatch information.  Ingress packets which match the
	 * filter rules will be dropped, passed to the host or switched back
	 * out as egress packets.
	 */
	uint32_t action:2;      /* drop, pass, switch */

	uint32_t rpttid:1;      /* report TID in RSS hash field */

	uint32_t dirsteer:1;    /* 0 => RSS, 1 => steer to iq */
	uint32_t iq:10;         /* ingress queue */

	uint32_t maskhash:1;    /* dirsteer=0: store RSS hash in TCB */
	uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
				/* 1 => TCB contains IQ ID */

	/* Switch proxy/rewrite fields.  An ingress packet which matches a
	 * filter with "switch" set will be looped back out as an egress
	 * packet -- potentially with some Ethernet header rewriting.
	 */
	uint32_t eport:2;       /* egress port to switch packet out */
	uint32_t newdmac:1;     /* rewrite destination MAC address */
	uint32_t newsmac:1;     /* rewrite source MAC address */
	uint32_t newvlan:2;     /* rewrite VLAN Tag */
	uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
	uint8_t smac[ETH_ALEN]; /* new source MAC address */
	uint16_t vlan;          /* VLAN Tag to insert */

	/* Filter rule value/mask pairs.
	 */
	struct ch_filter_tuple val;
	struct ch_filter_tuple mask;
};

enum {
	FILTER_PASS = 0,        /* default */
	FILTER_DROP,
	FILTER_SWITCH
};

enum {
	VLAN_NOCHANGE = 0,      /* default */
	VLAN_REMOVE,
	VLAN_INSERT,
	VLAN_REWRITE
};

static inline int is_t5(enum chip_type chip)
{
	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
}

static inline int is_t4(enum chip_type chip)
{
	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
}

static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
	return readl(adap->regs + reg_addr);
}

static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
{
	writel(val, adap->regs + reg_addr);
}

#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
#endif

static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
{
	return readq(adap->regs + reg_addr);
}

static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
{
	writeq(val, adap->regs + reg_addr);
}
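/* Note: on platforms without a native readq()/writeq(), the fallbacks above
 * split each 64-bit register access into two 32-bit accesses (low word
 * first).  Such an access is not atomic, so a 64-bit register whose halves
 * can change between the two reads needs a different access pattern.
 */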
/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
	return netdev_priv(adap->port[idx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
	return netdev2pinfo(dev)->adapter;
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void *t4_alloc_mem(size_t size);

void t4_free_sge_resources(struct adapter *adap);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct net_device *dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct net_device *dev, unsigned int iqid,
			  unsigned int cmplqid);
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
			  struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
extern int dbfifo_int_thresh;

#define for_each_port(adapter, iter) \
	for (iter = 0; iter < (adapter)->params.nports; ++iter)

static inline int is_bypass(struct adapter *adap)
{
	return adap->params.bypass;
}

static inline int is_bypass_device(int device)
{
	/* this should be set based upon device capabilities */
	switch (device) {
	case 0x440b:
	case 0x440c:
		return 1;
	default:
		return 0;
	}
}

static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
	return adap->params.vpd.cclk / 1000;
}

static inline unsigned int us_to_core_ticks(const struct adapter *adap,
					    unsigned int us)
{
	return (us * adap->params.vpd.cclk) / 1000;
}

static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
					    unsigned int ticks)
{
	/* add Core Clock / 2 to round ticks to nearest uS */
	return ((ticks * 1000 + adapter->params.vpd.cclk / 2) /
		adapter->params.vpd.cclk);
}

void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
		      u32 val);

int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok);

static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
			     int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
}

static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
				int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}
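/* Sketch: firmware commands are fixed-size, 16-byte-multiple structures
 * from t4fw_api.h, issued through the mailbox wrappers above.  Hypothetical
 * caller, assuming a command struct `c` has already been populated:
 *
 *	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
 *
 * The same storage may be passed as the reply buffer to receive the
 * firmware's response; t4_wr_mbox_ns() is the non-sleeping variant.
 */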
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx);
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx);

struct fw_filter_wr;

void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);

int t4_wait_dev_ready(struct adapter *adap);
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
		    __be32 *buf);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags);
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
	       u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
		u64 *parity);

void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta);

void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);

void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr);
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable);

int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size);
int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
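/* Sketch: reading a single firmware parameter with t4_query_params(),
 * declared just below (hypothetical caller; the parameter word `param`
 * would be built from the FW_PARAMS_* macros in t4fw_api.h):
 *
 *	u32 param, val;
 *
 *	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &param, &val);
 */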
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val);
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en);
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks);
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 val);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid);
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val);
#endif /* __CXGB4_H__ */