/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#ifndef __BNA_H__
#define __BNA_H__

#include "bfa_defs.h"
#include "bfa_ioc.h"
#include "bfi_enet.h"
#include "bna_types.h"

extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];

/* Macros and constants */

#define BNA_IOC_TIMER_FREQ		200

/* Log string size */
#define BNA_MESSAGE_SIZE		256

#define bna_is_small_rxq(_id) ((_id) & 0x1)

#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
        (!memcmp((_mac1), (_mac2), sizeof(mac_t)))

#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)

#define BNA_TO_POWER_OF_2(x) \
do { \
        int _shift = 0; \
        while ((x) && (x) != 1) { \
                (x) >>= 1; \
                _shift++; \
        } \
        (x) <<= _shift; \
} while (0)

#define BNA_TO_POWER_OF_2_HIGH(x) \
do { \
        int n = 1; \
        while (n < (x)) \
                n <<= 1; \
        (x) = n; \
} while (0)
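
/*
 * Usage sketch (illustrative only, not part of the driver): both rounding
 * macros modify their argument in place and evaluate it several times, so
 * they must be given a plain integer lvalue.  Assuming a requested queue
 * depth of 100:
 *
 *	u32 depth = 100;
 *	BNA_TO_POWER_OF_2(depth);	-> depth == 64  (round down)
 *
 *	depth = 100;
 *	BNA_TO_POWER_OF_2_HIGH(depth);	-> depth == 128 (round up)
 */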

/*
 * input  : _addr -> OS DMA address in host endian format
 * output : _bna_dma_addr -> pointer to hw DMA address
 */
#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \
do { \
        u64 tmp_addr = \
        cpu_to_be64((u64)(_addr)); \
        (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
        (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
} while (0)

/*
 * input  : _bna_dma_addr -> pointer to hw DMA address
 * output : _addr -> OS DMA address in host endian format
 */
#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \
do { \
        (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \
                | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
} while (0)
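
/*
 * Usage sketch (illustrative only): round-tripping a DMA address through
 * the hardware (big endian) representation.  "dma_handle" stands for a
 * hypothetical address obtained from the OS DMA mapping API.
 *
 *	struct bna_dma_addr hw_addr;
 *	dma_addr_t dma_handle;
 *	u64 host_addr;
 *
 *	BNA_SET_DMA_ADDR(dma_handle, &hw_addr);	 host endian -> big endian
 *	BNA_GET_DMA_ADDR(&hw_addr, host_addr);	 big endian -> host endian;
 *						 host_addr == (u64)dma_handle
 */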

#define containing_rec(addr, type, field) \
        ((type *)((unsigned char *)(addr) - \
          (unsigned char *)(&((type *)0)->field)))

#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)

/* TxQ element is 64 bytes */
#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)

#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{ \
        unsigned int page_index;	/* index within a page */ \
        void *page_addr; \
        page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
        (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
        page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)]; \
        (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
}

/* RxQ element is 8 bytes */
#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)

#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{ \
        unsigned int page_index;	/* index within a page */ \
        void *page_addr; \
        page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
        (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
        page_addr = (_qpt_ptr)[((_qe_idx) >> \
                                BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
        (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
}

/* CQ element is 16 bytes */
#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)

#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{ \
        unsigned int page_index;	/* index within a page */ \
        void *page_addr; \
 \
        page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
        (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
        page_addr = (_qpt_ptr)[((_qe_idx) >> \
                                BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
        (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index]; \
}

#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
        (&((_cast *)(_q_base))[(_qe_idx)])

#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))

#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
        ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))

#define BNA_QE_INDX_INC(_idx, _q_depth) BNA_QE_INDX_ADD(_idx, 1, _q_depth)

#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
        (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))

#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
        (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
         ((_q_depth) - 1))

#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
        ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
         (_q_depth - 1))

#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)

#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)

#define BNA_Q_PI_ADD(_q_ptr, _num) \
        (_q_ptr)->q.producer_index = \
                (((_q_ptr)->q.producer_index + (_num)) & \
                 ((_q_ptr)->q.q_depth - 1))

#define BNA_Q_CI_ADD(_q_ptr, _num) \
        (_q_ptr)->q.consumer_index = \
                (((_q_ptr)->q.consumer_index + (_num)) & \
                 ((_q_ptr)->q.q_depth - 1))

#define BNA_Q_FREE_COUNT(_q_ptr) \
        (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))

#define BNA_Q_IN_USE_COUNT(_q_ptr) \
        (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
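
/*
 * Usage sketch (illustrative only): the index helpers rely on q_depth
 * being a power of two, since wrap-around is done by masking with
 * (q_depth - 1).  A hypothetical producer path over a queue "q" whose
 * elements live in a flat array "base" might look like:
 *
 *	u32 free_cnt = BNA_QE_FREE_CNT(&q, q.q_depth);
 *
 *	if (free_cnt) {
 *		struct bna_txq_entry *qe =
 *			BNA_QE_INDX_2_PTR(struct bna_txq_entry,
 *					  q.producer_index, base);
 *		... fill *qe ...
 *		BNA_QE_INDX_INC(q.producer_index, q.q_depth);
 *	}
 */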

#define BNA_LARGE_PKT_SIZE 1000

#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
do { \
        if ((_len) > BNA_LARGE_PKT_SIZE) { \
                (_pkt)->large_pkt_cnt++; \
        } else { \
                (_pkt)->small_pkt_cnt++; \
        } \
} while (0)

#define call_rxf_stop_cbfn(rxf) \
do { \
        if ((rxf)->stop_cbfn) { \
                void (*cbfn)(struct bna_rx *); \
                struct bna_rx *cbarg; \
                cbfn = (rxf)->stop_cbfn; \
                cbarg = (rxf)->stop_cbarg; \
                (rxf)->stop_cbfn = NULL; \
                (rxf)->stop_cbarg = NULL; \
                cbfn(cbarg); \
        } \
} while (0)

#define call_rxf_start_cbfn(rxf) \
do { \
        if ((rxf)->start_cbfn) { \
                void (*cbfn)(struct bna_rx *); \
                struct bna_rx *cbarg; \
                cbfn = (rxf)->start_cbfn; \
                cbarg = (rxf)->start_cbarg; \
                (rxf)->start_cbfn = NULL; \
                (rxf)->start_cbarg = NULL; \
                cbfn(cbarg); \
        } \
} while (0)

#define call_rxf_cam_fltr_cbfn(rxf) \
do { \
        if ((rxf)->cam_fltr_cbfn) { \
                void (*cbfn)(struct bnad *, struct bna_rx *); \
                struct bnad *cbarg; \
                cbfn = (rxf)->cam_fltr_cbfn; \
                cbarg = (rxf)->cam_fltr_cbarg; \
                (rxf)->cam_fltr_cbfn = NULL; \
                (rxf)->cam_fltr_cbarg = NULL; \
                cbfn(cbarg, rxf->rx); \
        } \
} while (0)

#define call_rxf_pause_cbfn(rxf) \
do { \
        if ((rxf)->oper_state_cbfn) { \
                void (*cbfn)(struct bnad *, struct bna_rx *); \
                struct bnad *cbarg; \
                cbfn = (rxf)->oper_state_cbfn; \
                cbarg = (rxf)->oper_state_cbarg; \
                (rxf)->oper_state_cbfn = NULL; \
                (rxf)->oper_state_cbarg = NULL; \
                cbfn(cbarg, rxf->rx); \
        } \
} while (0)

#define call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)

#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))

#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))

#define xxx_enable(mode, bitmask, xxx) \
do { \
        bitmask |= xxx; \
        mode |= xxx; \
} while (0)

#define xxx_disable(mode, bitmask, xxx) \
do { \
        bitmask |= xxx; \
        mode &= ~xxx; \
} while (0)

#define xxx_inactive(mode, bitmask, xxx) \
do { \
        bitmask &= ~xxx; \
        mode &= ~xxx; \
} while (0)

#define is_promisc_enable(mode, bitmask) \
        is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_promisc_disable(mode, bitmask) \
        is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_enable(mode, bitmask) \
        xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_disable(mode, bitmask) \
        xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_inactive(mode, bitmask) \
        xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_default_enable(mode, bitmask) \
        is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_default_disable(mode, bitmask) \
        is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_enable(mode, bitmask) \
        xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_disable(mode, bitmask) \
        xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_inactive(mode, bitmask) \
        xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_allmulti_enable(mode, bitmask) \
        is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define is_allmulti_disable(mode, bitmask) \
        is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_enable(mode, bitmask) \
        xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_disable(mode, bitmask) \
        xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_inactive(mode, bitmask) \
        xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
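
/*
 * Usage sketch (illustrative only): these helpers operate on a pair of
 * variables -- "bitmask" selects which rx-mode bits a request actually
 * touches, while "mode" carries the requested values for those bits.
 * For a hypothetical promiscuous request:
 *
 *	enum bna_rxmode mode = 0, bitmask = 0;
 *
 *	promisc_enable(mode, bitmask);		sets PROMISC in both
 *	if (is_promisc_enable(mode, bitmask))
 *		... apply the change ...
 *
 *	promisc_disable(mode, bitmask);		keeps PROMISC in bitmask,
 *						clears it from mode
 *	promisc_inactive(mode, bitmask);	clears PROMISC from both
 */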

#define GET_RXQS(rxp, q0, q1) do { \
        switch ((rxp)->type) { \
        case BNA_RXP_SINGLE: \
                (q0) = rxp->rxq.single.only; \
                (q1) = NULL; \
                break; \
        case BNA_RXP_SLR: \
                (q0) = rxp->rxq.slr.large; \
                (q1) = rxp->rxq.slr.small; \
                break; \
        case BNA_RXP_HDS: \
                (q0) = rxp->rxq.hds.data; \
                (q1) = rxp->rxq.hds.hdr; \
                break; \
        } \
} while (0)

#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask)

#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)

#define bna_tx_from_rid(_bna, _rid, _tx) \
do { \
        struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \
        struct bna_tx *__tx; \
        struct list_head *qe; \
        _tx = NULL; \
        list_for_each(qe, &__tx_mod->tx_active_q) { \
                __tx = (struct bna_tx *)qe; \
                if (__tx->rid == (_rid)) { \
                        (_tx) = __tx; \
                        break; \
                } \
        } \
} while (0)

#define bna_rx_from_rid(_bna, _rid, _rx) \
do { \
        struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod; \
        struct bna_rx *__rx; \
        struct list_head *qe; \
        _rx = NULL; \
        list_for_each(qe, &__rx_mod->rx_active_q) { \
                __rx = (struct bna_rx *)qe; \
                if (__rx->rid == (_rid)) { \
                        (_rx) = __rx; \
                        break; \
                } \
        } \
} while (0)

/* Inline functions */

static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
{
        struct bna_mac *mac = NULL;
        struct list_head *qe;
        list_for_each(qe, q) {
                if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) {
                        mac = (struct bna_mac *)qe;
                        break;
                }
        }
        return mac;
}

#define bna_attr(_bna) (&(_bna)->ioceth.attr)
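
/*
 * Usage sketch (illustrative only): bna_mac_find() is a linear search over
 * a list of struct bna_mac entries.  With a hypothetical list head "list"
 * and MAC address "addr":
 *
 *	struct bna_mac *mac = bna_mac_find(list, addr);
 *
 *	if (!mac)
 *		mac = bna_ucam_mod_mac_get(ucam_mod);	(allocate a fresh
 *							 entry, see below)
 */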

/* Function prototypes */

/* BNA */

/* FW response handlers */
void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);

/* APIs for BNAD */
void bna_res_req(struct bna_res_info *res_info);
void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info);
void bna_init(struct bna *bna, struct bnad *bnad,
              struct bfa_pcidev *pcidev,
              struct bna_res_info *res_info);
void bna_mod_init(struct bna *bna, struct bna_res_info *res_info);
void bna_uninit(struct bna *bna);
int bna_num_txq_set(struct bna *bna, int num_txq);
int bna_num_rxp_set(struct bna *bna, int num_rxp);
void bna_hw_stats_get(struct bna *bna);

/* APIs for RxF */
struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
                          struct bna_mac *mac);
struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
                          struct bna_mac *mac);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
                             struct bna_mcam_handle *handle);

/* MBOX */

/* API for BNAD */
void bna_mbox_handler(struct bna *bna, u32 intr_status);

/* ETHPORT */

/* Callbacks for RX */
void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);

/* TX MODULE AND TX */

/* FW response handlers */
void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
                               struct bfi_msgq_mhdr *msghdr);
void bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx,
                              struct bfi_msgq_mhdr *msghdr);
void bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod);

/* APIs for BNA */
void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
                     struct bna_res_info *res_info);
void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);

/* APIs for ENET */
void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);

/* APIs for BNAD */
void bna_tx_res_req(int num_txq, int txq_depth,
                    struct bna_res_info *res_info);
struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
                             struct bna_tx_config *tx_cfg,
                             const struct bna_tx_event_cbfn *tx_cbfn,
                             struct bna_res_info *res_info, void *priv);
void bna_tx_destroy(struct bna_tx *tx);
void bna_tx_enable(struct bna_tx *tx);
void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
                    void (*cbfn)(void *, struct bna_tx *));
void bna_tx_cleanup_complete(struct bna_tx *tx);
void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);

/* RX MODULE, RX, RXF */

/* FW response handlers */
void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
                               struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx,
                              struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
                               struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
                               struct bfi_msgq_mhdr *msghdr);

/* APIs for BNA */
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
                     struct bna_res_info *res_info);
void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);

/* APIs for ENET */
void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);

/* APIs for BNAD */
void bna_rx_res_req(struct bna_rx_config *rx_config,
                    struct bna_res_info *res_info);
struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
                             struct bna_rx_config *rx_cfg,
                             const struct bna_rx_event_cbfn *rx_cbfn,
                             struct bna_res_info *res_info, void *priv);
void bna_rx_destroy(struct bna_rx *rx);
void bna_rx_enable(struct bna_rx *rx);
void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
                    void (*cbfn)(void *, struct bna_rx *));
void bna_rx_cleanup_complete(struct bna_rx *rx);
void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
void bna_rx_dim_update(struct bna_ccb *ccb);
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_add(struct bna_rx *rx, u8 *ucmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
                     void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
                enum bna_rxmode bitmask,
                void (*cbfn)(struct bnad *, struct bna_rx *));
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);

/* ENET */

/* API for RX */
int bna_enet_mtu_get(struct bna_enet *enet);

/* Callbacks for TX, RX */
void bna_enet_cb_tx_stopped(struct bna_enet *enet);
void bna_enet_cb_rx_stopped(struct bna_enet *enet);

/* API for BNAD */
void bna_enet_enable(struct bna_enet *enet);
void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
                      void (*cbfn)(void *));
void bna_enet_pause_config(struct bna_enet *enet,
                           struct bna_pause_config *pause_config,
                           void (*cbfn)(struct bnad *));
void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
                      void (*cbfn)(struct bnad *));
void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);

/* IOCETH */

/* APIs for BNAD */
void bna_ioceth_enable(struct bna_ioceth *ioceth);
void bna_ioceth_disable(struct bna_ioceth *ioceth,
                        enum bna_cleanup_type type);

/* BNAD */

/* Callbacks for ENET */
void bnad_cb_ethport_link_status(struct bnad *bnad,
                                 enum bna_link_status status);

/* Callbacks for IOCETH */
void bnad_cb_ioceth_ready(struct bnad *bnad);
void bnad_cb_ioceth_failed(struct bnad *bnad);
void bnad_cb_ioceth_disabled(struct bnad *bnad);
void bnad_cb_mbox_intr_enable(struct bnad *bnad);
void bnad_cb_mbox_intr_disable(struct bnad *bnad);

/* Callbacks for BNA */
void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
                       struct bna_stats *stats);

#endif	/* __BNA_H__ */