/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 **********************************************************************/

/*! \file octeon_network.h
 *  \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
 */

#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__
#include <linux/ptp_clock_kernel.h>

#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU

/* Bit mask values for lio->ifstate */
#define LIO_IFSTATE_DROQ_OPS             0x01
#define LIO_IFSTATE_REGISTERED           0x02
#define LIO_IFSTATE_RUNNING              0x04
#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
#define LIO_IFSTATE_RESETTING            0x10

struct liquidio_if_cfg_context {
        u32 octeon_id;
        wait_queue_head_t wc;
        int cond;
};

struct liquidio_if_cfg_resp {
        u64 rh;
        struct liquidio_if_cfg_info cfg_info;
        u64 status;
};

struct oct_nic_stats_resp {
        u64 rh;
        struct oct_link_stats stats;
        u64 status;
};

struct oct_nic_stats_ctrl {
        struct completion complete;
        struct net_device *netdev;
};

/** LiquidIO per-interface network private data */
struct lio {
        /** State of the interface. Rx/Tx happens only in the RUNNING state. */
        atomic_t ifstate;

        /** Octeon Interface index number. This device will be represented as
         * oct<ifidx> in the system.
         */
        int ifidx;

        /** Octeon Input queue to use to transmit for this network interface. */
        int txq;

        /** Octeon Output queue from which pkts arrive
         * for this network interface.
         */
        int rxq;

        /** Guards each glist */
        spinlock_t *glist_lock;

        /** Array of gather component linked lists */
        struct list_head *glist;
        void **glists_virt_base;
        dma_addr_t *glists_dma_base;
        u32 glist_entry_size;

        /** Pointer to the NIC properties for the Octeon device this network
         * interface is associated with.
         */
        struct octdev_props *octprops;

        /** Pointer to the octeon device structure. */
        struct octeon_device *oct_dev;

        struct net_device *netdev;

        /** Link information sent by the core application for this interface. */
        struct oct_link_info linfo;

        /** counter of link changes */
        u64 link_changes;

        /** Size of Tx queue for this octeon device. */
        u32 tx_qsize;

        /** Size of Rx queue for this octeon device. */
        u32 rx_qsize;

        /** MTU size for this octeon device. */
        u32 mtu;

        /** msg level flag per interface. */
        u32 msg_enable;

        /** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
        u64 dev_capability;

        /* Copy of transmit encapsulation capabilities:
         * TSO, TSO6, Checksums for this device for Kernel
         * 3.10.0 onwards
         */
        u64 enc_dev_capability;

        /** Copy of beacon reg in phy */
        u32 phy_beacon_val;

        /** Copy of ctrl reg in phy */
        u32 led_ctrl_val;

        /* PTP clock information */
        struct ptp_clock_info ptp_info;
        struct ptp_clock *ptp_clock;
        s64 ptp_adjust;

        /* for atomic access to Octeon PTP reg and data struct */
        spinlock_t ptp_lock;

        /* Interface info */
        u32 intf_open;

        /* work queue for txq status */
        struct cavium_wq txq_status_wq;

        /* work queue for rxq oom status */
        struct cavium_wq rxq_status_wq;

        /* work queue for link status */
        struct cavium_wq link_status_wq;

        /* work queue to regularly send local time to octeon firmware */
        struct cavium_wq sync_octeon_time_wq;

        int netdev_uc_count;
};

#define LIO_SIZE (sizeof(struct lio))
#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))

#define LIO_MAX_CORES 12
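
/* Illustrative usage sketch (not part of the driver): a typical netdev
 * callback recovers the per-interface state with GET_LIO() and gates
 * Rx/Tx work on the RUNNING bit via the ifstate helpers defined later
 * in this header. The function name lio_example_xmit() is hypothetical.
 *
 *      static netdev_tx_t lio_example_xmit(struct sk_buff *skb,
 *                                          struct net_device *netdev)
 *      {
 *              struct lio *lio = GET_LIO(netdev);
 *
 *              if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
 *                      return NETDEV_TX_BUSY;
 *              ...
 *      }
 */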

/**
 * \brief Enable or disable feature
 * @param netdev  pointer to network device
 * @param cmd     Command that just requires acknowledgment
 * @param param1  Parameter to command
 */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);

int setup_rx_oom_poll_fn(struct net_device *netdev);

void cleanup_rx_oom_poll_fn(struct net_device *netdev);

/**
 * \brief Link control command completion callback
 * @param nctrl_ptr pointer to control packet structure
 *
 * This routine is called by the callback function when a ctrl pkt sent to
 * core app completes. The nctrl_ptr contains a copy of the command type
 * and data sent to the core app. This routine is only called if the ctrl
 * pkt was sent successfully to the core app.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);

int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
                             u32 num_iqs, u32 num_oqs);

irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
                                       void *dev);

int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);

int lio_wait_for_clean_oq(struct octeon_device *oct);

/**
 * \brief Register ethtool operations
 * @param netdev pointer to network device
 */
void liquidio_set_ethtool_ops(struct net_device *netdev);

/**
 * \brief Net device change_mtu
 * @param netdev network device
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu);
#define LIO_CHANGE_MTU_SUCCESS 1
#define LIO_CHANGE_MTU_FAIL    2

#define SKB_ADJ_MASK 0x3F
#define SKB_ADJ      (SKB_ADJ_MASK + 1)

#define MIN_SKB_SIZE    256 /* 8 bytes and more - 8 bytes for PTP */
#define LIO_RXBUFFER_SZ 2048
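
/* The receive helpers below align skb->data to a SKB_ADJ (64-byte)
 * boundary before use. A purely illustrative worked example of that
 * arithmetic: if skb->data ends in 0x...58, then
 * (unsigned long)skb->data & SKB_ADJ_MASK == 0x18, so
 * r = SKB_ADJ - 0x18 = 0x28 bytes are reserved and the data pointer
 * lands on the next 64-byte boundary.
 */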

static inline void
*recv_buffer_alloc(struct octeon_device *oct,
                   struct octeon_skb_page_info *pg_info)
{
        struct page *page;
        struct sk_buff *skb;
        struct octeon_skb_page_info *skb_pg_info;

        page = alloc_page(GFP_ATOMIC);
        if (unlikely(!page))
                return NULL;

        skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
        if (unlikely(!skb)) {
                __free_page(page);
                pg_info->page = NULL;
                return NULL;
        }

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        /* Get DMA info */
        pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
                                    PAGE_SIZE, DMA_FROM_DEVICE);

        /* Mapping failed!! */
        if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
                __free_page(page);
                dev_kfree_skb_any((struct sk_buff *)skb);
                pg_info->page = NULL;
                return NULL;
        }

        pg_info->page = page;
        pg_info->page_offset = 0;
        skb_pg_info->page = page;
        skb_pg_info->page_offset = 0;
        skb_pg_info->dma = pg_info->dma;

        return (void *)skb;
}

static inline void
*recv_buffer_fast_alloc(u32 size)
{
        struct sk_buff *skb;
        struct octeon_skb_page_info *skb_pg_info;

        skb = dev_alloc_skb(size + SKB_ADJ);
        if (unlikely(!skb))
                return NULL;

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        skb_pg_info->page = NULL;
        skb_pg_info->page_offset = 0;
        skb_pg_info->dma = 0;

        return skb;
}
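
/* Summary of the page-recycling scheme implemented below (descriptive
 * only, no new behavior): each receive page is carved into two
 * LIO_RXBUFFER_SZ (2048-byte) halves. recv_buffer_recycle() flips
 * page_offset between 0 and LIO_RXBUFFER_SZ and takes an extra page
 * reference when the page is still exclusively owned and resident on
 * the local NUMA node; otherwise it unmaps the page and returns an
 * error so the caller can allocate a fresh buffer. The two-halves
 * layout assumes the common 4 KiB PAGE_SIZE.
 */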

static inline int
recv_buffer_recycle(struct octeon_device *oct, void *buf)
{
        struct octeon_skb_page_info *pg_info = buf;

        if (!pg_info->page) {
                dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
                        __func__);
                return -ENOMEM;
        }

        if (unlikely(page_count(pg_info->page) != 1) ||
            unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
                dma_unmap_page(&oct->pci_dev->dev,
                               pg_info->dma, (PAGE_SIZE << 0),
                               DMA_FROM_DEVICE);
                pg_info->dma = 0;
                pg_info->page = NULL;
                pg_info->page_offset = 0;
                return -ENOMEM;
        }

        /* Flip to other half of the buffer */
        if (pg_info->page_offset == 0)
                pg_info->page_offset = LIO_RXBUFFER_SZ;
        else
                pg_info->page_offset = 0;
        page_ref_inc(pg_info->page);

        return 0;
}

static inline void
*recv_buffer_reuse(struct octeon_device *oct, void *buf)
{
        struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
        struct sk_buff *skb;

        skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
        if (unlikely(!skb)) {
                dma_unmap_page(&oct->pci_dev->dev,
                               pg_info->dma, (PAGE_SIZE << 0),
                               DMA_FROM_DEVICE);
                return NULL;
        }

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        skb_pg_info->page = pg_info->page;
        skb_pg_info->page_offset = pg_info->page_offset;
        skb_pg_info->dma = pg_info->dma;

        return skb;
}

static inline void
recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
{
        struct sk_buff *skb = (struct sk_buff *)buffer;

        put_page(pg_info->page);
        pg_info->dma = 0;
        pg_info->page = NULL;
        pg_info->page_offset = 0;

        if (skb)
                dev_kfree_skb_any(skb);
}

static inline void recv_buffer_free(void *buffer)
{
        struct sk_buff *skb = (struct sk_buff *)buffer;
        struct octeon_skb_page_info *pg_info;

        pg_info = ((struct octeon_skb_page_info *)(skb->cb));

        if (pg_info->page) {
                put_page(pg_info->page);
                pg_info->dma = 0;
                pg_info->page = NULL;
                pg_info->page_offset = 0;
        }

        dev_kfree_skb_any((struct sk_buff *)buffer);
}

static inline void
recv_buffer_fast_free(void *buffer)
{
        dev_kfree_skb_any((struct sk_buff *)buffer);
}

static inline void tx_buffer_free(void *buffer)
{
        dev_kfree_skb_any((struct sk_buff *)buffer);
}

#define lio_dma_alloc(oct, size, dma_addr) \
        dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
        dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
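
/* Illustrative pairing of the coherent-DMA wrappers above (hypothetical
 * variable names, not from the driver):
 *
 *      dma_addr_t ring_dma;
 *      void *ring = lio_dma_alloc(oct, ring_size, &ring_dma);
 *
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      lio_dma_free(oct, ring_size, ring, ring_dma);
 */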

static inline
void *get_rbd(struct sk_buff *skb)
{
        struct octeon_skb_page_info *pg_info;
        unsigned char *va;

        pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        va = page_address(pg_info->page) + pg_info->page_offset;

        return va;
}

static inline u64
lio_map_ring(void *buf)
{
        dma_addr_t dma_addr;

        struct sk_buff *skb = (struct sk_buff *)buf;
        struct octeon_skb_page_info *pg_info;

        pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        if (!pg_info->page) {
                pr_err("%s: pg_info->page NULL\n", __func__);
                WARN_ON(1);
        }

        /* Get DMA info */
        dma_addr = pg_info->dma;
        if (!pg_info->dma) {
                pr_err("%s: ERROR it should be already available\n",
                       __func__);
                WARN_ON(1);
        }
        dma_addr += pg_info->page_offset;

        return (u64)dma_addr;
}

static inline void
lio_unmap_ring(struct pci_dev *pci_dev,
               u64 buf_ptr)
{
        dma_unmap_page(&pci_dev->dev,
                       buf_ptr, (PAGE_SIZE << 0),
                       DMA_FROM_DEVICE);
}

static inline void *octeon_fast_packet_alloc(u32 size)
{
        return recv_buffer_fast_alloc(size);
}

static inline void octeon_fast_packet_next(struct octeon_droq *droq,
                                            struct sk_buff *nicbuf,
                                            int copy_len,
                                            int idx)
{
        skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer),
                     copy_len);
}

/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
        return atomic_read(&lio->ifstate) & state_flag;
}

/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
        atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}

/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
        atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}

/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static inline int wait_for_pending_requests(struct octeon_device *oct)
{
        int i, pcount = 0;

        for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) {
                pcount = atomic_read(
                    &oct->response_list[OCTEON_ORDERED_SC_LIST]
                         .pending_req_count);
                if (pcount)
                        schedule_timeout_uninterruptible(HZ / 10);
                else
                        break;
        }

        if (pcount)
                return 1;

        return 0;
}

/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void stop_txqs(struct net_device *netdev)
{
        int i;

        for (i = 0; i < netdev->num_tx_queues; i++)
                netif_stop_subqueue(netdev, i);
}

/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void wake_txqs(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        int i, qno;

        for (i = 0; i < netdev->num_tx_queues; i++) {
                qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;

                if (__netif_subqueue_stopped(netdev, i)) {
                        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
                                                  tx_restart, 1);
                        netif_wake_subqueue(netdev, i);
                }
        }
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void start_txqs(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        int i;

        if (lio->linfo.link.s.link_up) {
                for (i = 0; i < netdev->num_tx_queues; i++)
                        netif_start_subqueue(netdev, i);
        }
}

static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
{
        return skb->queue_mapping % lio->linfo.num_txpciq;
}

#endif