/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
#include "hinic_tx.h"

#define TX_IRQ_NO_PENDING               0
#define TX_IRQ_NO_COALESC               0
#define TX_IRQ_NO_LLI_TIMER             0
#define TX_IRQ_NO_CREDIT                0
#define TX_IRQ_NO_RESEND_TIMER          0

#define CI_UPDATE_NO_PENDING            0
#define CI_UPDATE_NO_COALESC            0

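/* The hardware-updated consumer index of the SQ is stored big-endian at
 * sq->hw_ci_addr (the address is programmed in hinic_init_txq() via
 * hinic_hwdev_hw_ci_addr_set()); HW_CONS_IDX() reads it in CPU byte order.
 */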
#define HW_CONS_IDX(sq)         be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN             64

/**
 * hinic_txq_clean_stats - Clean the statistics of the specified queue
 * @txq: Logical Tx Queue
 **/
void hinic_txq_clean_stats(struct hinic_txq *txq)
{
        struct hinic_txq_stats *txq_stats = &txq->txq_stats;

        u64_stats_update_begin(&txq_stats->syncp);
        txq_stats->pkts = 0;
        txq_stats->bytes = 0;
        txq_stats->tx_busy = 0;
        txq_stats->tx_wake = 0;
        txq_stats->tx_dropped = 0;
        u64_stats_update_end(&txq_stats->syncp);
}

/**
 * hinic_txq_get_stats - get the statistics of the Tx Queue
 * @txq: Logical Tx Queue
 * @stats: return updated stats here
 **/
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
        struct hinic_txq_stats *txq_stats = &txq->txq_stats;
        unsigned int start;

        u64_stats_update_begin(&stats->syncp);
        do {
                start = u64_stats_fetch_begin(&txq_stats->syncp);
                stats->pkts = txq_stats->pkts;
                stats->bytes = txq_stats->bytes;
                stats->tx_busy = txq_stats->tx_busy;
                stats->tx_wake = txq_stats->tx_wake;
                stats->tx_dropped = txq_stats->tx_dropped;
        } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
        u64_stats_update_end(&stats->syncp);
}

/**
 * txq_stats_init - Initialize the statistics of the specified queue
 * @txq: Logical Tx Queue
 **/
static void txq_stats_init(struct hinic_txq *txq)
{
        struct hinic_txq_stats *txq_stats = &txq->txq_stats;

        u64_stats_init(&txq_stats->syncp);
        hinic_txq_clean_stats(txq);
}

/**
 * tx_map_skb - DMA map the skb and return the sges
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: returned sges
 *
 * Return 0 - Success, negative - Failure
 **/
static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
                      struct hinic_sge *sges)
{
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        struct skb_frag_struct *frag;
        dma_addr_t dma_addr;
        int i, j;

        dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, dma_addr)) {
                dev_err(&pdev->dev, "Failed to map Tx skb data\n");
                return -EFAULT;
        }

        hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];

                dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
                                            skb_frag_size(frag),
                                            DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, dma_addr)) {
                        dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
                        goto err_tx_map;
                }

                hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
        }

        return 0;

err_tx_map:
        for (j = 0; j < i; j++)
                dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
                               sges[j + 1].len, DMA_TO_DEVICE);

        dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
                         DMA_TO_DEVICE);
        return -EFAULT;
}

/**
 * tx_unmap_skb - unmap the DMA addresses of the skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
                         struct hinic_sge *sges)
{
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
                               sges[i + 1].len, DMA_TO_DEVICE);

        dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
                         DMA_TO_DEVICE);
}

netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct hinic_dev *nic_dev = netdev_priv(netdev);
        struct netdev_queue *netdev_txq;
        int nr_sges, err = NETDEV_TX_OK;
        struct hinic_sq_wqe *sq_wqe;
        unsigned int wqe_size;
        struct hinic_txq *txq;
        struct hinic_qp *qp;
        u16 prod_idx;

        txq = &nic_dev->txqs[skb->queue_mapping];
        qp = container_of(txq->sq, struct hinic_qp, sq);

        if (skb->len < MIN_SKB_LEN) {
                if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
                        netdev_err(netdev, "Failed to pad skb\n");
                        goto update_error_stats;
                }

                skb->len = MIN_SKB_LEN;
        }

        nr_sges = skb_shinfo(skb)->nr_frags + 1;
        if (nr_sges > txq->max_sges) {
                netdev_err(netdev, "Too many Tx sges\n");
                goto skb_error;
        }

        err = tx_map_skb(nic_dev, skb, txq->sges);
        if (err)
                goto skb_error;

        wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

        sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
        if (!sq_wqe) {
                netif_stop_subqueue(netdev, qp->q_id);

                /* Check for the case that free_tx_poll() is called on another
                 * cpu and we stopped the subqueue after the free_tx_poll()
                 * check.
                 */
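                /* free_tx_poll() wakes the subqueue only if it observes it
                 * stopped, so a completion that freed WQEBBs between the
                 * failed hinic_sq_get_wqe() above and netif_stop_subqueue()
                 * would not wake us; retrying here closes that window.
                 */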
                sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
                if (sq_wqe) {
                        netif_wake_subqueue(nic_dev->netdev, qp->q_id);
                        goto process_sq_wqe;
                }

                tx_unmap_skb(nic_dev, skb, txq->sges);

                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.tx_busy++;
                u64_stats_update_end(&txq->txq_stats.syncp);
                err = NETDEV_TX_BUSY;
                wqe_size = 0;
                goto flush_skbs;
        }

process_sq_wqe:
        hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);

        hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
        netdev_txq = netdev_get_tx_queue(netdev, skb->queue_mapping);
        if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
                hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

        return err;

skb_error:
        dev_kfree_skb_any(skb);

update_error_stats:
        u64_stats_update_begin(&txq->txq_stats.syncp);
        txq->txq_stats.tx_dropped++;
        u64_stats_update_end(&txq->txq_stats.syncp);
        return err;
}

/**
 * tx_free_skb - unmap and free skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
                        struct hinic_sge *sges)
{
        tx_unmap_skb(nic_dev, skb, sges);

        dev_kfree_skb_any(skb);
}

/**
 * free_all_tx_skbs - free all skbs in the Tx queue
 * @txq: tx queue
 **/
static void free_all_tx_skbs(struct hinic_txq *txq)
{
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
        struct hinic_sq *sq = txq->sq;
        struct hinic_sq_wqe *sq_wqe;
        unsigned int wqe_size;
        struct sk_buff *skb;
        int nr_sges;
        u16 ci;

        while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
                sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
                if (!sq_wqe)
                        break;

                nr_sges = skb_shinfo(skb)->nr_frags + 1;

                hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

                hinic_sq_put_wqe(sq, wqe_size);

                tx_free_skb(nic_dev, skb, txq->free_sges);
        }
}

/**
 * free_tx_poll - free finished Tx skbs in the Tx queue connected to the napi
 * @napi: napi
 * @budget: maximum number of Tx completions to process
 *
 * Return number of packets that were processed
 **/
static int free_tx_poll(struct napi_struct *napi, int budget)
{
        struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
        struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
        struct netdev_queue *netdev_txq;
        struct hinic_sq *sq = txq->sq;
        struct hinic_wq *wq = sq->wq;
        struct hinic_sq_wqe *sq_wqe;
        unsigned int wqe_size;
        int nr_sges, pkts = 0;
        struct sk_buff *skb;
        u64 tx_bytes = 0;
        u16 hw_ci, sw_ci;

        do {
                hw_ci = HW_CONS_IDX(sq) & wq->mask;

                /* Read a WQEBB to get the real WQE size and consumer index. */
                sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
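                /* The WQE at sw_ci may be freed only after the HW consumer
                 * index has advanced past all of its WQEBBs: the masked
                 * distance from sw_ci to hw_ci, in WQEBB units, must cover
                 * wqe_size.
                 */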
                if ((!sq_wqe) ||
                    (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
                        break;

                /* If this WQE has multiple WQEBBs, read again to get the
                 * full-size WQE.
                 */
                if (wqe_size > wq->wqebb_size) {
                        sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
                        if (unlikely(!sq_wqe))
                                break;
                }

                tx_bytes += skb->len;
                pkts++;

                nr_sges = skb_shinfo(skb)->nr_frags + 1;

                hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

                hinic_sq_put_wqe(sq, wqe_size);

                tx_free_skb(nic_dev, skb, txq->free_sges);
        } while (pkts < budget);

        if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
            hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
                netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);

                __netif_tx_lock(netdev_txq, smp_processor_id());

                netif_wake_subqueue(nic_dev->netdev, qp->q_id);

                __netif_tx_unlock(netdev_txq);

                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.tx_wake++;
                u64_stats_update_end(&txq->txq_stats.syncp);
        }

        u64_stats_update_begin(&txq->txq_stats.syncp);
        txq->txq_stats.bytes += tx_bytes;
        txq->txq_stats.pkts += pkts;
        u64_stats_update_end(&txq->txq_stats.syncp);

        if (pkts < budget) {
                napi_complete(napi);
                enable_irq(sq->irq);
                return pkts;
        }

        return budget;
}

static void tx_napi_add(struct hinic_txq *txq, int weight)
{
        netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
        napi_enable(&txq->napi);
}

static void tx_napi_del(struct hinic_txq *txq)
{
        napi_disable(&txq->napi);
        netif_napi_del(&txq->napi);
}

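/* The SQ interrupt is left disabled for as long as the napi poll is
 * scheduled; free_tx_poll() re-enables it with enable_irq() once it
 * completes within the budget.
 */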
static irqreturn_t tx_irq(int irq, void *data)
{
        struct hinic_txq *txq = data;
        struct hinic_dev *nic_dev;

        nic_dev = netdev_priv(txq->netdev);

        /* Disable the interrupt until the napi poll is completed */
        disable_irq_nosync(txq->sq->irq);

        hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);

        napi_schedule(&txq->napi);
        return IRQ_HANDLED;
}

static int tx_request_irq(struct hinic_txq *txq)
{
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_sq *sq = txq->sq;
        int err;

        tx_napi_add(txq, nic_dev->tx_weight);

        hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
                             TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
                             TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
                             TX_IRQ_NO_RESEND_TIMER);

        err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
        if (err) {
                dev_err(&pdev->dev, "Failed to request Tx irq\n");
                tx_napi_del(txq);
                return err;
        }

        return 0;
}

static void tx_free_irq(struct hinic_txq *txq)
{
        struct hinic_sq *sq = txq->sq;

        free_irq(sq->irq, txq);
        tx_napi_del(txq);
}

/**
 * hinic_init_txq - Initialize the Tx Queue
 * @txq: Logical Tx Queue
 * @sq: Hardware Tx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
                   struct net_device *netdev)
{
        struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
        struct hinic_dev *nic_dev = netdev_priv(netdev);
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        int err, irqname_len;
        size_t sges_size;

        txq->netdev = netdev;
        txq->sq = sq;

        txq_stats_init(txq);

        txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

        sges_size = txq->max_sges * sizeof(*txq->sges);
        txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
        if (!txq->sges)
                return -ENOMEM;

        sges_size = txq->max_sges * sizeof(*txq->free_sges);
        txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
        if (!txq->free_sges) {
                err = -ENOMEM;
                goto err_alloc_free_sges;
        }

        irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
        txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
        if (!txq->irq_name) {
                err = -ENOMEM;
                goto err_alloc_irqname;
        }

        sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);

        err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
                                         CI_UPDATE_NO_COALESC);
        if (err)
                goto err_hw_ci;

        err = tx_request_irq(txq);
        if (err) {
                netdev_err(netdev, "Failed to request Tx irq\n");
                goto err_req_tx_irq;
        }

        return 0;

err_req_tx_irq:
err_hw_ci:
        devm_kfree(&netdev->dev, txq->irq_name);

err_alloc_irqname:
        devm_kfree(&netdev->dev, txq->free_sges);

err_alloc_free_sges:
        devm_kfree(&netdev->dev, txq->sges);
        return err;
}

/**
 * hinic_clean_txq - Clean the Tx Queue
 * @txq: Logical Tx Queue
 **/
void hinic_clean_txq(struct hinic_txq *txq)
{
        struct net_device *netdev = txq->netdev;

        tx_free_irq(txq);

        free_all_tx_skbs(txq);

        devm_kfree(&netdev->dev, txq->irq_name);
        devm_kfree(&netdev->dev, txq->free_sges);
        devm_kfree(&netdev->dev, txq->sges);
}