/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
#include "hinic_tx.h"

#define TX_IRQ_NO_PENDING	0
#define TX_IRQ_NO_COALESC	0
#define TX_IRQ_NO_LLI_TIMER	0
#define TX_IRQ_NO_CREDIT	0
#define TX_IRQ_NO_RESEND_TIMER	0

#define CI_UPDATE_NO_PENDING	0
#define CI_UPDATE_NO_COALESC	0

#define HW_CONS_IDX(sq)		be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN		64

/**
 * hinic_txq_clean_stats - Clean the statistics of specific queue
 * @txq: Logical Tx Queue
 **/
void hinic_txq_clean_stats(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_update_begin(&txq_stats->syncp);
	txq_stats->pkts = 0;
	txq_stats->bytes = 0;
	txq_stats->tx_busy = 0;
	txq_stats->tx_wake = 0;
	txq_stats->tx_dropped = 0;
	u64_stats_update_end(&txq_stats->syncp);
}

/**
 * hinic_txq_get_stats - get statistics of Tx Queue
 * @txq: Logical Tx Queue
 * @stats: return updated stats here
 **/
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;
	unsigned int start;

	u64_stats_update_begin(&stats->syncp);
	do {
		start = u64_stats_fetch_begin(&txq_stats->syncp);
		stats->pkts = txq_stats->pkts;
		stats->bytes = txq_stats->bytes;
		stats->tx_busy = txq_stats->tx_busy;
		stats->tx_wake = txq_stats->tx_wake;
		stats->tx_dropped = txq_stats->tx_dropped;
	} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
	u64_stats_update_end(&stats->syncp);
}

/**
 * txq_stats_init - Initialize the statistics of specific queue
 * @txq: Logical Tx Queue
 **/
static void txq_stats_init(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_init(&txq_stats->syncp);
	hinic_txq_clean_stats(txq);
}

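/* The two helpers above rely on the u64_stats_sync pattern so that the
 * 64-bit counters can be read consistently even on 32-bit systems: the
 * writer brackets updates with u64_stats_update_begin()/_end(), and the
 * reader retries its snapshot until u64_stats_fetch_retry() reports that
 * no update raced with it. A caller such as a get_stats64 handler could
 * aggregate the per-queue copies roughly as follows (illustrative sketch
 * only; num_txqs and stats are assumed to be provided by the caller):
 *
 *	struct hinic_txq_stats txq_stats;
 *	int i;
 *
 *	for (i = 0; i < num_txqs; i++) {
 *		hinic_txq_get_stats(&nic_dev->txqs[i], &txq_stats);
 *		stats->tx_packets += txq_stats.pkts;
 *		stats->tx_bytes += txq_stats.bytes;
 *	}
 */
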
/**
 * tx_map_skb - dma mapping for skb and return sges
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: returned sges
 *
 * Return 0 - Success, negative - Failure
 **/
static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
		      struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct skb_frag_struct *frag;
	dma_addr_t dma_addr;
	int i, j;

	dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma_addr)) {
		dev_err(&pdev->dev, "Failed to map Tx skb data\n");
		return -EFAULT;
	}

	hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, dma_addr)) {
			dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
			goto err_tx_map;
		}

		hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
	}

	return 0;

err_tx_map:
	for (j = 0; j < i; j++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
			       sges[j + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
	return -EFAULT;
}

/**
 * tx_unmap_skb - unmap the dma address of the skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			 struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
			       sges[i + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
}

netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct netdev_queue *netdev_txq;
	int nr_sges, err = NETDEV_TX_OK;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct hinic_txq *txq;
	struct hinic_qp *qp;
	u16 prod_idx;

	txq = &nic_dev->txqs[skb->queue_mapping];
	qp = container_of(txq->sq, struct hinic_qp, sq);

	if (skb->len < MIN_SKB_LEN) {
		if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
			netdev_err(netdev, "Failed to pad skb\n");
			goto update_error_stats;
		}

		skb->len = MIN_SKB_LEN;
	}

	nr_sges = skb_shinfo(skb)->nr_frags + 1;
	if (nr_sges > txq->max_sges) {
		netdev_err(netdev, "Too many Tx sges\n");
		goto skb_error;
	}

	err = tx_map_skb(nic_dev, skb, txq->sges);
	if (err)
		goto skb_error;

	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
	if (!sq_wqe) {
		netif_stop_subqueue(netdev, qp->q_id);

		/* Check for the case where free_tx_poll is called on another
		 * cpu and we stopped the subqueue after free_tx_poll checked
		 * it, so retry getting a wqe once before giving up.
		 */
		sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
		if (sq_wqe) {
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
			goto process_sq_wqe;
		}

		tx_unmap_skb(nic_dev, skb, txq->sges);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_busy++;
		u64_stats_update_end(&txq->txq_stats.syncp);
		err = NETDEV_TX_BUSY;
		goto flush_skbs;
	}

process_sq_wqe:
	hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);

	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, skb->queue_mapping);
	if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;

skb_error:
	dev_kfree_skb_any(skb);

update_error_stats:
	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.tx_dropped++;
	u64_stats_update_end(&txq->txq_stats.syncp);
	return err;
}

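/* A note on the transmit path above: the doorbell is only rung when the
 * stack indicates that no further frames are pending (!skb->xmit_more) or
 * when the queue has been stopped, which batches several posted WQEs into
 * a single hinic_sq_write_db() call. When no WQE can be obtained even
 * after the retry, the skb is unmapped and NETDEV_TX_BUSY is returned so
 * that the networking core requeues the skb once free_tx_poll() has
 * reclaimed descriptors and woken the subqueue.
 */
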
/**
 * tx_free_skb - unmap and free skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			struct hinic_sge *sges)
{
	tx_unmap_skb(nic_dev, skb, sges);

	dev_kfree_skb_any(skb);
}

/**
 * free_all_tx_skbs - free all skbs in tx queue
 * @txq: tx queue
 **/
static void free_all_tx_skbs(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_sq *sq = txq->sq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct sk_buff *skb;
	int nr_sges;
	u16 ci;

	while ((sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &ci))) {
		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	}
}

/**
 * free_tx_poll - free finished tx skbs in tx queue that is connected to napi
 * @napi: napi
 * @budget: napi budget, the maximum number of Tx completions to process
 *
 * Return number of packets processed, or the full budget if it was exhausted
 **/
static int free_tx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
	struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct netdev_queue *netdev_txq;
	struct hinic_sq *sq = txq->sq;
	struct hinic_wq *wq = sq->wq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	int nr_sges, pkts = 0;
	struct sk_buff *skb;
	u64 tx_bytes = 0;
	u16 hw_ci, sw_ci;

	do {
		hw_ci = HW_CONS_IDX(sq) & wq->mask;

		sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &sw_ci);
		if ((!sq_wqe) ||
		    (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
			break;

		tx_bytes += skb->len;
		pkts++;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	} while (pkts < budget);

	if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
	    hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
		netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);

		__netif_tx_lock(netdev_txq, smp_processor_id());

		netif_wake_subqueue(nic_dev->netdev, qp->q_id);

		__netif_tx_unlock(netdev_txq);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_wake++;
		u64_stats_update_end(&txq->txq_stats.syncp);
	}

	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.bytes += tx_bytes;
	txq->txq_stats.pkts += pkts;
	u64_stats_update_end(&txq->txq_stats.syncp);

	if (pkts < budget) {
		napi_complete(napi);
		enable_irq(sq->irq);
		return pkts;
	}

	return budget;
}

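/* The completion test in free_tx_poll() compares the hardware consumer
 * index against the software one: ((hw_ci - sw_ci) & wq->mask) is the
 * number of WQE building blocks (wqebbs) the hardware has consumed past
 * the software index, with the mask handling ring wrap-around. Only when
 * that distance, scaled by wq->wqebb_size, covers the whole WQE is the
 * skb considered transmitted and freed. Waking the subqueue is done under
 * __netif_tx_lock(), which serializes it against the transmit path
 * stopping the same subqueue on another cpu.
 */
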
static void tx_napi_add(struct hinic_txq *txq, int weight)
{
	netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
	napi_enable(&txq->napi);
}

static void tx_napi_del(struct hinic_txq *txq)
{
	napi_disable(&txq->napi);
	netif_napi_del(&txq->napi);
}

static irqreturn_t tx_irq(int irq, void *data)
{
	struct hinic_txq *txq = data;
	struct hinic_dev *nic_dev;

	nic_dev = netdev_priv(txq->netdev);

	/* Disable the interrupt until napi processing is complete */
	disable_irq_nosync(txq->sq->irq);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);

	napi_schedule(&txq->napi);
	return IRQ_HANDLED;
}

static int tx_request_irq(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_sq *sq = txq->sq;
	int err;

	tx_napi_add(txq, nic_dev->tx_weight);

	hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
			     TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
			     TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
			     TX_IRQ_NO_RESEND_TIMER);

	err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
	if (err) {
		dev_err(&pdev->dev, "Failed to request Tx irq\n");
		tx_napi_del(txq);
		return err;
	}

	return 0;
}

static void tx_free_irq(struct hinic_txq *txq)
{
	struct hinic_sq *sq = txq->sq;

	free_irq(sq->irq, txq);
	tx_napi_del(txq);
}

/**
 * hinic_init_txq - Initialize the Tx Queue
 * @txq: Logical Tx Queue
 * @sq: Hardware Tx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	int err, irqname_len;
	size_t sges_size;

	txq->netdev = netdev;
	txq->sq = sq;

	txq_stats_init(txq);

	txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

	sges_size = txq->max_sges * sizeof(*txq->sges);
	txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->sges)
		return -ENOMEM;

	sges_size = txq->max_sges * sizeof(*txq->free_sges);
	txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->free_sges) {
		err = -ENOMEM;
		goto err_alloc_free_sges;
	}

	irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
	txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
	if (!txq->irq_name) {
		err = -ENOMEM;
		goto err_alloc_irqname;
	}

	sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);

	err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
					 CI_UPDATE_NO_COALESC);
	if (err)
		goto err_hw_ci;

	err = tx_request_irq(txq);
	if (err) {
		netdev_err(netdev, "Failed to request Tx irq\n");
		goto err_req_tx_irq;
	}

	return 0;

err_req_tx_irq:
err_hw_ci:
	devm_kfree(&netdev->dev, txq->irq_name);

err_alloc_irqname:
	devm_kfree(&netdev->dev, txq->free_sges);

err_alloc_free_sges:
	devm_kfree(&netdev->dev, txq->sges);
	return err;
}

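/* hinic_clean_txq() below undoes hinic_init_txq() in reverse order: the
 * irq and its napi context are released first so that no new completions
 * are processed, then any skbs still posted on the send queue are
 * reclaimed with free_all_tx_skbs(), and only then are the per-queue
 * buffers freed. The devm_kfree() calls mirror the error-unwinding labels
 * above.
 */
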
/**
 * hinic_clean_txq - Clean the Tx Queue
 * @txq: Logical Tx Queue
 **/
void hinic_clean_txq(struct hinic_txq *txq)
{
	struct net_device *netdev = txq->netdev;

	tx_free_irq(txq);

	free_all_tx_skbs(txq);

	devm_kfree(&netdev->dev, txq->irq_name);
	devm_kfree(&netdev->dev, txq->free_sges);
	devm_kfree(&netdev->dev, txq->sges);
}