/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
#include "hinic_tx.h"

#define TX_IRQ_NO_PENDING	0
#define TX_IRQ_NO_COALESC	0
#define TX_IRQ_NO_LLI_TIMER	0
#define TX_IRQ_NO_CREDIT	0
#define TX_IRQ_NO_RESEND_TIMER	0

#define CI_UPDATE_NO_PENDING	0
#define CI_UPDATE_NO_COALESC	0

#define HW_CONS_IDX(sq)		be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN		64

/**
 * hinic_txq_clean_stats - Clean the statistics of the specific queue
 * @txq: Logical Tx Queue
 **/
void hinic_txq_clean_stats(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_update_begin(&txq_stats->syncp);
	txq_stats->pkts = 0;
	txq_stats->bytes = 0;
	txq_stats->tx_busy = 0;
	txq_stats->tx_wake = 0;
	txq_stats->tx_dropped = 0;
	u64_stats_update_end(&txq_stats->syncp);
}

/**
 * hinic_txq_get_stats - get statistics of Tx Queue
 * @txq: Logical Tx Queue
 * @stats: return updated stats here
 **/
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;
	unsigned int start;

	u64_stats_update_begin(&stats->syncp);
	do {
		start = u64_stats_fetch_begin(&txq_stats->syncp);
		stats->pkts = txq_stats->pkts;
		stats->bytes = txq_stats->bytes;
		stats->tx_busy = txq_stats->tx_busy;
		stats->tx_wake = txq_stats->tx_wake;
		stats->tx_dropped = txq_stats->tx_dropped;
	} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
	u64_stats_update_end(&stats->syncp);
}

/**
 * txq_stats_init - Initialize the statistics of the specific queue
 * @txq: Logical Tx Queue
 **/
static void txq_stats_init(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_init(&txq_stats->syncp);
	hinic_txq_clean_stats(txq);
}

/**
 * tx_map_skb - DMA map the skb and return the mapped sges
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: returned sges
 *
 * Return 0 - Success, negative - Failure
 **/
static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
		      struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct skb_frag_struct *frag;
	dma_addr_t dma_addr;
	int i, j;

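	/* Map the linear part of the skb first; page fragments, if any,
	 * are mapped in the loop below.
	 */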
	dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma_addr)) {
		dev_err(&pdev->dev, "Failed to map Tx skb data\n");
		return -EFAULT;
	}

	hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, dma_addr)) {
			dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
			goto err_tx_map;
		}

		hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
	}

	return 0;

err_tx_map:
	for (j = 0; j < i; j++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
			       sges[j + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
	return -EFAULT;
}

/**
 * tx_unmap_skb - unmap the dma address of the skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			 struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
			       sges[i + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
}

netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct netdev_queue *netdev_txq;
	int nr_sges, err = NETDEV_TX_OK;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct hinic_txq *txq;
	struct hinic_qp *qp;
	u16 prod_idx;

	txq = &nic_dev->txqs[skb->queue_mapping];
	qp = container_of(txq->sq, struct hinic_qp, sq);

	if (skb->len < MIN_SKB_LEN) {
		if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
			netdev_err(netdev, "Failed to pad skb\n");
			goto update_error_stats;
		}

		skb->len = MIN_SKB_LEN;
	}

	nr_sges = skb_shinfo(skb)->nr_frags + 1;
	if (nr_sges > txq->max_sges) {
		netdev_err(netdev, "Too many Tx sges\n");
		goto skb_error;
	}

	err = tx_map_skb(nic_dev, skb, txq->sges);
	if (err)
		goto skb_error;

	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
	if (!sq_wqe) {
		tx_unmap_skb(nic_dev, skb, txq->sges);

		netif_stop_subqueue(netdev, qp->q_id);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_busy++;
		u64_stats_update_end(&txq->txq_stats.syncp);
		err = NETDEV_TX_BUSY;
		goto flush_skbs;
	}

	hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);

	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, skb->queue_mapping);
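	/* Ring the doorbell only when the stack indicates no further packets
	 * are queued behind this one (xmit_more is clear) or the queue has
	 * been stopped, so several WQEs can be flushed with one doorbell.
	 */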
	if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;

skb_error:
	dev_kfree_skb_any(skb);

update_error_stats:
	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.tx_dropped++;
	u64_stats_update_end(&txq->txq_stats.syncp);
	return err;
}

/**
 * tx_free_skb - unmap and free skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			struct hinic_sge *sges)
{
	tx_unmap_skb(nic_dev, skb, sges);

	dev_kfree_skb_any(skb);
}

/**
 * free_all_tx_skbs - free all skbs in tx queue
 * @txq: tx queue
 **/
static void free_all_tx_skbs(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_sq *sq = txq->sq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct sk_buff *skb;
	int nr_sges;
	u16 ci;

	while ((sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &ci))) {
		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	}
}

/**
 * free_tx_poll - free finished tx skbs in the tx queue connected to the napi
 * @napi: napi
 * @budget: maximum number of Tx completions to process
 *
 * Return number of Tx completions processed
 **/
static int free_tx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
	struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct netdev_queue *netdev_txq;
	struct hinic_sq *sq = txq->sq;
	struct hinic_wq *wq = sq->wq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	int nr_sges, pkts = 0;
	struct sk_buff *skb;
	u64 tx_bytes = 0;
	u16 hw_ci, sw_ci;

	do {
		hw_ci = HW_CONS_IDX(sq) & wq->mask;

		sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &sw_ci);
		if ((!sq_wqe) ||
		    (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
			break;

		tx_bytes += skb->len;
		pkts++;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	} while (pkts < budget);

	if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
	    hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
		netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);

		__netif_tx_lock(netdev_txq, smp_processor_id());

		netif_wake_subqueue(nic_dev->netdev, qp->q_id);

		__netif_tx_unlock(netdev_txq);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_wake++;
		u64_stats_update_end(&txq->txq_stats.syncp);
	}

	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.bytes += tx_bytes;
	txq->txq_stats.pkts += pkts;
	u64_stats_update_end(&txq->txq_stats.syncp);

	if (pkts < budget) {
		napi_complete(napi);
		enable_irq(sq->irq);
		return pkts;
	}

	return budget;
}

static void tx_napi_add(struct hinic_txq *txq, int weight)
{
	netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
	napi_enable(&txq->napi);
}

static void tx_napi_del(struct hinic_txq *txq)
{
	napi_disable(&txq->napi);
	netif_napi_del(&txq->napi);
}

static irqreturn_t tx_irq(int irq, void *data)
{
	struct hinic_txq *txq = data;
	struct hinic_dev *nic_dev;

	nic_dev = netdev_priv(txq->netdev);

	/* Disable the interrupt until the napi poll is completed */
	disable_irq_nosync(txq->sq->irq);

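	/* Reset the MSI-X entry counters for the SQ (assumption: this clears
	 * the pending events count) before handing completion work to NAPI.
	 */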
	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);

	napi_schedule(&txq->napi);
	return IRQ_HANDLED;
}

static int tx_request_irq(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_sq *sq = txq->sq;
	int err;

	tx_napi_add(txq, nic_dev->tx_weight);

	hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
			     TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
			     TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
			     TX_IRQ_NO_RESEND_TIMER);

	err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
	if (err) {
		dev_err(&pdev->dev, "Failed to request Tx irq\n");
		tx_napi_del(txq);
		return err;
	}

	return 0;
}

static void tx_free_irq(struct hinic_txq *txq)
{
	struct hinic_sq *sq = txq->sq;

	free_irq(sq->irq, txq);
	tx_napi_del(txq);
}

/**
 * hinic_init_txq - Initialize the Tx Queue
 * @txq: Logical Tx Queue
 * @sq: Hardware Tx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	int err, irqname_len;
	size_t sges_size;

	txq->netdev = netdev;
	txq->sq = sq;

	txq_stats_init(txq);

	txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

	sges_size = txq->max_sges * sizeof(*txq->sges);
	txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->sges)
		return -ENOMEM;

	sges_size = txq->max_sges * sizeof(*txq->free_sges);
	txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->free_sges) {
		err = -ENOMEM;
		goto err_alloc_free_sges;
	}

	irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
	txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
	if (!txq->irq_name) {
		err = -ENOMEM;
		goto err_alloc_irqname;
	}

	sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);

	err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
					 CI_UPDATE_NO_COALESC);
	if (err)
		goto err_hw_ci;

	err = tx_request_irq(txq);
	if (err) {
		netdev_err(netdev, "Failed to request Tx irq\n");
		goto err_req_tx_irq;
	}

	return 0;

err_req_tx_irq:
err_hw_ci:
	devm_kfree(&netdev->dev, txq->irq_name);

err_alloc_irqname:
	devm_kfree(&netdev->dev, txq->free_sges);

err_alloc_free_sges:
	devm_kfree(&netdev->dev, txq->sges);
	return err;
}

/**
 * hinic_clean_txq - Clean the Tx Queue
 * @txq: Logical Tx Queue
 **/
void hinic_clean_txq(struct hinic_txq *txq)
{
	struct net_device *netdev = txq->netdev;

	tx_free_irq(txq);

	free_all_tx_skbs(txq);

	devm_kfree(&netdev->dev, txq->irq_name);
	devm_kfree(&netdev->dev, txq->free_sges);
	devm_kfree(&netdev->dev, txq->sges);
}