15a2cc190SJeff Kirsher /* 25a2cc190SJeff Kirsher * Copyright (c) 2007 Mellanox Technologies. All rights reserved. 35a2cc190SJeff Kirsher * 45a2cc190SJeff Kirsher * This software is available to you under a choice of one of two 55a2cc190SJeff Kirsher * licenses. You may choose to be licensed under the terms of the GNU 65a2cc190SJeff Kirsher * General Public License (GPL) Version 2, available from the file 75a2cc190SJeff Kirsher * COPYING in the main directory of this source tree, or the 85a2cc190SJeff Kirsher * OpenIB.org BSD license below: 95a2cc190SJeff Kirsher * 105a2cc190SJeff Kirsher * Redistribution and use in source and binary forms, with or 115a2cc190SJeff Kirsher * without modification, are permitted provided that the following 125a2cc190SJeff Kirsher * conditions are met: 135a2cc190SJeff Kirsher * 145a2cc190SJeff Kirsher * - Redistributions of source code must retain the above 155a2cc190SJeff Kirsher * copyright notice, this list of conditions and the following 165a2cc190SJeff Kirsher * disclaimer. 175a2cc190SJeff Kirsher * 185a2cc190SJeff Kirsher * - Redistributions in binary form must reproduce the above 195a2cc190SJeff Kirsher * copyright notice, this list of conditions and the following 205a2cc190SJeff Kirsher * disclaimer in the documentation and/or other materials 215a2cc190SJeff Kirsher * provided with the distribution. 225a2cc190SJeff Kirsher * 235a2cc190SJeff Kirsher * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 245a2cc190SJeff Kirsher * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 255a2cc190SJeff Kirsher * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 265a2cc190SJeff Kirsher * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 275a2cc190SJeff Kirsher * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 285a2cc190SJeff Kirsher * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 295a2cc190SJeff Kirsher * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 305a2cc190SJeff Kirsher * SOFTWARE. 315a2cc190SJeff Kirsher * 325a2cc190SJeff Kirsher */ 335a2cc190SJeff Kirsher 345a2cc190SJeff Kirsher #include <linux/mlx4/cq.h> 355a2cc190SJeff Kirsher #include <linux/mlx4/qp.h> 365a2cc190SJeff Kirsher #include <linux/mlx4/cmd.h> 375a2cc190SJeff Kirsher 385a2cc190SJeff Kirsher #include "mlx4_en.h" 395a2cc190SJeff Kirsher 405a2cc190SJeff Kirsher static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event) 415a2cc190SJeff Kirsher { 425a2cc190SJeff Kirsher return; 435a2cc190SJeff Kirsher } 445a2cc190SJeff Kirsher 455a2cc190SJeff Kirsher 465a2cc190SJeff Kirsher int mlx4_en_create_cq(struct mlx4_en_priv *priv, 4741d942d5SEugenia Emantayev struct mlx4_en_cq **pcq, 48163561a4SEugenia Emantayev int entries, int ring, enum cq_type mode, 49163561a4SEugenia Emantayev int node) 505a2cc190SJeff Kirsher { 515a2cc190SJeff Kirsher struct mlx4_en_dev *mdev = priv->mdev; 5241d942d5SEugenia Emantayev struct mlx4_en_cq *cq; 535a2cc190SJeff Kirsher int err; 545a2cc190SJeff Kirsher 55163561a4SEugenia Emantayev cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node); 56163561a4SEugenia Emantayev if (!cq) { 5741d942d5SEugenia Emantayev cq = kzalloc(sizeof(*cq), GFP_KERNEL); 5841d942d5SEugenia Emantayev if (!cq) { 5941d942d5SEugenia Emantayev en_err(priv, "Failed to allocate CQ structure\n"); 6041d942d5SEugenia Emantayev return -ENOMEM; 6141d942d5SEugenia Emantayev } 62163561a4SEugenia Emantayev } 6341d942d5SEugenia Emantayev 645a2cc190SJeff Kirsher cq->size = entries; 6508ff3235SOr Gerlitz cq->buf_size = cq->size * mdev->dev->caps.cqe_size; 665a2cc190SJeff Kirsher 675a2cc190SJeff Kirsher cq->ring = ring; 685a2cc190SJeff Kirsher 
cq->is_tx = mode; 695a2cc190SJeff Kirsher 70163561a4SEugenia Emantayev /* Allocate HW buffers on provided NUMA node. 71163561a4SEugenia Emantayev * dev->numa_node is used in mtt range allocation flow. 72163561a4SEugenia Emantayev */ 73163561a4SEugenia Emantayev set_dev_node(&mdev->dev->pdev->dev, node); 745a2cc190SJeff Kirsher err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, 755a2cc190SJeff Kirsher cq->buf_size, 2 * PAGE_SIZE); 76163561a4SEugenia Emantayev set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); 775a2cc190SJeff Kirsher if (err) 7841d942d5SEugenia Emantayev goto err_cq; 795a2cc190SJeff Kirsher 805a2cc190SJeff Kirsher err = mlx4_en_map_buffer(&cq->wqres.buf); 815a2cc190SJeff Kirsher if (err) 8241d942d5SEugenia Emantayev goto err_res; 835a2cc190SJeff Kirsher 8441d942d5SEugenia Emantayev cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf; 8541d942d5SEugenia Emantayev *pcq = cq; 8641d942d5SEugenia Emantayev 8741d942d5SEugenia Emantayev return 0; 8841d942d5SEugenia Emantayev 8941d942d5SEugenia Emantayev err_res: 9041d942d5SEugenia Emantayev mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); 9141d942d5SEugenia Emantayev err_cq: 9241d942d5SEugenia Emantayev kfree(cq); 9341d942d5SEugenia Emantayev *pcq = NULL; 945a2cc190SJeff Kirsher return err; 955a2cc190SJeff Kirsher } 965a2cc190SJeff Kirsher 9776532d0cSAlexander Guller int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, 9876532d0cSAlexander Guller int cq_idx) 995a2cc190SJeff Kirsher { 1005a2cc190SJeff Kirsher struct mlx4_en_dev *mdev = priv->mdev; 1015a2cc190SJeff Kirsher int err = 0; 1025a2cc190SJeff Kirsher char name[25]; 103ec693d47SAmir Vadai int timestamp_en = 0; 1041eb8c695SAmir Vadai struct cpu_rmap *rmap = 1051eb8c695SAmir Vadai #ifdef CONFIG_RFS_ACCEL 1061eb8c695SAmir Vadai priv->dev->rx_cpu_rmap; 1071eb8c695SAmir Vadai #else 1081eb8c695SAmir Vadai NULL; 1091eb8c695SAmir Vadai #endif 1105a2cc190SJeff Kirsher 1115a2cc190SJeff Kirsher cq->dev = 
mdev->pndev[priv->port]; 1125a2cc190SJeff Kirsher cq->mcq.set_ci_db = cq->wqres.db.db; 1135a2cc190SJeff Kirsher cq->mcq.arm_db = cq->wqres.db.db + 1; 1145a2cc190SJeff Kirsher *cq->mcq.set_ci_db = 0; 1155a2cc190SJeff Kirsher *cq->mcq.arm_db = 0; 1165a2cc190SJeff Kirsher memset(cq->buf, 0, cq->buf_size); 1175a2cc190SJeff Kirsher 1185a2cc190SJeff Kirsher if (cq->is_tx == RX) { 1195a2cc190SJeff Kirsher if (mdev->dev->caps.comp_pool) { 1205a2cc190SJeff Kirsher if (!cq->vector) { 12176532d0cSAlexander Guller sprintf(name, "%s-%d", priv->dev->name, 12276532d0cSAlexander Guller cq->ring); 12376532d0cSAlexander Guller /* Set IRQ for specific name (per ring) */ 1241eb8c695SAmir Vadai if (mlx4_assign_eq(mdev->dev, name, rmap, 125d9236c3fSAmir Vadai &cq->vector)) { 12676532d0cSAlexander Guller cq->vector = (cq->ring + 1 + priv->port) 12776532d0cSAlexander Guller % mdev->dev->caps.num_comp_vectors; 1281a91de28SJoe Perches mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n", 12976532d0cSAlexander Guller name); 1305a2cc190SJeff Kirsher } 1315a2cc190SJeff Kirsher } 1325a2cc190SJeff Kirsher } else { 1335a2cc190SJeff Kirsher cq->vector = (cq->ring + 1 + priv->port) % 1345a2cc190SJeff Kirsher mdev->dev->caps.num_comp_vectors; 1355a2cc190SJeff Kirsher } 1365a2cc190SJeff Kirsher } else { 13776532d0cSAlexander Guller /* For TX we use the same irq per 13876532d0cSAlexander Guller ring we assigned for the RX */ 13976532d0cSAlexander Guller struct mlx4_en_cq *rx_cq; 14076532d0cSAlexander Guller 14176532d0cSAlexander Guller cq_idx = cq_idx % priv->rx_ring_num; 14241d942d5SEugenia Emantayev rx_cq = priv->rx_cq[cq_idx]; 14376532d0cSAlexander Guller cq->vector = rx_cq->vector; 1445a2cc190SJeff Kirsher } 1455a2cc190SJeff Kirsher 1465a2cc190SJeff Kirsher if (!cq->is_tx) 14741d942d5SEugenia Emantayev cq->size = priv->rx_ring[cq->ring]->actual_size; 1485a2cc190SJeff Kirsher 149ec693d47SAmir Vadai if ((cq->is_tx && priv->hwtstamp_config.tx_type) || 150ec693d47SAmir Vadai 
(!cq->is_tx && priv->hwtstamp_config.rx_filter)) 151ec693d47SAmir Vadai timestamp_en = 1; 152ec693d47SAmir Vadai 153ec693d47SAmir Vadai err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, 154ec693d47SAmir Vadai &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq, 155ec693d47SAmir Vadai cq->vector, 0, timestamp_en); 1565a2cc190SJeff Kirsher if (err) 1575a2cc190SJeff Kirsher return err; 1585a2cc190SJeff Kirsher 1595a2cc190SJeff Kirsher cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq; 1605a2cc190SJeff Kirsher cq->mcq.event = mlx4_en_cq_event; 1615a2cc190SJeff Kirsher 1620276a330SEugenia Emantayev if (cq->is_tx) { 1630276a330SEugenia Emantayev netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, 1640276a330SEugenia Emantayev NAPI_POLL_WEIGHT); 1650276a330SEugenia Emantayev } else { 1665a2cc190SJeff Kirsher netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); 1679e77a2b8SAmir Vadai napi_hash_add(&cq->napi); 1685a2cc190SJeff Kirsher } 1695a2cc190SJeff Kirsher 1700276a330SEugenia Emantayev napi_enable(&cq->napi); 1710276a330SEugenia Emantayev 1725a2cc190SJeff Kirsher return 0; 1735a2cc190SJeff Kirsher } 1745a2cc190SJeff Kirsher 17541d942d5SEugenia Emantayev void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) 1765a2cc190SJeff Kirsher { 1775a2cc190SJeff Kirsher struct mlx4_en_dev *mdev = priv->mdev; 17841d942d5SEugenia Emantayev struct mlx4_en_cq *cq = *pcq; 1795a2cc190SJeff Kirsher 1805a2cc190SJeff Kirsher mlx4_en_unmap_buffer(&cq->wqres.buf); 1815a2cc190SJeff Kirsher mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); 182fe0af03cSAlexander Guller if (priv->mdev->dev->caps.comp_pool && cq->vector) 1835a2cc190SJeff Kirsher mlx4_release_eq(priv->mdev->dev, cq->vector); 184cd3109d2SYevgeny Petrilin cq->vector = 0; 1855a2cc190SJeff Kirsher cq->buf_size = 0; 1865a2cc190SJeff Kirsher cq->buf = NULL; 18741d942d5SEugenia Emantayev kfree(cq); 18841d942d5SEugenia Emantayev *pcq = NULL; 1895a2cc190SJeff Kirsher } 1905a2cc190SJeff Kirsher 
/* mlx4_en_deactivate_cq - stop NAPI processing and free the HW CQ.
 * @priv:	per-netdev private state
 * @cq:		CQ previously activated by mlx4_en_activate_cq()
 *
 * The teardown order matters: NAPI is quiesced before the HW CQ is
 * freed so no completion handler can run against a freed CQ.
 */
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	napi_disable(&cq->napi);
	if (!cq->is_tx) {
		/* RX CQs were added to the busy-poll NAPI hash at
		 * activate time; unhash and wait out concurrent RCU
		 * readers before the napi struct is deleted.
		 */
		napi_hash_del(&cq->napi);
		synchronize_rcu();
	}
	netif_napi_del(&cq->napi);

	mlx4_cq_free(priv->mdev->dev, &cq->mcq);
}

/* Set rx cq moderation parameters */
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	/* Push the current moder_cnt/moder_time values to HW.
	 * Returns 0 or a negative errno from the firmware command.
	 */
	return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
			      cq->moder_cnt, cq->moder_time);
}

/* Re-arm the CQ so HW raises a completion interrupt for the next CQE */
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
		    &priv->mdev->uar_lock);

	/* Always succeeds; int return kept for caller convenience */
	return 0;
}
