/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cmd.h>

#include "mlx4_en.h"

static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
{
	return;
}

int mlx4_en_create_cq(struct mlx4_en_priv *priv,
		      struct mlx4_en_cq **pcq,
		      int entries, int ring, enum cq_type mode,
		      int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	int err;

	cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
	if (!cq) {
		cq = kzalloc(sizeof(*cq), GFP_KERNEL);
		if (!cq) {
			en_err(priv, "Failed to allocate CQ structure\n");
			return -ENOMEM;
		}
	}

	cq->size = entries;
	cq->buf_size = cq->size * mdev->dev->caps.cqe_size;

	cq->ring = ring;
	cq->is_tx = mode;
	cq->vector = mdev->dev->caps.num_comp_vectors;

	/* Allocate HW buffers on provided NUMA node.
	 * dev->numa_node is used in mtt range allocation flow.
	 */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
				 cq->buf_size, 2 * PAGE_SIZE);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_cq;

	err = mlx4_en_map_buffer(&cq->wqres.buf);
	if (err)
		goto err_res;

	cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
	*pcq = cq;

	return 0;

err_res:
	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
err_cq:
	kfree(cq);
	*pcq = NULL;
	return err;
}
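
/* Descriptive note (not in the original source): the function below binds a
 * previously created CQ to the hardware. It points the CQ at its doorbell
 * records, picks an EQ completion vector (an RX CQ gets its own vector based
 * on the ring's affinity mask, a TX CQ reuses the vector of the matching RX
 * CQ), allocates the HW CQ and registers/enables NAPI.
 */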
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	int timestamp_en = 0;
	bool assigned_eq = false;

	cq->dev = mdev->pndev[priv->port];
	cq->mcq.set_ci_db = cq->wqres.db.db;
	cq->mcq.arm_db = cq->wqres.db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db = 0;
	memset(cq->buf, 0, cq->buf_size);

	if (cq->is_tx == RX) {
		if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
					     cq->vector)) {
			cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);

			err = mlx4_assign_eq(mdev->dev, priv->port,
					     &cq->vector);
			if (err) {
				mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
					 cq->vector);
				goto free_eq;
			}

			assigned_eq = true;
		}

		cq->irq_desc =
			irq_to_desc(mlx4_eq_get_irq(mdev->dev,
						    cq->vector));
	} else {
		/* For TX we use the same irq per
		 * ring as we assigned for the RX.
		 */
		struct mlx4_en_cq *rx_cq;

		cq_idx = cq_idx % priv->rx_ring_num;
		rx_cq = priv->rx_cq[cq_idx];
		cq->vector = rx_cq->vector;
	}

	if (!cq->is_tx)
		cq->size = priv->rx_ring[cq->ring]->actual_size;

	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
		timestamp_en = 1;

	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
			    cq->vector, 0, timestamp_en);
	if (err)
		goto free_eq;

	cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
	cq->mcq.event = mlx4_en_cq_event;

	if (cq->is_tx) {
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
			       NAPI_POLL_WEIGHT);
	} else {
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
		napi_hash_add(&cq->napi);
	}

	napi_enable(&cq->napi);

	return 0;

free_eq:
	if (assigned_eq)
		mlx4_release_eq(mdev->dev, cq->vector);
	cq->vector = mdev->dev->caps.num_comp_vectors;
	return err;
}
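
/* Descriptive note (not in the original source): the function below undoes
 * mlx4_en_create_cq(). It unmaps and frees the HW queue resources, releases
 * the EQ vector if this is an RX CQ that holds a valid one, frees the CQ
 * structure and clears the caller's pointer.
 */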
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq = *pcq;

	mlx4_en_unmap_buffer(&cq->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
	if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
	    cq->is_tx == RX)
		mlx4_release_eq(priv->mdev->dev, cq->vector);
	cq->vector = 0;
	cq->buf_size = 0;
	cq->buf = NULL;
	kfree(cq);
	*pcq = NULL;
}

void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	napi_disable(&cq->napi);
	if (!cq->is_tx) {
		napi_hash_del(&cq->napi);
		synchronize_rcu();
	}
	netif_napi_del(&cq->napi);

	mlx4_cq_free(priv->mdev->dev, &cq->mcq);
}

/* Set rx cq moderation parameters */
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
			      cq->moder_cnt, cq->moder_time);
}

int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
		    &priv->mdev->uar_lock);

	return 0;
}
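
/* Typical lifecycle (sketch, based on how the rest of the mlx4_en code uses
 * these helpers, not guaranteed for every path): mlx4_en_create_cq() at
 * resource-allocation time; mlx4_en_activate_cq(), mlx4_en_set_cq_moder() and
 * mlx4_en_arm_cq() when the port is brought up; mlx4_en_deactivate_cq() and
 * mlx4_en_destroy_cq() on the way down.
 */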