// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings.
 * Definition of functions for Rx and Tx rings. Friendly module for aq_nic.
 */

#include "aq_vec.h"
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_hw.h"

#include <linux/netdevice.h>

struct aq_vec_s {
	const struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	/* One Tx/Rx ring pair per traffic class, indexed by
	 * AQ_VEC_TX_ID / AQ_VEC_RX_ID.
	 */
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};

#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1

static int aq_vec_poll(struct napi_struct *napi, int budget)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	unsigned int sw_tail_old = 0U;
	struct aq_ring_s *ring = NULL;
	bool was_tx_cleaned = true;
	unsigned int i = 0U;
	int work_done = 0;
	int err = 0;

	if (!self) {
		err = -EINVAL;
	} else {
		for (i = 0U, ring = self->ring[0];
		     self->tx_rings > i; ++i, ring = self->ring[i]) {
			if (self->aq_hw_ops->hw_ring_tx_head_update) {
				err = self->aq_hw_ops->hw_ring_tx_head_update(
						self->aq_hw,
						&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
			}

			if (ring[AQ_VEC_TX_ID].sw_head !=
			    ring[AQ_VEC_TX_ID].hw_head) {
				was_tx_cleaned =
					aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
				aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
			}

			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
						&ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			if (ring[AQ_VEC_RX_ID].sw_head !=
			    ring[AQ_VEC_RX_ID].hw_head) {
				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
						       napi,
						       &work_done,
						       budget - work_done);
				if (err < 0)
					goto err_exit;

				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
				if (err < 0)
					goto err_exit;

				err = self->aq_hw_ops->hw_ring_rx_fill(
						self->aq_hw,
						&ring[AQ_VEC_RX_ID],
						sw_tail_old);
				if (err < 0)
					goto err_exit;
			}
		}

		/* If Tx cleanup did not complete, claim the full budget so
		 * NAPI keeps polling instead of re-enabling the interrupt.
		 */
		if (!was_tx_cleaned)
			work_done = budget;

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		}
	}
err_exit:
	return work_done;
}

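/* Typical vector lifecycle, as driven by aq_nic (illustrative summary, not a
 * literal call trace):
 *
 *	vec = aq_vec_alloc(nic, idx, cfg);	software state, rings, NAPI
 *	aq_vec_init(vec, hw_ops, hw);		bind rings to hardware queues
 *	aq_vec_start(vec);			start queues, enable NAPI
 *	...					aq_vec_isr() -> aq_vec_poll()
 *	aq_vec_stop(vec);			stop queues, disable NAPI
 *	aq_vec_deinit(vec);			drain the rings
 *	aq_vec_free(vec);			release rings and the vector
 */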
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_vec_s *self = NULL;
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	self->tx_rings = 0;
	self->rx_rings = 0;

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
		       aq_vec_poll, AQ_CFG_NAPI_WEIGHT);

	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->aq_nic,
						self->tx_rings,
						self->aq_ring_param.vec_idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		/* tx_rings/rx_rings count only successful allocations, so
		 * aq_vec_free() can safely unwind a partial failure.
		 */
		aq_vec_free(self);
		self = NULL;
	}
	return self;
}

int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID], 0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}

int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}

void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}

void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}

err_exit:;
}

void aq_vec_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		aq_ring_free(&ring[AQ_VEC_RX_ID]);
	}

	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}

irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

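/* Legacy (INTx) interrupts may be shared with other devices, so unlike the
 * MSI-X handler above this one must first read the interrupt status to tell
 * whether the line fired for this vector: if so, it masks the vector's bit
 * and schedules NAPI; otherwise it reports IRQ_NONE.
 */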
irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	int err;

	if (!self)
		return IRQ_NONE;

	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		return IRQ_NONE;

	if (irq_mask) {
		self->aq_hw_ops->hw_irq_disable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		napi_schedule(&self->napi);
	} else {
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}

void aq_vec_add_stats(struct aq_vec_s *self,
		      struct aq_ring_stats_rx_s *stats_rx,
		      struct aq_ring_stats_tx_s *stats_tx)
{
	struct aq_ring_s *ring = NULL;
	unsigned int r = 0U;

	for (r = 0U, ring = self->ring[0];
	     self->tx_rings > r; ++r, ring = self->ring[r]) {
		struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
		struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;

		stats_rx->packets += rx->packets;
		stats_rx->bytes += rx->bytes;
		stats_rx->errors += rx->errors;
		stats_rx->jumbo_packets += rx->jumbo_packets;
		stats_rx->lro_packets += rx->lro_packets;
		stats_rx->pg_losts += rx->pg_losts;
		stats_rx->pg_flips += rx->pg_flips;
		stats_rx->pg_reuses += rx->pg_reuses;

		stats_tx->packets += tx->packets;
		stats_tx->bytes += tx->bytes;
		stats_tx->errors += tx->errors;
		stats_tx->queue_restarts += tx->queue_restarts;
	}
}

int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
{
	unsigned int count = 0U;
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;

	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
	aq_vec_add_stats(self, &stats_rx, &stats_tx);

	/* This data should mimic aq_ethtool_queue_stat_names structure.
	 * Values accumulate into @data, which the caller is expected to
	 * have zeroed.
	 */
	data[count] += stats_rx.packets;
	data[++count] += stats_tx.packets;
	data[++count] += stats_tx.queue_restarts;
	data[++count] += stats_rx.jumbo_packets;
	data[++count] += stats_rx.lro_packets;
	data[++count] += stats_rx.errors;

	if (p_count)
		*p_count = ++count;

	return 0;
}