// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings.
 * Definition of functions for Rx and Tx rings. Friendly module for aq_nic.
 */

#include "aq_vec.h"
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_hw.h"

#include <linux/netdevice.h>

struct aq_vec_s {
	const struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};

#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1

static int aq_vec_poll(struct napi_struct *napi, int budget)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	unsigned int sw_tail_old = 0U;
	struct aq_ring_s *ring = NULL;
	bool was_tx_cleaned = true;
	unsigned int i = 0U;
	int work_done = 0;
	int err = 0;

	if (!self) {
		err = -EINVAL;
	} else {
		for (i = 0U, ring = self->ring[0];
		     self->tx_rings > i; ++i, ring = self->ring[i]) {
			if (self->aq_hw_ops->hw_ring_tx_head_update) {
				err = self->aq_hw_ops->hw_ring_tx_head_update(
							self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
			}

			if (ring[AQ_VEC_TX_ID].sw_head !=
			    ring[AQ_VEC_TX_ID].hw_head) {
				was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
				aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
			}

			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
						&ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			if (ring[AQ_VEC_RX_ID].sw_head !=
			    ring[AQ_VEC_RX_ID].hw_head) {
				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
						       napi,
						       &work_done,
						       budget - work_done);
				if (err < 0)
					goto err_exit;

				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
				if (err < 0)
					goto err_exit;

				err = self->aq_hw_ops->hw_ring_rx_fill(
						self->aq_hw,
						&ring[AQ_VEC_RX_ID], sw_tail_old);
				if (err < 0)
					goto err_exit;
			}
		}

err_exit:
		if (!was_tx_cleaned)
			work_done = budget;

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		}
	}

	return work_done;
}

struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_vec_s *self = NULL;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self)
		goto err_exit;

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	self->tx_rings = 0;
	self->rx_rings = 0;

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
		       aq_vec_poll, AQ_CFG_NAPI_WEIGHT);

err_exit:
	return self;
}
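
/* Allocate one Tx/Rx ring pair per configured traffic class and register
 * each Tx ring with the nic layer; on failure, rings allocated so far are
 * released via aq_vec_ring_free().
 */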
int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
		      unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
								    i, idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		aq_vec_ring_free(self);
		self = NULL;
	}

	return err;
}

int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID], 0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}

int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}

void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}

void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}

err_exit:;
}

void aq_vec_free(struct aq_vec_s *self)
{
	if (!self)
		goto err_exit;

	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}

void aq_vec_ring_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		if (i < self->rx_rings)
			aq_ring_free(&ring[AQ_VEC_RX_ID]);
	}

	self->tx_rings = 0;
	self->rx_rings = 0;
err_exit:;
}
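
/* Per-vector interrupt handler: all Tx/Rx processing is deferred to the
 * NAPI poll routine, so the handler only needs to schedule it.
 */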
irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	int err;

	if (!self)
		return IRQ_NONE;
	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		return IRQ_NONE;

	if (irq_mask) {
		self->aq_hw_ops->hw_irq_disable(self->aq_hw,
				1U << self->aq_ring_param.vec_idx);
		napi_schedule(&self->napi);
	} else {
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}

static void aq_vec_add_stats(struct aq_vec_s *self,
			     const unsigned int tc,
			     struct aq_ring_stats_rx_s *stats_rx,
			     struct aq_ring_stats_tx_s *stats_tx)
{
	struct aq_ring_s *ring = self->ring[tc];

	if (tc < self->rx_rings) {
		struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;

		stats_rx->packets += rx->packets;
		stats_rx->bytes += rx->bytes;
		stats_rx->errors += rx->errors;
		stats_rx->jumbo_packets += rx->jumbo_packets;
		stats_rx->lro_packets += rx->lro_packets;
		stats_rx->pg_losts += rx->pg_losts;
		stats_rx->pg_flips += rx->pg_flips;
		stats_rx->pg_reuses += rx->pg_reuses;
	}

	if (tc < self->tx_rings) {
		struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;

		stats_tx->packets += tx->packets;
		stats_tx->bytes += tx->bytes;
		stats_tx->errors += tx->errors;
		stats_tx->queue_restarts += tx->queue_restarts;
	}
}

int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data,
			unsigned int *p_count)
{
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;
	unsigned int count = 0U;

	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));

	aq_vec_add_stats(self, tc, &stats_rx, &stats_tx);

	/* This data should mimic aq_ethtool_queue_stat_names structure
	 */
	data[count] += stats_rx.packets;
	data[++count] += stats_tx.packets;
	data[++count] += stats_tx.queue_restarts;
	data[++count] += stats_rx.jumbo_packets;
	data[++count] += stats_rx.lro_packets;
	data[++count] += stats_rx.errors;

	if (p_count)
		*p_count = ++count;

	return 0;
}