// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_vec.c: Definition of the common structure holding one interrupt
 * vector's Rx and Tx rings, and of the functions that operate on those
 * rings. Companion module to aq_nic.
 */

#include "aq_vec.h"
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_hw.h"

#include <linux/netdevice.h>

struct aq_vec_s {
	const struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};

#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1

static int aq_vec_poll(struct napi_struct *napi, int budget)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	unsigned int sw_tail_old = 0U;
	struct aq_ring_s *ring = NULL;
	bool was_tx_cleaned = true;
	unsigned int i = 0U;
	int work_done = 0;
	int err = 0;

	if (!self) {
		err = -EINVAL;
	} else {
		for (i = 0U, ring = self->ring[0];
		     self->tx_rings > i; ++i, ring = self->ring[i]) {
			if (self->aq_hw_ops->hw_ring_tx_head_update) {
				err = self->aq_hw_ops->hw_ring_tx_head_update(
						self->aq_hw,
						&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
			}

			if (ring[AQ_VEC_TX_ID].sw_head !=
			    ring[AQ_VEC_TX_ID].hw_head) {
				was_tx_cleaned =
					aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
				aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
			}

			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
						&ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			if (ring[AQ_VEC_RX_ID].sw_head !=
			    ring[AQ_VEC_RX_ID].hw_head) {
				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
						       napi,
						       &work_done,
						       budget - work_done);
				if (err < 0)
					goto err_exit;

				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
				if (err < 0)
					goto err_exit;

				err = self->aq_hw_ops->hw_ring_rx_fill(
						self->aq_hw,
						&ring[AQ_VEC_RX_ID],
						sw_tail_old);
				if (err < 0)
					goto err_exit;
			}
		}

err_exit:
		if (!was_tx_cleaned)
			work_done = budget;

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		}
	}

	return work_done;
}
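/* aq_vec_poll() follows the standard NAPI contract: Tx completions are
 * cleaned without limit, Rx processing is bounded by @budget, and a return
 * value below @budget signals that the vector is idle, at which point the
 * poller calls napi_complete_done() and re-arms its interrupt.  A minimal
 * sketch of that pattern, using hypothetical clean_tx()/clean_rx()/
 * reenable_irq() helpers in place of the ring operations above:
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done;
 *
 *		clean_tx(napi);				// Tx work is unbounded
 *		work_done = clean_rx(napi, budget);	// Rx work is budgeted
 *
 *		if (work_done < budget) {
 *			napi_complete_done(napi, work_done);
 *			reenable_irq(napi);
 *		}
 *		return work_done;
 *	}
 */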
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_vec_s *self = NULL;
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	self->tx_rings = 0;
	self->rx_rings = 0;

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
		       aq_vec_poll, AQ_CFG_NAPI_WEIGHT);

	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->aq_nic,
						self->tx_rings,
						self->aq_ring_param.vec_idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		aq_vec_free(self);
		self = NULL;
	}
	return self;
}

int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}

int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}

void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}

void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}

err_exit:;
}

void aq_vec_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		aq_ring_free(&ring[AQ_VEC_RX_ID]);
	}

	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}

irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}
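/* The functions above make up the vector life cycle that aq_nic drives:
 * aq_vec_alloc() builds the per-TC ring pairs and registers the NAPI
 * context, aq_vec_init() and aq_vec_start() program and enable the
 * hardware rings, and aq_vec_stop()/aq_vec_deinit()/aq_vec_free() unwind
 * in reverse order.  A rough sketch of a caller, with error handling
 * trimmed (example_vec_up() is a hypothetical wrapper; see aq_nic.c for
 * the real sequencing):
 *
 *	static int example_vec_up(struct aq_nic_s *nic,
 *				  const struct aq_hw_ops *ops,
 *				  struct aq_hw_s *hw,
 *				  struct aq_nic_cfg_s *cfg, unsigned int idx)
 *	{
 *		struct aq_vec_s *vec = aq_vec_alloc(nic, idx, cfg);
 *
 *		if (!vec)
 *			return -ENOMEM;
 *		if (aq_vec_init(vec, ops, hw) < 0 || aq_vec_start(vec) < 0) {
 *			aq_vec_deinit(vec);
 *			aq_vec_free(vec);
 *			return -EIO;
 *		}
 *		return 0;
 *	}
 */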
irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	int err;

	if (!self)
		return IRQ_NONE;

	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		return IRQ_NONE;

	if (irq_mask) {
		self->aq_hw_ops->hw_irq_disable(self->aq_hw,
				1U << self->aq_ring_param.vec_idx);
		napi_schedule(&self->napi);
	} else {
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}

void aq_vec_add_stats(struct aq_vec_s *self,
		      struct aq_ring_stats_rx_s *stats_rx,
		      struct aq_ring_stats_tx_s *stats_tx)
{
	struct aq_ring_s *ring = NULL;
	unsigned int r = 0U;

	for (r = 0U, ring = self->ring[0];
	     self->tx_rings > r; ++r, ring = self->ring[r]) {
		struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
		struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;

		stats_rx->packets += rx->packets;
		stats_rx->bytes += rx->bytes;
		stats_rx->errors += rx->errors;
		stats_rx->jumbo_packets += rx->jumbo_packets;
		stats_rx->lro_packets += rx->lro_packets;
		stats_rx->pg_losts += rx->pg_losts;
		stats_rx->pg_flips += rx->pg_flips;
		stats_rx->pg_reuses += rx->pg_reuses;

		stats_tx->packets += tx->packets;
		stats_tx->bytes += tx->bytes;
		stats_tx->errors += tx->errors;
		stats_tx->queue_restarts += tx->queue_restarts;
	}
}

int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
{
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;
	unsigned int count = 0U;

	memset(&stats_rx, 0, sizeof(stats_rx));
	memset(&stats_tx, 0, sizeof(stats_tx));
	aq_vec_add_stats(self, &stats_rx, &stats_tx);

	/* This data should mimic aq_ethtool_queue_stat_names structure */
	data[count] += stats_rx.packets;
	data[++count] += stats_tx.packets;
	data[++count] += stats_tx.queue_restarts;
	data[++count] += stats_rx.jumbo_packets;
	data[++count] += stats_rx.lro_packets;
	data[++count] += stats_rx.errors;

	if (p_count)
		*p_count = ++count;

	return 0;
}
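/* aq_vec_get_sw_stats() emits six u64 counters per vector in the order
 * assumed by aq_ethtool_queue_stat_names: Rx packets, Tx packets, Tx queue
 * restarts, Rx jumbo packets, Rx LRO packets, Rx errors.  Because it adds
 * into @data rather than overwriting it, callers must zero the buffer
 * first.  A rough sketch of an ethtool-style consumer (hypothetical
 * example_fill_stats(); the vector array and count are assumed):
 *
 *	static u64 *example_fill_stats(struct aq_vec_s *vecs[],
 *				       unsigned int n_vecs, u64 *data)
 *	{
 *		unsigned int i, count = 0U;
 *
 *		for (i = 0U; i < n_vecs; ++i) {
 *			memset(data, 0, 6U * sizeof(*data));
 *			aq_vec_get_sw_stats(vecs[i], data, &count);
 *			data += count;	// count comes back as 6
 *		}
 *		return data;
 *	}
 */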