/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings.
 * Definition of functions for Rx and Tx rings. Friendly module for aq_nic.
 */

#include "aq_vec.h"
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_hw.h"

#include <linux/netdevice.h>

struct aq_vec_s {
	struct aq_obj_s header;
	struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	/* One TX/RX ring pair per traffic class, indexed by
	 * AQ_VEC_TX_ID / AQ_VEC_RX_ID.
	 */
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};

#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1

/* NAPI poll callback: for each traffic class served by this vector,
 * reclaim completed TX descriptors, then receive, clean and refill
 * the RX ring within the remaining budget.
 */
static int aq_vec_poll(struct napi_struct *napi, int budget)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	struct aq_ring_s *ring = NULL;
	int work_done = 0;
	int err = 0;
	unsigned int i = 0U;
	unsigned int sw_tail_old = 0U;
	bool was_tx_cleaned = false;

	if (!self) {
		err = -EINVAL;
	} else {
		for (i = 0U, ring = self->ring[0];
		     self->tx_rings > i; ++i, ring = self->ring[i]) {
			if (self->aq_hw_ops->hw_ring_tx_head_update) {
				err = self->aq_hw_ops->hw_ring_tx_head_update(
					self->aq_hw,
					&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
			}

			if (ring[AQ_VEC_TX_ID].sw_head !=
			    ring[AQ_VEC_TX_ID].hw_head) {
				aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);

				if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) >
				    AQ_CFG_SKB_FRAGS_MAX) {
					aq_nic_ndev_queue_start(self->aq_nic,
						ring[AQ_VEC_TX_ID].idx);
				}
				was_tx_cleaned = true;
			}

			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
					&ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			if (ring[AQ_VEC_RX_ID].sw_head !=
			    ring[AQ_VEC_RX_ID].hw_head) {
				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
						       napi,
						       &work_done,
						       budget - work_done);
				if (err < 0)
					goto err_exit;

				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
				if (err < 0)
					goto err_exit;

				err = self->aq_hw_ops->hw_ring_rx_fill(
					self->aq_hw,
					&ring[AQ_VEC_RX_ID], sw_tail_old);
				if (err < 0)
					goto err_exit;
			}
		}

		/* Any TX work consumes the whole budget so that NAPI
		 * keeps polling this vector.
		 */
		if (was_tx_cleaned)
			work_done = budget;

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		}
	}
err_exit:
	return work_done;
}
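/* aq_vec_alloc(): allocate one interrupt vector together with a TX/RX ring
 * pair per configured traffic class, pin the vector to a CPU for IRQ
 * affinity and register the NAPI poll handler. On failure everything
 * allocated so far is released through aq_vec_free() and NULL is returned.
 */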
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_vec_s *self = NULL;
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	self->tx_rings = 0;
	self->rx_rings = 0;

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
		       aq_vec_poll, AQ_CFG_NAPI_WEIGHT);

	/* One TX/RX ring pair per configured traffic class. */
	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->aq_nic,
						self->tx_rings,
						self->aq_ring_param.vec_idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		aq_vec_free(self);
		self = NULL;
	}
	return self;
}

int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID], 0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}

int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}

void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}
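/* aq_vec_deinit(): drain the rings once the vector has been stopped;
 * completed TX buffers are cleaned and RX buffers are returned, while the
 * ring memory itself stays allocated until aq_vec_free().
 */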
void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}
err_exit:;
}

void aq_vec_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		aq_ring_free(&ring[AQ_VEC_RX_ID]);
	}

	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}

irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	irqreturn_t err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		goto err_exit;

	if (irq_mask) {
		self->aq_hw_ops->hw_irq_disable(self->aq_hw,
				1U << self->aq_ring_param.vec_idx);
		napi_schedule(&self->napi);
	} else {
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		err = IRQ_NONE;
	}

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}

/* Accumulate the software counters of every ring owned by this vector
 * into the caller-supplied totals.
 */
void aq_vec_add_stats(struct aq_vec_s *self,
		      struct aq_ring_stats_rx_s *stats_rx,
		      struct aq_ring_stats_tx_s *stats_tx)
{
	struct aq_ring_s *ring = NULL;
	unsigned int r = 0U;

	for (r = 0U, ring = self->ring[0];
	     self->tx_rings > r; ++r, ring = self->ring[r]) {
		struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
		struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;

		stats_rx->packets += rx->packets;
		stats_rx->bytes += rx->bytes;
		stats_rx->errors += rx->errors;
		stats_rx->jumbo_packets += rx->jumbo_packets;
		stats_rx->lro_packets += rx->lro_packets;

		stats_tx->packets += tx->packets;
		stats_tx->bytes += tx->bytes;
		stats_tx->errors += tx->errors;
	}
}

int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
{
	unsigned int count = 0U;
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;

	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
	aq_vec_add_stats(self, &stats_rx, &stats_tx);

	data[count] += stats_rx.packets;
	data[++count] += stats_tx.packets;
	data[++count] += stats_rx.jumbo_packets;
	data[++count] += stats_rx.lro_packets;
	data[++count] += stats_rx.errors;

	if (p_count)
		*p_count = ++count;

	return 0;
}
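/* Usage sketch (illustrative only, not part of this module): aq_nic is
 * expected to drive a vector through the lifecycle below. The exact error
 * handling and the origin of aq_hw_ops/aq_hw shown here are assumptions;
 * the real call sites live in aq_nic.c.
 *
 *	vec = aq_vec_alloc(aq_nic, idx, aq_nic_cfg);
 *	err = aq_vec_init(vec, aq_hw_ops, aq_hw);
 *	err = aq_vec_start(vec);	(NAPI enabled, rings running)
 *	...				(aq_vec_isr() schedules aq_vec_poll())
 *	aq_vec_stop(vec);
 *	aq_vec_deinit(vec);
 *	aq_vec_free(vec);
 */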