// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

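/* Unmap an RX page from the device and drop the ring's reference to it. */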
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
	unsigned int len = PAGE_SIZE << rxpage->order;

	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);

	/* Drop the ref for being in the ring. */
	__free_pages(rxpage->page, rxpage->order);
	rxpage->page = NULL;
}

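/* Allocate a page of the given order and DMA-map it for receive.
 * On success the page, its DMA address and a zero starting offset are
 * recorded in @rxpage.
 */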
static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
			 struct device *dev)
{
	struct page *page;
	dma_addr_t daddr;
	int ret = -ENOMEM;

	page = dev_alloc_pages(order);
	if (unlikely(!page))
		goto err_exit;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			     DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(dev, daddr)))
		goto free_page;

	rxpage->page = page;
	rxpage->daddr = daddr;
	rxpage->order = order;
	rxpage->pg_off = 0;

	return 0;

free_page:
	__free_pages(page, order);

err_exit:
	return ret;
}

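/* Make sure an RX buffer has a backing page: flip to the next chunk of
 * the current page, reuse the whole page if the ring is its only user,
 * or allocate and map a fresh one.
 */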
static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
			  int order)
{
	int ret;

	if (rxbuf->rxdata.page) {
		/* A page ref count of one means the ring is the only user
		 * and the whole page can be reused from offset zero.
		 */
		if (page_ref_count(rxbuf->rxdata.page) > 1) {
			/* Try to reuse the next chunk of the current page */
			rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
			if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
				(PAGE_SIZE << order)) {
				self->stats.rx.pg_flips++;
			} else {
				/* Buffer exhausted. We have other users and
				 * should release this page and realloc
				 */
				aq_free_rxpage(&rxbuf->rxdata,
					       aq_nic_get_dev(self->aq_nic));
				self->stats.rx.pg_losts++;
			}
		} else {
			rxbuf->rxdata.pg_off = 0;
			self->stats.rx.pg_reuses++;
		}
	}

	if (!rxbuf->rxdata.page) {
		ret = aq_get_rxpage(&rxbuf->rxdata, order,
				    aq_nic_get_dev(self->aq_nic));
		return ret;
	}

	return 0;
}

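/* Common ring allocation: the software buffer ring and the coherent DMA
 * descriptor area shared with the hardware.
 */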
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
				       struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}
	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}

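/* Set up a Tx ring's size and descriptor size from the NIC configuration
 * and allocate its backing memory.
 */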
struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}

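/* Set up an Rx ring, picking a page order large enough to hold one
 * AQ_CFG_RX_FRAME_MAX sized frame, and allocate its backing memory.
 */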
struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
	self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
			       (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;

	if (aq_nic_cfg->rxpageorder > self->page_order)
		self->page_order = aq_nic_cfg->rxpageorder;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}

int aq_ring_init(struct aq_ring_s *self)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;
	return 0;
}

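/* Return true if descriptor index @i lies strictly between head @h and
 * tail @t on the ring, taking wraparound into account.
 */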
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

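/* Stop the Tx queue when free descriptors run low and wake it up again
 * once enough have been reclaimed.
 */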
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev, ring->idx)) {
		netif_wake_subqueue(ndev, ring->idx);
		ring->stats.tx.queue_restarts++;
	}
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev, ring->idx))
		netif_stop_subqueue(ndev, ring->idx);
}

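/* Reclaim completed Tx descriptors, up to AQ_CFG_TX_CLEAN_BUDGET per
 * call: unmap each buffer, free the skb on the last descriptor of a
 * packet and advance sw_head. Returns true if the budget was not
 * exhausted.
 */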
bool aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);
	unsigned int budget;

	for (budget = AQ_CFG_TX_CLEAN_BUDGET;
	     budget && self->sw_head != self->hw_head; budget--) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop)) {
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
						buff->eop_index,
						self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (unlikely(buff->is_eop))
			dev_kfree_skb_any(buff->skb);

		buff->pa = 0U;
		buff->eop_index = 0xffffU;
		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}

	return !!budget;
}

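/* Propagate the hardware checksum offload results for a received packet
 * into the skb.
 */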
static void aq_rx_checksum(struct aq_ring_s *self,
			   struct aq_ring_buff_s *buff,
			   struct sk_buff *skb)
{
	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
		return;

	if (unlikely(buff->is_cso_err)) {
		++self->stats.rx.errors;
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
	if (buff->is_ip_cso) {
		__skb_incr_checksum_unnecessary(skb);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (buff->is_udp_cso || buff->is_tcp_cso)
		__skb_incr_checksum_unnecessary(skb);
}

#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
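/* Process completed Rx descriptors: small single-fragment packets are
 * wrapped with build_skb(), larger and multi-descriptor (RSC) packets get
 * a copied header plus page fragments. Offload metadata is filled in and
 * the skb is handed to GRO.
 */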
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	bool is_rsc_completed = true;
	int err = 0;

	for (; (self->sw_head != self->hw_head) && budget;
		self->sw_head = aq_ring_next_dx(self, self->sw_head),
		--budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		struct aq_ring_buff_s *buff_ = NULL;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		u16 hdr_len;

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			buff_ = buff;
			do {
				next_ = buff_->next;
				buff_ = &self->buff_ring[next_];
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed))
					break;

				buff->is_error |= buff_->is_error;

			} while (!buff_->is_eop);

			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
			if (buff->is_error) {
				buff_ = buff;
				do {
					next_ = buff_->next;
					buff_ = &self->buff_ring[next_];

					buff_->is_cleaned = true;
				} while (!buff_->is_eop);

				++self->stats.rx.errors;
				continue;
			}
		}

		if (buff->is_error) {
			++self->stats.rx.errors;
			continue;
		}

		dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
					      buff->rxdata.daddr,
					      buff->rxdata.pg_off,
					      buff->len, DMA_FROM_DEVICE);

		/* for single fragment packets use build_skb() */
		if (buff->is_eop &&
		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
			skb = build_skb(aq_buf_vaddr(&buff->rxdata),
					AQ_CFG_RX_FRAME_MAX);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			skb_put(skb, buff->len);
			page_ref_inc(buff->rxdata.page);
		} else {
			skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}

			hdr_len = buff->len;
			if (hdr_len > AQ_CFG_RX_HDR_SIZE)
				hdr_len = eth_get_headlen(skb->dev,
							  aq_buf_vaddr(&buff->rxdata),
							  AQ_CFG_RX_HDR_SIZE);

			memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
			       ALIGN(hdr_len, sizeof(long)));

			if (buff->len - hdr_len > 0) {
				skb_add_rx_frag(skb, 0, buff->rxdata.page,
						buff->rxdata.pg_off + hdr_len,
						buff->len - hdr_len,
						AQ_CFG_RX_FRAME_MAX);
				page_ref_inc(buff->rxdata.page);
			}

			if (!buff->is_eop) {
				buff_ = buff;
				i = 1U;
				do {
					next_ = buff_->next;
					buff_ = &self->buff_ring[next_];

					dma_sync_single_range_for_cpu(
							aq_nic_get_dev(self->aq_nic),
							buff_->rxdata.daddr,
							buff_->rxdata.pg_off,
							buff_->len,
							DMA_FROM_DEVICE);
					skb_add_rx_frag(skb, i++,
							buff_->rxdata.page,
							buff_->rxdata.pg_off,
							buff_->len,
							AQ_CFG_RX_FRAME_MAX);
					page_ref_inc(buff_->rxdata.page);
					buff_->is_cleaned = 1;

					buff->is_ip_cso &= buff_->is_ip_cso;
					buff->is_udp_cso &= buff_->is_udp_cso;
					buff->is_tcp_cso &= buff_->is_tcp_cso;
					buff->is_cso_err |= buff_->is_cso_err;

				} while (!buff_->is_eop);
			}
		}

		if (buff->is_vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       buff->vlan_rx_tag);

		skb->protocol = eth_type_trans(skb, ndev);

		aq_rx_checksum(self, buff, skb);

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);

		skb_record_rx_queue(skb, self->idx);

		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

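/* Refill free Rx descriptors with mapped pages. Refill is skipped until
 * at least AQ_CFG_RX_REFILL_THRES descriptors (capped at half the ring)
 * are available.
 */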
int aq_ring_rx_fill(struct aq_ring_s *self)
{
	unsigned int page_order = self->page_order;
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
					   self->size / 2))
		return err;

	for (i = aq_ring_avail_dx(self); i--;
		self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = AQ_CFG_RX_FRAME_MAX;

		err = aq_get_rxpages(self, buff, page_order);
		if (err)
			goto err_exit;

		buff->pa = aq_buf_daddr(&buff->rxdata);
		buff = NULL;
	}

err_exit:
	return err;
}

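/* Release the pages still attached to unprocessed Rx descriptors. */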
void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		goto err_exit;

	for (; self->sw_head != self->sw_tail;
		self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
	}

err_exit:;
}

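/* Free the software buffer ring and the DMA descriptor area. */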
void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		goto err_exit;

	kfree(self->buff_ring);

	if (self->dx_ring)
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);

err_exit:;
}