1 /*******************************************************************************
2   This contains the functions to handle the normal descriptors.
3 
4   Copyright (C) 2007-2009  STMicroelectronics Ltd
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21 
22   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23 *******************************************************************************/
24 
25 #include <linux/stmmac.h>
26 #include "common.h"
27 #include "descs_com.h"
28 
29 static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
30 			       struct dma_desc *p, void __iomem *ioaddr)
31 {
32 	struct net_device_stats *stats = (struct net_device_stats *)data;
33 	unsigned int tdes0 = le32_to_cpu(p->des0);
34 	unsigned int tdes1 = le32_to_cpu(p->des1);
35 	int ret = tx_done;
36 
37 	/* Get tx owner first */
38 	if (unlikely(tdes0 & TDES0_OWN))
39 		return tx_dma_own;
40 
41 	/* Verify tx error by looking at the last segment. */
42 	if (likely(!(tdes1 & TDES1_LAST_SEGMENT)))
43 		return tx_not_ls;
44 
45 	if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) {
46 		if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) {
47 			x->tx_underflow++;
48 			stats->tx_fifo_errors++;
49 		}
50 		if (unlikely(tdes0 & TDES0_NO_CARRIER)) {
51 			x->tx_carrier++;
52 			stats->tx_carrier_errors++;
53 		}
54 		if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) {
55 			x->tx_losscarrier++;
56 			stats->tx_carrier_errors++;
57 		}
58 		if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) ||
59 			     (tdes0 & TDES0_EXCESSIVE_COLLISIONS) ||
60 			     (tdes0 & TDES0_LATE_COLLISION))) {
61 			unsigned int collisions;
62 
63 			collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3;
64 			stats->collisions += collisions;
65 		}
66 		ret = tx_err;
67 	}
68 
69 	if (tdes0 & TDES0_VLAN_FRAME)
70 		x->tx_vlan++;
71 
72 	if (unlikely(tdes0 & TDES0_DEFERRED))
73 		x->tx_deferred++;
74 
75 	return ret;
76 }
77 
/* Return the Buffer 1 size field (low bits of des1) that was programmed
 * at prepare time, i.e. the length of the transmitted buffer.
 * NOTE(review): the RX mask macro is used on a TX descriptor here;
 * it looks like the TX and RX buffer-1 size fields occupy the same
 * bit positions — confirm against descs.h before renaming.
 */
static int ndesc_get_tx_len(struct dma_desc *p)
{
	return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK);
}
82 
83 /* This function verifies if each incoming frame has some errors
84  * and, if required, updates the multicast statistics.
85  * In case of success, it returns good_frame because the GMAC device
86  * is supposed to be able to compute the csum in HW. */
87 static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
88 			       struct dma_desc *p)
89 {
90 	int ret = good_frame;
91 	unsigned int rdes0 = le32_to_cpu(p->des0);
92 	struct net_device_stats *stats = (struct net_device_stats *)data;
93 
94 	if (unlikely(rdes0 & RDES0_OWN))
95 		return dma_own;
96 
97 	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
98 		pr_warn("%s: Oversized frame spanned multiple buffers\n",
99 			__func__);
100 		stats->rx_length_errors++;
101 		return discard_frame;
102 	}
103 
104 	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
105 		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR))
106 			x->rx_desc++;
107 		if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL))
108 			x->sa_filter_fail++;
109 		if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
110 			x->overflow_error++;
111 		if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
112 			x->ipc_csum_error++;
113 		if (unlikely(rdes0 & RDES0_COLLISION)) {
114 			x->rx_collision++;
115 			stats->collisions++;
116 		}
117 		if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
118 			x->rx_crc++;
119 			stats->rx_crc_errors++;
120 		}
121 		ret = discard_frame;
122 	}
123 	if (unlikely(rdes0 & RDES0_DRIBBLING))
124 		x->dribbling_bit++;
125 
126 	if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
127 		x->rx_length++;
128 		ret = discard_frame;
129 	}
130 	if (unlikely(rdes0 & RDES0_MII_ERROR)) {
131 		x->rx_mii++;
132 		ret = discard_frame;
133 	}
134 #ifdef STMMAC_VLAN_TAG_USED
135 	if (rdes0 & RDES0_VLAN_TAG)
136 		x->vlan_tag++;
137 #endif
138 	return ret;
139 }
140 
141 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
142 			       int end)
143 {
144 	p->des0 |= cpu_to_le32(RDES0_OWN);
145 	p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
146 
147 	if (mode == STMMAC_CHAIN_MODE)
148 		ndesc_rx_set_on_chain(p, end);
149 	else
150 		ndesc_rx_set_on_ring(p, end);
151 
152 	if (disable_rx_ic)
153 		p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
154 }
155 
156 static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
157 {
158 	p->des0 &= cpu_to_le32(~TDES0_OWN);
159 	if (mode == STMMAC_CHAIN_MODE)
160 		ndesc_tx_set_on_chain(p);
161 	else
162 		ndesc_end_tx_desc_on_ring(p, end);
163 }
164 
165 static int ndesc_get_tx_owner(struct dma_desc *p)
166 {
167 	return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31;
168 }
169 
170 static void ndesc_set_tx_owner(struct dma_desc *p)
171 {
172 	p->des0 |= cpu_to_le32(TDES0_OWN);
173 }
174 
175 static void ndesc_set_rx_owner(struct dma_desc *p)
176 {
177 	p->des0 |= cpu_to_le32(RDES0_OWN);
178 }
179 
180 static int ndesc_get_tx_ls(struct dma_desc *p)
181 {
182 	return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30;
183 }
184 
185 static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
186 {
187 	int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25;
188 
189 	memset(p, 0, offsetof(struct dma_desc, des2));
190 	if (mode == STMMAC_CHAIN_MODE)
191 		ndesc_tx_set_on_chain(p);
192 	else
193 		ndesc_end_tx_desc_on_ring(p, ter);
194 }
195 
196 static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
197 				  bool csum_flag, int mode, bool tx_own,
198 				  bool ls)
199 {
200 	unsigned int tdes1 = le32_to_cpu(p->des1);
201 
202 	if (is_fs)
203 		tdes1 |= TDES1_FIRST_SEGMENT;
204 	else
205 		tdes1 &= ~TDES1_FIRST_SEGMENT;
206 
207 	if (likely(csum_flag))
208 		tdes1 |= (TX_CIC_FULL) << TDES1_CHECKSUM_INSERTION_SHIFT;
209 	else
210 		tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT);
211 
212 	if (ls)
213 		tdes1 |= TDES1_LAST_SEGMENT;
214 
215 	p->des1 = cpu_to_le32(tdes1);
216 
217 	if (mode == STMMAC_CHAIN_MODE)
218 		norm_set_tx_desc_len_on_chain(p, len);
219 	else
220 		norm_set_tx_desc_len_on_ring(p, len);
221 
222 	if (tx_own)
223 		p->des0 |= cpu_to_le32(TDES0_OWN);
224 }
225 
226 static void ndesc_set_tx_ic(struct dma_desc *p)
227 {
228 	p->des1 |= cpu_to_le32(TDES1_INTERRUPT);
229 }
230 
231 static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
232 {
233 	unsigned int csum = 0;
234 
235 	/* The type-1 checksum offload engines append the checksum at
236 	 * the end of frame and the two bytes of checksum are added in
237 	 * the length.
238 	 * Adjust for that in the framelen for type-1 checksum offload
239 	 * engines
240 	 */
241 	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
242 		csum = 2;
243 
244 	return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
245 				>> RDES0_FRAME_LEN_SHIFT) -
246 		csum);
247 
248 }
249 
250 static void ndesc_enable_tx_timestamp(struct dma_desc *p)
251 {
252 	p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE);
253 }
254 
255 static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
256 {
257 	return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17;
258 }
259 
260 static u64 ndesc_get_timestamp(void *desc, u32 ats)
261 {
262 	struct dma_desc *p = (struct dma_desc *)desc;
263 	u64 ns;
264 
265 	ns = le32_to_cpu(p->des2);
266 	/* convert high/sec time stamp value to nanosecond */
267 	ns += le32_to_cpu(p->des3) * 1000000000ULL;
268 
269 	return ns;
270 }
271 
272 static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
273 {
274 	struct dma_desc *p = (struct dma_desc *)desc;
275 
276 	if ((le32_to_cpu(p->des2) == 0xffffffff) &&
277 	    (le32_to_cpu(p->des3) == 0xffffffff))
278 		/* timestamp is corrupted, hence don't store it */
279 		return 0;
280 	else
281 		return 1;
282 }
283 
284 static void ndesc_display_ring(void *head, unsigned int size, bool rx)
285 {
286 	struct dma_desc *p = (struct dma_desc *)head;
287 	int i;
288 
289 	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
290 
291 	for (i = 0; i < size; i++) {
292 		u64 x;
293 
294 		x = *(u64 *)p;
295 		pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
296 			i, (unsigned int)virt_to_phys(p),
297 			(unsigned int)x, (unsigned int)(x >> 32),
298 			p->des2, p->des3);
299 		p++;
300 	}
301 	pr_info("\n");
302 }
303 
/* Descriptor operations for the normal (non-enhanced) DMA descriptor
 * format; the core selects this table when the HW lacks enhanced
 * descriptor support. */
const struct stmmac_desc_ops ndesc_ops = {
	.tx_status = ndesc_get_tx_status,
	.rx_status = ndesc_get_rx_status,
	.get_tx_len = ndesc_get_tx_len,
	.init_rx_desc = ndesc_init_rx_desc,
	.init_tx_desc = ndesc_init_tx_desc,
	.get_tx_owner = ndesc_get_tx_owner,
	.release_tx_desc = ndesc_release_tx_desc,
	.prepare_tx_desc = ndesc_prepare_tx_desc,
	.set_tx_ic = ndesc_set_tx_ic,
	.get_tx_ls = ndesc_get_tx_ls,
	.set_tx_owner = ndesc_set_tx_owner,
	.set_rx_owner = ndesc_set_rx_owner,
	.get_rx_frame_len = ndesc_get_rx_frame_len,
	.enable_tx_timestamp = ndesc_enable_tx_timestamp,
	.get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
	.get_timestamp = ndesc_get_timestamp,
	.get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
	.display_ring = ndesc_display_ring,
};
324