xref: /openbmc/linux/drivers/net/dsa/qca/qca8k-8xxx.c (revision faffb083)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
4  * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
5  * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
6  * Copyright (c) 2016 John Crispin <john@phrozen.org>
7  */
8 
9 #include <linux/module.h>
10 #include <linux/phy.h>
11 #include <linux/netdevice.h>
12 #include <linux/bitfield.h>
13 #include <linux/regmap.h>
14 #include <net/dsa.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_platform.h>
18 #include <linux/mdio.h>
19 #include <linux/phylink.h>
20 #include <linux/gpio/consumer.h>
21 #include <linux/etherdevice.h>
22 #include <linux/dsa/tag_qca.h>
23 
24 #include "qca8k.h"
25 
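/* Split a 32-bit switch register address into the page and the two MDIO
 * register offsets used below: the page is selected with a write to the
 * pseudo PHY at address 0x18, while the lo/hi 16-bit halves are accessed
 * at registers r1 and r1 + 1 of PHY address 0x10 | r2.
 */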
26 static void
27 qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
28 {
29 	regaddr >>= 1;
30 	*r1 = regaddr & 0x1e;
31 
32 	regaddr >>= 5;
33 	*r2 = regaddr & 0x7;
34 
35 	regaddr >>= 3;
36 	*page = regaddr & 0x3ff;
37 }
38 
39 static int
40 qca8k_mii_write_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
41 {
42 	int ret;
43 	u16 lo;
44 
45 	lo = val & 0xffff;
46 	ret = bus->write(bus, phy_id, regnum, lo);
47 	if (ret < 0)
48 		dev_err_ratelimited(&bus->dev,
49 				    "failed to write qca8k 32bit lo register\n");
50 
51 	return ret;
52 }
53 
54 static int
55 qca8k_mii_write_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
56 {
57 	int ret;
58 	u16 hi;
59 
60 	hi = (u16)(val >> 16);
61 	ret = bus->write(bus, phy_id, regnum, hi);
62 	if (ret < 0)
63 		dev_err_ratelimited(&bus->dev,
64 				    "failed to write qca8k 32bit hi register\n");
65 
66 	return ret;
67 }
68 
69 static int
70 qca8k_mii_read_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
71 {
72 	int ret;
73 
74 	ret = bus->read(bus, phy_id, regnum);
75 	if (ret < 0)
76 		goto err;
77 
78 	*val = ret & 0xffff;
79 	return 0;
80 
81 err:
82 	dev_err_ratelimited(&bus->dev,
83 			    "failed to read qca8k 32bit lo register\n");
84 	*val = 0;
85 
86 	return ret;
87 }
88 
89 static int
90 qca8k_mii_read_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
91 {
92 	int ret;
93 
94 	ret = bus->read(bus, phy_id, regnum);
95 	if (ret < 0)
96 		goto err;
97 
98 	*val = ret << 16;
99 	return 0;
100 
101 err:
102 	dev_err_ratelimited(&bus->dev,
103 			    "failed to read qca8k 32bit hi register\n");
104 	*val = 0;
105 
106 	return ret;
107 }
108 
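/* Read a full 32-bit switch register as two 16-bit MDIO reads,
 * lo half at regnum and hi half at regnum + 1.
 */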
109 static int
110 qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
111 {
112 	u32 hi, lo;
113 	int ret;
114 
115 	*val = 0;
116 
117 	ret = qca8k_mii_read_lo(bus, phy_id, regnum, &lo);
118 	if (ret < 0)
119 		goto err;
120 
121 	ret = qca8k_mii_read_hi(bus, phy_id, regnum + 1, &hi);
122 	if (ret < 0)
123 		goto err;
124 
125 	*val = lo | hi;
126 
127 err:
128 	return ret;
129 }
130 
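/* Write a full 32-bit value as two 16-bit MDIO writes; the hi half is
 * skipped if the lo write failed.
 */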
131 static void
132 qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
133 {
134 	if (qca8k_mii_write_lo(bus, phy_id, regnum, val) < 0)
135 		return;
136 
137 	qca8k_mii_write_hi(bus, phy_id, regnum + 1, val);
138 }
139 
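/* Select the register page for the following MDIO accesses. The last
 * page is cached to avoid redundant writes; after a page change a short
 * sleep gives the switch time to apply it.
 */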
140 static int
141 qca8k_set_page(struct qca8k_priv *priv, u16 page)
142 {
143 	u16 *cached_page = &priv->mdio_cache.page;
144 	struct mii_bus *bus = priv->bus;
145 	int ret;
146 
147 	if (page == *cached_page)
148 		return 0;
149 
150 	ret = bus->write(bus, 0x18, 0, page);
151 	if (ret < 0) {
152 		dev_err_ratelimited(&bus->dev,
153 				    "failed to set qca8k page\n");
154 		return ret;
155 	}
156 
157 	*cached_page = page;
158 	usleep_range(1000, 2000);
159 	return 0;
160 }
161 
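/* Handler invoked by the qca tagger when a mgmt ack packet is received.
 * It checks the sequence number against the pending request, copies any
 * read data into mgmt_eth_data and completes the pending read/write.
 */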
162 static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
163 {
164 	struct qca8k_mgmt_eth_data *mgmt_eth_data;
165 	struct qca8k_priv *priv = ds->priv;
166 	struct qca_mgmt_ethhdr *mgmt_ethhdr;
167 	u32 command;
168 	u8 len, cmd;
169 	int i;
170 
171 	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
172 	mgmt_eth_data = &priv->mgmt_eth_data;
173 
174 	command = get_unaligned_le32(&mgmt_ethhdr->command);
175 	cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
176 
177 	len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
178 	/* Special case for a len of 15, as this is the max value for len and needs
179 	 * to be increased before converting it from words to bytes.
180 	 */
181 	if (len == 15)
182 		len++;
183 
184 	/* We can ignore odd values, as we always round them up in the alloc function. */
185 	len *= sizeof(u16);
186 
187 	/* Make sure the seq matches the requested packet */
188 	if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
189 		mgmt_eth_data->ack = true;
190 
191 	if (cmd == MDIO_READ) {
192 		u32 *val = mgmt_eth_data->data;
193 
194 		*val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);
195 
196 		/* Get the rest of the 12 bytes of data.
197 		 * The read/write function will extract the requested data.
198 		 */
199 		if (len > QCA_HDR_MGMT_DATA1_LEN) {
200 			__le32 *data2 = (__le32 *)skb->data;
201 			int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
202 					     len - QCA_HDR_MGMT_DATA1_LEN);
203 
204 			val++;
205 
206 			for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
207 				*val = get_unaligned_le32(data2);
208 				val++;
209 				data2++;
210 			}
211 		}
212 	}
213 
214 	complete(&mgmt_eth_data->rw_done);
215 }
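/* Build the Ethernet mgmt packet used to read or write up to 32 bytes of
 * switch register data.
 */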
216 
217 static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
218 					       int priority, unsigned int len)
219 {
220 	struct qca_mgmt_ethhdr *mgmt_ethhdr;
221 	unsigned int real_len;
222 	struct sk_buff *skb;
223 	__le32 *data2;
224 	u32 command;
225 	u16 hdr;
226 	int i;
227 
228 	skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
229 	if (!skb)
230 		return NULL;
231 
232 	/* The hdr mgmt length value is in steps of word size.
233 	 * As an example, to process 4 bytes of data the correct length to set is 2.
234 	 * To process 8 bytes set 4, 12 bytes 6, 16 bytes 8...
235 	 *
236 	 * Odd values will always return the next size in the ack packet.
237 	 * (a length of 3 (6 bytes) will always return 8 bytes of data)
238 	 *
239 	 * This means that a value of 15 (0xf) actually means reading/writing 32 bytes
240 	 * of data.
241 	 *
242 	 * To correctly calculate the length we divide the requested len by word size
243 	 * and round up.
244 	 * In the ack function we can skip the odd check as we already handle the
245 	 * case here.
246 	 */
247 	real_len = DIV_ROUND_UP(len, sizeof(u16));
248 
249 	/* We check if the resulting len is odd and round up another time to
250 	 * the next size. (a length of 3 will be increased to 4 as the switch will
251 	 * always return 8 bytes)
252 	 */
253 	if (real_len % sizeof(u16) != 0)
254 		real_len++;
255 
256 	/* Max len value is 0xf (15) but the switch will always return the next size (32 bytes) */
257 	if (real_len == 16)
258 		real_len--;
259 
260 	skb_reset_mac_header(skb);
261 	skb_set_network_header(skb, skb->len);
262 
263 	mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
264 
265 	hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
266 	hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
267 	hdr |= QCA_HDR_XMIT_FROM_CPU;
268 	hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
269 	hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
270 
271 	command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
272 	command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
273 	command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
274 	command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
275 					   QCA_HDR_MGMT_CHECK_CODE_VAL);
276 
277 	put_unaligned_le32(command, &mgmt_ethhdr->command);
278 
279 	if (cmd == MDIO_WRITE)
280 		put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);
281 
282 	mgmt_ethhdr->hdr = htons(hdr);
283 
284 	data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
285 	if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
286 		int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
287 				     len - QCA_HDR_MGMT_DATA1_LEN);
288 
289 		val++;
290 
291 		for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
292 			put_unaligned_le32(*val, data2);
293 			data2++;
294 			val++;
295 		}
296 	}
297 
298 	return skb;
299 }
300 
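/* Stamp the mgmt packet with the sequence number used to match the ack */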
301 static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
302 {
303 	struct qca_mgmt_ethhdr *mgmt_ethhdr;
304 	u32 seq;
305 
306 	seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
307 	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
308 	put_unaligned_le32(seq, &mgmt_ethhdr->seq);
309 }
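/* Read len bytes starting at switch register reg over the Ethernet mgmt
 * protocol. Returns an error if no DSA master is operational so that the
 * caller can fall back to the MDIO path.
 */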
310 
311 static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
312 {
313 	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
314 	struct sk_buff *skb;
315 	bool ack;
316 	int ret;
317 
318 	skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
319 				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
320 	if (!skb)
321 		return -ENOMEM;
322 
323 	mutex_lock(&mgmt_eth_data->mutex);
324 
325 	/* Check if mgmt_master is operational */
326 	if (!priv->mgmt_master) {
327 		kfree_skb(skb);
328 		mutex_unlock(&mgmt_eth_data->mutex);
329 		return -EINVAL;
330 	}
331 
332 	skb->dev = priv->mgmt_master;
333 
334 	reinit_completion(&mgmt_eth_data->rw_done);
335 
336 	/* Increment seq_num and set it in the mdio pkt */
337 	mgmt_eth_data->seq++;
338 	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
339 	mgmt_eth_data->ack = false;
340 
341 	dev_queue_xmit(skb);
342 
343 	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
344 					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
345 
346 	*val = mgmt_eth_data->data[0];
347 	if (len > QCA_HDR_MGMT_DATA1_LEN)
348 		memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
349 
350 	ack = mgmt_eth_data->ack;
351 
352 	mutex_unlock(&mgmt_eth_data->mutex);
353 
354 	if (ret <= 0)
355 		return -ETIMEDOUT;
356 
357 	if (!ack)
358 		return -EINVAL;
359 
360 	return 0;
361 }
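/* Write len bytes starting at switch register reg over the Ethernet mgmt
 * protocol. Mirrors qca8k_read_eth() and has the same fallback semantics.
 */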
362 
363 static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
364 {
365 	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
366 	struct sk_buff *skb;
367 	bool ack;
368 	int ret;
369 
370 	skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
371 				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
372 	if (!skb)
373 		return -ENOMEM;
374 
375 	mutex_lock(&mgmt_eth_data->mutex);
376 
377 	/* Check if mgmt_master is operational */
378 	if (!priv->mgmt_master) {
379 		kfree_skb(skb);
380 		mutex_unlock(&mgmt_eth_data->mutex);
381 		return -EINVAL;
382 	}
383 
384 	skb->dev = priv->mgmt_master;
385 
386 	reinit_completion(&mgmt_eth_data->rw_done);
387 
388 	/* Increment seq_num and set it in the mdio pkt */
389 	mgmt_eth_data->seq++;
390 	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
391 	mgmt_eth_data->ack = false;
392 
393 	dev_queue_xmit(skb);
394 
395 	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
396 					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
397 
398 	ack = mgmt_eth_data->ack;
399 
400 	mutex_unlock(&mgmt_eth_data->mutex);
401 
402 	if (ret <= 0)
403 		return -ETIMEDOUT;
404 
405 	if (!ack)
406 		return -EINVAL;
407 
408 	return 0;
409 }
410 
411 static int
412 qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
413 {
414 	u32 val = 0;
415 	int ret;
416 
417 	ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
418 	if (ret)
419 		return ret;
420 
421 	val &= ~mask;
422 	val |= write_val;
423 
424 	return qca8k_write_eth(priv, reg, &val, sizeof(val));
425 }
426 
427 static int
428 qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
429 {
430 	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
431 	struct mii_bus *bus = priv->bus;
432 	u16 r1, r2, page;
433 	int ret;
434 
435 	if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
436 		return 0;
437 
438 	qca8k_split_addr(reg, &r1, &r2, &page);
439 
440 	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
441 
442 	ret = qca8k_set_page(priv, page);
443 	if (ret < 0)
444 		goto exit;
445 
446 	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
447 
448 exit:
449 	mutex_unlock(&bus->mdio_lock);
450 	return ret;
451 }
452 
453 static int
454 qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
455 {
456 	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
457 	struct mii_bus *bus = priv->bus;
458 	u16 r1, r2, page;
459 	int ret;
460 
461 	if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
462 		return 0;
463 
464 	qca8k_split_addr(reg, &r1, &r2, &page);
465 
466 	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
467 
468 	ret = qca8k_set_page(priv, page);
469 	if (ret < 0)
470 		goto exit;
471 
472 	qca8k_mii_write32(bus, 0x10 | r2, r1, val);
473 
474 exit:
475 	mutex_unlock(&bus->mdio_lock);
476 	return ret;
477 }
478 
479 static int
480 qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
481 {
482 	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
483 	struct mii_bus *bus = priv->bus;
484 	u16 r1, r2, page;
485 	u32 val;
486 	int ret;
487 
488 	if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
489 		return 0;
490 
491 	qca8k_split_addr(reg, &r1, &r2, &page);
492 
493 	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
494 
495 	ret = qca8k_set_page(priv, page);
496 	if (ret < 0)
497 		goto exit;
498 
499 	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
500 	if (ret < 0)
501 		goto exit;
502 
503 	val &= ~mask;
504 	val |= write_val;
505 	qca8k_mii_write32(bus, 0x10 | r2, r1, val);
506 
507 exit:
508 	mutex_unlock(&bus->mdio_lock);
509 
510 	return ret;
511 }
512 
513 static struct regmap_config qca8k_regmap_config = {
514 	.reg_bits = 16,
515 	.val_bits = 32,
516 	.reg_stride = 4,
517 	.max_register = 0x16ac, /* end MIB - Port6 range */
518 	.reg_read = qca8k_regmap_read,
519 	.reg_write = qca8k_regmap_write,
520 	.reg_update_bits = qca8k_regmap_update_bits,
521 	.rd_table = &qca8k_readable_table,
522 	.disable_locking = true, /* Locking is handled by qca8k read/write */
523 	.cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
524 };
525 
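/* Resend a copy of the prebuilt MDIO master read packet and return the
 * current value of the MDIO master control register; used to poll the
 * BUSY bit over the Ethernet mgmt protocol.
 */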
526 static int
527 qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
528 			struct sk_buff *read_skb, u32 *val)
529 {
530 	struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
531 	bool ack;
532 	int ret;
533 
534 	reinit_completion(&mgmt_eth_data->rw_done);
535 
536 	/* Increment seq_num and set it in the copy pkt */
537 	mgmt_eth_data->seq++;
538 	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
539 	mgmt_eth_data->ack = false;
540 
541 	dev_queue_xmit(skb);
542 
543 	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
544 					  QCA8K_ETHERNET_TIMEOUT);
545 
546 	ack = mgmt_eth_data->ack;
547 
548 	if (ret <= 0)
549 		return -ETIMEDOUT;
550 
551 	if (!ack)
552 		return -EINVAL;
553 
554 	*val = mgmt_eth_data->data[0];
555 
556 	return 0;
557 }
558 
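/* Access an internal PHY register through the switch MDIO master, with
 * all the transactions carried over the Ethernet mgmt protocol instead
 * of the MDIO bus.
 */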
559 static int
560 qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
561 		      int regnum, u16 data)
562 {
563 	struct sk_buff *write_skb, *clear_skb, *read_skb;
564 	struct qca8k_mgmt_eth_data *mgmt_eth_data;
565 	u32 write_val, clear_val = 0, val;
566 	struct net_device *mgmt_master;
567 	int ret, ret1;
568 	bool ack;
569 
570 	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
571 		return -EINVAL;
572 
573 	mgmt_eth_data = &priv->mgmt_eth_data;
574 
575 	write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
576 		    QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
577 		    QCA8K_MDIO_MASTER_REG_ADDR(regnum);
578 
579 	if (read) {
580 		write_val |= QCA8K_MDIO_MASTER_READ;
581 	} else {
582 		write_val |= QCA8K_MDIO_MASTER_WRITE;
583 		write_val |= QCA8K_MDIO_MASTER_DATA(data);
584 	}
585 
586 	/* Prealloc all the needed skbs before taking the lock */
587 	write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
588 					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
589 	if (!write_skb)
590 		return -ENOMEM;
591 
592 	clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
593 					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
594 	if (!clear_skb) {
595 		ret = -ENOMEM;
596 		goto err_clear_skb;
597 	}
598 
599 	read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
600 					   QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
601 	if (!read_skb) {
602 		ret = -ENOMEM;
603 		goto err_read_skb;
604 	}
605 
606 	/* Actually start the request:
607 	 * 1. Send the mdio master packet
608 	 * 2. Busy wait for the mdio master command
609 	 * 3. Get the data if we are reading
610 	 * 4. Reset the mdio master (even on error)
611 	 */
612 	mutex_lock(&mgmt_eth_data->mutex);
613 
614 	/* Check if mgmt_master is operational */
615 	mgmt_master = priv->mgmt_master;
616 	if (!mgmt_master) {
617 		mutex_unlock(&mgmt_eth_data->mutex);
618 		ret = -EINVAL;
619 		goto err_mgmt_master;
620 	}
621 
622 	read_skb->dev = mgmt_master;
623 	clear_skb->dev = mgmt_master;
624 	write_skb->dev = mgmt_master;
625 
626 	reinit_completion(&mgmt_eth_data->rw_done);
627 
628 	/* Increment seq_num and set it in the write pkt */
629 	mgmt_eth_data->seq++;
630 	qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
631 	mgmt_eth_data->ack = false;
632 
633 	dev_queue_xmit(write_skb);
634 
635 	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
636 					  QCA8K_ETHERNET_TIMEOUT);
637 
638 	ack = mgmt_eth_data->ack;
639 
640 	if (ret <= 0) {
641 		ret = -ETIMEDOUT;
642 		kfree_skb(read_skb);
643 		goto exit;
644 	}
645 
646 	if (!ack) {
647 		ret = -EINVAL;
648 		kfree_skb(read_skb);
649 		goto exit;
650 	}
651 
652 	ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
653 				!(val & QCA8K_MDIO_MASTER_BUSY), 0,
654 				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
655 				mgmt_eth_data, read_skb, &val);
656 
657 	if (ret < 0 && ret1 < 0) {
658 		ret = ret1;
659 		goto exit;
660 	}
661 
662 	if (read) {
663 		reinit_completion(&mgmt_eth_data->rw_done);
664 
665 		/* Increment seq_num and set it in the read pkt */
666 		mgmt_eth_data->seq++;
667 		qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
668 		mgmt_eth_data->ack = false;
669 
670 		dev_queue_xmit(read_skb);
671 
672 		ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
673 						  QCA8K_ETHERNET_TIMEOUT);
674 
675 		ack = mgmt_eth_data->ack;
676 
677 		if (ret <= 0) {
678 			ret = -ETIMEDOUT;
679 			goto exit;
680 		}
681 
682 		if (!ack) {
683 			ret = -EINVAL;
684 			goto exit;
685 		}
686 
687 		ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
688 	} else {
689 		kfree_skb(read_skb);
690 	}
691 exit:
692 	reinit_completion(&mgmt_eth_data->rw_done);
693 
694 	/* Increment seq_num and set it in the clear pkt */
695 	mgmt_eth_data->seq++;
696 	qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
697 	mgmt_eth_data->ack = false;
698 
699 	dev_queue_xmit(clear_skb);
700 
701 	wait_for_completion_timeout(&mgmt_eth_data->rw_done,
702 				    QCA8K_ETHERNET_TIMEOUT);
703 
704 	mutex_unlock(&mgmt_eth_data->mutex);
705 
706 	return ret;
707 
708 	/* Error handling before lock */
709 err_mgmt_master:
710 	kfree_skb(read_skb);
711 err_read_skb:
712 	kfree_skb(clear_skb);
713 err_clear_skb:
714 	kfree_skb(write_skb);
715 
716 	return ret;
717 }
718 
719 static u32
720 qca8k_port_to_phy(int port)
721 {
722 	/* From Andrew Lunn:
723 	 * Port 0 has no internal phy.
724 	 * Port 1 has an internal PHY at MDIO address 0.
725 	 * Port 2 has an internal PHY at MDIO address 1.
726 	 * ...
727 	 * Port 5 has an internal PHY at MDIO address 4.
728 	 * Port 6 has no internal PHY.
729 	 */
730 
731 	return port - 1;
732 }
733 
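/* Poll the hi half of the given switch register until the bits in mask
 * clear or the timeout expires; used to wait for the MDIO master BUSY
 * flag.
 */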
734 static int
735 qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
736 {
737 	u16 r1, r2, page;
738 	u32 val;
739 	int ret, ret1;
740 
741 	qca8k_split_addr(reg, &r1, &r2, &page);
742 
743 	ret = read_poll_timeout(qca8k_mii_read_hi, ret1, !(val & mask), 0,
744 				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
745 				bus, 0x10 | r2, r1 + 1, &val);
746 
747 	/* Check if qca8k_read has failed for a different reason
748 	 * before returning -ETIMEDOUT
749 	 */
750 	if (ret < 0 && ret1 < 0)
751 		return ret1;
752 
753 	return ret;
754 }
755 
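/* Write a PHY register through the switch MDIO master over the plain
 * MDIO bus (legacy path used when the Ethernet mgmt protocol is not
 * available).
 */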
756 static int
757 qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
758 {
759 	struct mii_bus *bus = priv->bus;
760 	u16 r1, r2, page;
761 	u32 val;
762 	int ret;
763 
764 	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
765 		return -EINVAL;
766 
767 	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
768 	      QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
769 	      QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
770 	      QCA8K_MDIO_MASTER_DATA(data);
771 
772 	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
773 
774 	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
775 
776 	ret = qca8k_set_page(priv, page);
777 	if (ret)
778 		goto exit;
779 
780 	qca8k_mii_write32(bus, 0x10 | r2, r1, val);
781 
782 	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
783 				   QCA8K_MDIO_MASTER_BUSY);
784 
785 exit:
786 	/* even if the busy_wait times out, try to clear the MASTER_EN */
787 	qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);
788 
789 	mutex_unlock(&bus->mdio_lock);
790 
791 	return ret;
792 }
793 
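/* Read a PHY register through the switch MDIO master over the plain
 * MDIO bus (legacy counterpart of the Ethernet mgmt read).
 */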
794 static int
795 qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
796 {
797 	struct mii_bus *bus = priv->bus;
798 	u16 r1, r2, page;
799 	u32 val;
800 	int ret;
801 
802 	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
803 		return -EINVAL;
804 
805 	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
806 	      QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
807 	      QCA8K_MDIO_MASTER_REG_ADDR(regnum);
808 
809 	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
810 
811 	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
812 
813 	ret = qca8k_set_page(priv, page);
814 	if (ret)
815 		goto exit;
816 
817 	qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, val);
818 
819 	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
820 				   QCA8K_MDIO_MASTER_BUSY);
821 	if (ret)
822 		goto exit;
823 
824 	ret = qca8k_mii_read_lo(bus, 0x10 | r2, r1, &val);
825 
826 exit:
827 	/* even if the busy_wait times out, try to clear the MASTER_EN */
828 	qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);
829 
830 	mutex_unlock(&bus->mdio_lock);
831 
832 	if (ret >= 0)
833 		ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
834 
835 	return ret;
836 }
837 
838 static int
839 qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
840 {
841 	struct qca8k_priv *priv = slave_bus->priv;
842 	int ret;
843 
844 	/* Use mdio over Ethernet when available, fall back to the legacy bus on error */
845 	ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
846 	if (!ret)
847 		return 0;
848 
849 	return qca8k_mdio_write(priv, phy, regnum, data);
850 }
851 
852 static int
853 qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
854 {
855 	struct qca8k_priv *priv = slave_bus->priv;
856 	int ret;
857 
858 	/* Use mdio over Ethernet when available, fall back to the legacy bus on error */
859 	ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
860 	if (ret >= 0)
861 		return ret;
862 
863 	ret = qca8k_mdio_read(priv, phy, regnum);
864 
865 	if (ret < 0)
866 		return 0xffff;
867 
868 	return ret;
869 }
870 
871 static int
872 qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
873 {
874 	port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
875 
876 	return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
877 }
878 
879 static int
880 qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
881 {
882 	port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
883 
884 	return qca8k_internal_mdio_read(slave_bus, port, regnum);
885 }
886 
887 static int
888 qca8k_mdio_register(struct qca8k_priv *priv)
889 {
890 	struct dsa_switch *ds = priv->ds;
891 	struct device_node *mdio;
892 	struct mii_bus *bus;
893 
894 	bus = devm_mdiobus_alloc(ds->dev);
895 	if (!bus)
896 		return -ENOMEM;
897 
898 	bus->priv = (void *)priv;
899 	snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
900 		 ds->dst->index, ds->index);
901 	bus->parent = ds->dev;
902 	bus->phy_mask = ~ds->phys_mii_mask;
903 	ds->slave_mii_bus = bus;
904 
905 	/* Check if the devicetree declares the port:phy mapping */
906 	mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
907 	if (of_device_is_available(mdio)) {
908 		bus->name = "qca8k slave mii";
909 		bus->read = qca8k_internal_mdio_read;
910 		bus->write = qca8k_internal_mdio_write;
911 		return devm_of_mdiobus_register(priv->dev, bus, mdio);
912 	}
913 
914 	/* If a mapping can't be found, the legacy mapping is used
915 	 * via the qca8k_port_to_phy function
916 	 */
917 	bus->name = "qca8k-legacy slave mii";
918 	bus->read = qca8k_legacy_mdio_read;
919 	bus->write = qca8k_legacy_mdio_write;
920 	return devm_mdiobus_register(priv->dev, bus);
921 }
922 
923 static int
924 qca8k_setup_mdio_bus(struct qca8k_priv *priv)
925 {
926 	u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
927 	struct device_node *ports, *port;
928 	phy_interface_t mode;
929 	int err;
930 
931 	ports = of_get_child_by_name(priv->dev->of_node, "ports");
932 	if (!ports)
933 		ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
934 
935 	if (!ports)
936 		return -EINVAL;
937 
938 	for_each_available_child_of_node(ports, port) {
939 		err = of_property_read_u32(port, "reg", &reg);
940 		if (err) {
941 			of_node_put(port);
942 			of_node_put(ports);
943 			return err;
944 		}
945 
946 		if (!dsa_is_user_port(priv->ds, reg))
947 			continue;
948 
949 		of_get_phy_mode(port, &mode);
950 
951 		if (of_property_read_bool(port, "phy-handle") &&
952 		    mode != PHY_INTERFACE_MODE_INTERNAL)
953 			external_mdio_mask |= BIT(reg);
954 		else
955 			internal_mdio_mask |= BIT(reg);
956 	}
957 
958 	of_node_put(ports);
959 	if (!external_mdio_mask && !internal_mdio_mask) {
960 		dev_err(priv->dev, "no PHYs are defined.\n");
961 		return -EINVAL;
962 	}
963 
964 	/* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
965 	 * the MDIO_MASTER register also _disconnects_ the external MDC
966 	 * passthrough to the internal PHYs. It's not possible to use both
967 	 * configurations at the same time!
968 	 *
969 	 * Because this came up during the review process:
970 	 * If the external mdio-bus driver is capable of magically disabling
971 	 * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
972 	 * accessors for the time being, it would be possible to pull this
973 	 * off.
974 	 */
975 	if (!!external_mdio_mask && !!internal_mdio_mask) {
976 		dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
977 		return -EINVAL;
978 	}
979 
980 	if (external_mdio_mask) {
981 		/* Make sure to disable the internal mdio bus in case
982 		 * a dt-overlay and driver reload have changed the configuration
983 		 */
984 
985 		return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
986 					 QCA8K_MDIO_MASTER_EN);
987 	}
988 
989 	return qca8k_mdio_register(priv);
990 }
991 
992 static int
993 qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
994 {
995 	u32 mask = 0;
996 	int ret = 0;
997 
998 	/* SoC specific settings for ipq8064.
999 	 * If more devices require this, consider adding
1000 	 * a dedicated binding.
1001 	 */
1002 	if (of_machine_is_compatible("qcom,ipq8064"))
1003 		mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
1004 
1005 	/* SoC specific settings for ipq8065 */
1006 	if (of_machine_is_compatible("qcom,ipq8065"))
1007 		mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
1008 
1009 	if (mask) {
1010 		ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
1011 				QCA8K_MAC_PWR_RGMII0_1_8V |
1012 				QCA8K_MAC_PWR_RGMII1_1_8V,
1013 				mask);
1014 	}
1015 
1016 	return ret;
1017 }
1018 
1019 static int qca8k_find_cpu_port(struct dsa_switch *ds)
1020 {
1021 	struct qca8k_priv *priv = ds->priv;
1022 
1023 	/* Find the connected cpu port. Valid ports are 0 and 6 */
1024 	if (dsa_is_cpu_port(ds, 0))
1025 		return 0;
1026 
1027 	dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
1028 
1029 	if (dsa_is_cpu_port(ds, 6))
1030 		return 6;
1031 
1032 	return -EINVAL;
1033 }
1034 
1035 static int
1036 qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
1037 {
1038 	const struct qca8k_match_data *data = priv->info;
1039 	struct device_node *node = priv->dev->of_node;
1040 	u32 val = 0;
1041 	int ret;
1042 
1043 	/* The QCA8327 requires the correct package mode to be set.
1044 	 * Its bigger brother, the QCA8328, has the 172 pin layout.
1045 	 * This should be applied by default but we set it just to make sure.
1046 	 */
1047 	if (priv->switch_id == QCA8K_ID_QCA8327) {
1048 		/* Set the correct package of 148 pin for QCA8327 */
1049 		if (data->reduced_package)
1050 			val |= QCA8327_PWS_PACKAGE148_EN;
1051 
1052 		ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
1053 				val);
1054 		if (ret)
1055 			return ret;
1056 	}
1057 
1058 	if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
1059 		val |= QCA8K_PWS_POWER_ON_SEL;
1060 
1061 	if (of_property_read_bool(node, "qca,led-open-drain")) {
1062 		if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
1063 			dev_err(priv->dev, "qca,led-open-drain requires qca,ignore-power-on-sel to be set.");
1064 			return -EINVAL;
1065 		}
1066 
1067 		val |= QCA8K_PWS_LED_OPEN_EN_CSR;
1068 	}
1069 
1070 	return qca8k_rmw(priv, QCA8K_REG_PWS,
1071 			QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
1072 			val);
1073 }
1074 
1075 static int
1076 qca8k_parse_port_config(struct qca8k_priv *priv)
1077 {
1078 	int port, cpu_port_index = -1, ret;
1079 	struct device_node *port_dn;
1080 	phy_interface_t mode;
1081 	struct dsa_port *dp;
1082 	u32 delay;
1083 
1084 	/* We have 2 CPU ports. Check them */
1085 	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
1086 		/* Skip all ports other than the CPU ports 0 and 6 */
1087 		if (port != 0 && port != 6)
1088 			continue;
1089 
1090 		dp = dsa_to_port(priv->ds, port);
1091 		port_dn = dp->dn;
1092 		cpu_port_index++;
1093 
1094 		if (!of_device_is_available(port_dn))
1095 			continue;
1096 
1097 		ret = of_get_phy_mode(port_dn, &mode);
1098 		if (ret)
1099 			continue;
1100 
1101 		switch (mode) {
1102 		case PHY_INTERFACE_MODE_RGMII:
1103 		case PHY_INTERFACE_MODE_RGMII_ID:
1104 		case PHY_INTERFACE_MODE_RGMII_TXID:
1105 		case PHY_INTERFACE_MODE_RGMII_RXID:
1106 		case PHY_INTERFACE_MODE_SGMII:
1107 			delay = 0;
1108 
1109 			if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
1110 				/* Switch regs accept values in ns, convert ps to ns */
1111 				delay = delay / 1000;
1112 			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1113 				 mode == PHY_INTERFACE_MODE_RGMII_TXID)
1114 				delay = 1;
1115 
1116 			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
1117 				dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
1118 				delay = 3;
1119 			}
1120 
1121 			priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
1122 
1123 			delay = 0;
1124 
1125 			if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
1126 				/* Switch regs accept values in ns, convert ps to ns */
1127 				delay = delay / 1000;
1128 			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1129 				 mode == PHY_INTERFACE_MODE_RGMII_RXID)
1130 				delay = 2;
1131 
1132 			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
1133 				dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
1134 				delay = 3;
1135 			}
1136 
1137 			priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
1138 
1139 			/* Skip sgmii parsing for rgmii* mode */
1140 			if (mode == PHY_INTERFACE_MODE_RGMII ||
1141 			    mode == PHY_INTERFACE_MODE_RGMII_ID ||
1142 			    mode == PHY_INTERFACE_MODE_RGMII_TXID ||
1143 			    mode == PHY_INTERFACE_MODE_RGMII_RXID)
1144 				break;
1145 
1146 			if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
1147 				priv->ports_config.sgmii_tx_clk_falling_edge = true;
1148 
1149 			if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
1150 				priv->ports_config.sgmii_rx_clk_falling_edge = true;
1151 
1152 			if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
1153 				priv->ports_config.sgmii_enable_pll = true;
1154 
1155 				if (priv->switch_id == QCA8K_ID_QCA8327) {
1156 					dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
1157 					priv->ports_config.sgmii_enable_pll = false;
1158 				}
1159 
1160 				if (priv->switch_revision < 2)
1161 					dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
1162 			}
1163 
1164 			break;
1165 		default:
1166 			continue;
1167 		}
1168 	}
1169 
1170 	return 0;
1171 }
1172 
1173 static void
1174 qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
1175 				      u32 reg)
1176 {
1177 	u32 delay, val = 0;
1178 	int ret;
1179 
1180 	/* The delay can be declared in 3 different ways:
1181 	 * phy mode set to rgmii with the standard internal-delay binding,
1182 	 * or the rgmii-id or rgmii-tx/rxid phy modes.
1183 	 * The parse logic sets a delay different than 0 only when one
1184 	 * of the 3 ways is used. In all other cases the delay is
1185 	 * not enabled. With ID or TX/RXID the delay is enabled and set
1186 	 * to the default and recommended value.
1187 	 */
1188 	if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
1189 		delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
1190 
1191 		val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
1192 			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
1193 	}
1194 
1195 	if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
1196 		delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
1197 
1198 		val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
1199 			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
1200 	}
1201 
1202 	/* Set RGMII delay based on the selected values */
1203 	ret = qca8k_rmw(priv, reg,
1204 			QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
1205 			QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
1206 			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
1207 			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
1208 			val);
1209 	if (ret)
1210 		dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
1211 			cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
1212 }
1213 
1214 static struct phylink_pcs *
1215 qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
1216 			     phy_interface_t interface)
1217 {
1218 	struct qca8k_priv *priv = ds->priv;
1219 	struct phylink_pcs *pcs = NULL;
1220 
1221 	switch (interface) {
1222 	case PHY_INTERFACE_MODE_SGMII:
1223 	case PHY_INTERFACE_MODE_1000BASEX:
1224 		switch (port) {
1225 		case 0:
1226 			pcs = &priv->pcs_port_0.pcs;
1227 			break;
1228 
1229 		case 6:
1230 			pcs = &priv->pcs_port_6.pcs;
1231 			break;
1232 		}
1233 		break;
1234 
1235 	default:
1236 		break;
1237 	}
1238 
1239 	return pcs;
1240 }
1241 
1242 static void
1243 qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
1244 			 const struct phylink_link_state *state)
1245 {
1246 	struct qca8k_priv *priv = ds->priv;
1247 	int cpu_port_index;
1248 	u32 reg;
1249 
1250 	switch (port) {
1251 	case 0: /* 1st CPU port */
1252 		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1253 		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1254 		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1255 		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1256 		    state->interface != PHY_INTERFACE_MODE_SGMII)
1257 			return;
1258 
1259 		reg = QCA8K_REG_PORT0_PAD_CTRL;
1260 		cpu_port_index = QCA8K_CPU_PORT0;
1261 		break;
1262 	case 1:
1263 	case 2:
1264 	case 3:
1265 	case 4:
1266 	case 5:
1267 		/* Internal PHY, nothing to do */
1268 		return;
1269 	case 6: /* 2nd CPU port / external PHY */
1270 		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1271 		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1272 		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1273 		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1274 		    state->interface != PHY_INTERFACE_MODE_SGMII &&
1275 		    state->interface != PHY_INTERFACE_MODE_1000BASEX)
1276 			return;
1277 
1278 		reg = QCA8K_REG_PORT6_PAD_CTRL;
1279 		cpu_port_index = QCA8K_CPU_PORT6;
1280 		break;
1281 	default:
1282 		dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
1283 		return;
1284 	}
1285 
1286 	if (port != 6 && phylink_autoneg_inband(mode)) {
1287 		dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
1288 			__func__);
1289 		return;
1290 	}
1291 
1292 	switch (state->interface) {
1293 	case PHY_INTERFACE_MODE_RGMII:
1294 	case PHY_INTERFACE_MODE_RGMII_ID:
1295 	case PHY_INTERFACE_MODE_RGMII_TXID:
1296 	case PHY_INTERFACE_MODE_RGMII_RXID:
1297 		qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
1298 
1299 		/* Configure rgmii delay */
1300 		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1301 
1302 		/* The QCA8337 requires the rgmii rx delay to be set for all ports.
1303 		 * This is enabled through PORT5_PAD_CTRL for all ports,
1304 		 * rather than individual port registers.
1305 		 */
1306 		if (priv->switch_id == QCA8K_ID_QCA8337)
1307 			qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
1308 				    QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
1309 		break;
1310 	case PHY_INTERFACE_MODE_SGMII:
1311 	case PHY_INTERFACE_MODE_1000BASEX:
1312 		/* Enable SGMII on the port */
1313 		qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
1314 		break;
1315 	default:
1316 		dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
1317 			phy_modes(state->interface), port);
1318 		return;
1319 	}
1320 }
1321 
1322 static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
1323 				   struct phylink_config *config)
1324 {
1325 	switch (port) {
1326 	case 0: /* 1st CPU port */
1327 		phy_interface_set_rgmii(config->supported_interfaces);
1328 		__set_bit(PHY_INTERFACE_MODE_SGMII,
1329 			  config->supported_interfaces);
1330 		break;
1331 
1332 	case 1:
1333 	case 2:
1334 	case 3:
1335 	case 4:
1336 	case 5:
1337 		/* Internal PHY */
1338 		__set_bit(PHY_INTERFACE_MODE_GMII,
1339 			  config->supported_interfaces);
1340 		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
1341 			  config->supported_interfaces);
1342 		break;
1343 
1344 	case 6: /* 2nd CPU port / external PHY */
1345 		phy_interface_set_rgmii(config->supported_interfaces);
1346 		__set_bit(PHY_INTERFACE_MODE_SGMII,
1347 			  config->supported_interfaces);
1348 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
1349 			  config->supported_interfaces);
1350 		break;
1351 	}
1352 
1353 	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1354 		MAC_10 | MAC_100 | MAC_1000FD;
1355 
1356 	config->legacy_pre_march2020 = false;
1357 }
1358 
1359 static void
1360 qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
1361 			    phy_interface_t interface)
1362 {
1363 	struct qca8k_priv *priv = ds->priv;
1364 
1365 	qca8k_port_set_status(priv, port, 0);
1366 }
1367 
1368 static void
1369 qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
1370 			  phy_interface_t interface, struct phy_device *phydev,
1371 			  int speed, int duplex, bool tx_pause, bool rx_pause)
1372 {
1373 	struct qca8k_priv *priv = ds->priv;
1374 	u32 reg;
1375 
1376 	if (phylink_autoneg_inband(mode)) {
1377 		reg = QCA8K_PORT_STATUS_LINK_AUTO;
1378 	} else {
1379 		switch (speed) {
1380 		case SPEED_10:
1381 			reg = QCA8K_PORT_STATUS_SPEED_10;
1382 			break;
1383 		case SPEED_100:
1384 			reg = QCA8K_PORT_STATUS_SPEED_100;
1385 			break;
1386 		case SPEED_1000:
1387 			reg = QCA8K_PORT_STATUS_SPEED_1000;
1388 			break;
1389 		default:
1390 			reg = QCA8K_PORT_STATUS_LINK_AUTO;
1391 			break;
1392 		}
1393 
1394 		if (duplex == DUPLEX_FULL)
1395 			reg |= QCA8K_PORT_STATUS_DUPLEX;
1396 
1397 		if (rx_pause || dsa_is_cpu_port(ds, port))
1398 			reg |= QCA8K_PORT_STATUS_RXFLOW;
1399 
1400 		if (tx_pause || dsa_is_cpu_port(ds, port))
1401 			reg |= QCA8K_PORT_STATUS_TXFLOW;
1402 	}
1403 
1404 	reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
1405 
1406 	qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
1407 }
1408 
1409 static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
1410 {
1411 	return container_of(pcs, struct qca8k_pcs, pcs);
1412 }
1413 
1414 static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
1415 				struct phylink_link_state *state)
1416 {
1417 	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1418 	int port = pcs_to_qca8k_pcs(pcs)->port;
1419 	u32 reg;
1420 	int ret;
1421 
1422 	ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
1423 	if (ret < 0) {
1424 		state->link = false;
1425 		return;
1426 	}
1427 
1428 	state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
1429 	state->an_complete = state->link;
1430 	state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
1431 	state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
1432 							   DUPLEX_HALF;
1433 
1434 	switch (reg & QCA8K_PORT_STATUS_SPEED) {
1435 	case QCA8K_PORT_STATUS_SPEED_10:
1436 		state->speed = SPEED_10;
1437 		break;
1438 	case QCA8K_PORT_STATUS_SPEED_100:
1439 		state->speed = SPEED_100;
1440 		break;
1441 	case QCA8K_PORT_STATUS_SPEED_1000:
1442 		state->speed = SPEED_1000;
1443 		break;
1444 	default:
1445 		state->speed = SPEED_UNKNOWN;
1446 		break;
1447 	}
1448 
1449 	if (reg & QCA8K_PORT_STATUS_RXFLOW)
1450 		state->pause |= MLO_PAUSE_RX;
1451 	if (reg & QCA8K_PORT_STATUS_TXFLOW)
1452 		state->pause |= MLO_PAUSE_TX;
1453 }
1454 
1455 static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
1456 			    phy_interface_t interface,
1457 			    const unsigned long *advertising,
1458 			    bool permit_pause_to_mac)
1459 {
1460 	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1461 	int cpu_port_index, ret, port;
1462 	u32 reg, val;
1463 
1464 	port = pcs_to_qca8k_pcs(pcs)->port;
1465 	switch (port) {
1466 	case 0:
1467 		reg = QCA8K_REG_PORT0_PAD_CTRL;
1468 		cpu_port_index = QCA8K_CPU_PORT0;
1469 		break;
1470 
1471 	case 6:
1472 		reg = QCA8K_REG_PORT6_PAD_CTRL;
1473 		cpu_port_index = QCA8K_CPU_PORT6;
1474 		break;
1475 
1476 	default:
1477 		WARN_ON(1);
1478 		return -EINVAL;
1479 	}
1480 
1481 	/* Enable/disable SerDes auto-negotiation as necessary */
1482 	ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
1483 	if (ret)
1484 		return ret;
1485 	if (phylink_autoneg_inband(mode))
1486 		val &= ~QCA8K_PWS_SERDES_AEN_DIS;
1487 	else
1488 		val |= QCA8K_PWS_SERDES_AEN_DIS;
1489 	qca8k_write(priv, QCA8K_REG_PWS, val);
1490 
1491 	/* Configure the SGMII parameters */
1492 	ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
1493 	if (ret)
1494 		return ret;
1495 
1496 	val |= QCA8K_SGMII_EN_SD;
1497 
1498 	if (priv->ports_config.sgmii_enable_pll)
1499 		val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
1500 		       QCA8K_SGMII_EN_TX;
1501 
1502 	if (dsa_is_cpu_port(priv->ds, port)) {
1503 		/* CPU port, we're talking to the CPU MAC, be a PHY */
1504 		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1505 		val |= QCA8K_SGMII_MODE_CTRL_PHY;
1506 	} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1507 		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1508 		val |= QCA8K_SGMII_MODE_CTRL_MAC;
1509 	} else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
1510 		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1511 		val |= QCA8K_SGMII_MODE_CTRL_BASEX;
1512 	}
1513 
1514 	qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
1515 
1516 	/* The original code reports port instability, as SGMII also
1517 	 * requires the delay to be set. Apply the advised values here or take them from DT.
1518 	 */
1519 	if (interface == PHY_INTERFACE_MODE_SGMII)
1520 		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1521 	/* For qca8327/qca8328/qca8334/qca8338 the sgmii is unique and the
1522 	 * falling edge is set by writing to the PORT0 PAD reg
1523 	 */
1524 	if (priv->switch_id == QCA8K_ID_QCA8327 ||
1525 	    priv->switch_id == QCA8K_ID_QCA8337)
1526 		reg = QCA8K_REG_PORT0_PAD_CTRL;
1527 
1528 	val = 0;
1529 
1530 	/* SGMII Clock phase configuration */
1531 	if (priv->ports_config.sgmii_rx_clk_falling_edge)
1532 		val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
1533 
1534 	if (priv->ports_config.sgmii_tx_clk_falling_edge)
1535 		val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
1536 
1537 	if (val)
1538 		ret = qca8k_rmw(priv, reg,
1539 				QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
1540 				QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
1541 				val);
1542 
1543 	return 0;
1544 }
1545 
1546 static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
1547 {
1548 }
1549 
1550 static const struct phylink_pcs_ops qca8k_pcs_ops = {
1551 	.pcs_get_state = qca8k_pcs_get_state,
1552 	.pcs_config = qca8k_pcs_config,
1553 	.pcs_an_restart = qca8k_pcs_an_restart,
1554 };
1555 
1556 static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
1557 			    int port)
1558 {
1559 	qpcs->pcs.ops = &qca8k_pcs_ops;
1560 
1561 	/* We don't have interrupts for link changes, so we need to poll */
1562 	qpcs->pcs.poll = true;
1563 	qpcs->priv = priv;
1564 	qpcs->port = port;
1565 }
1566 
1567 static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
1568 {
1569 	struct qca8k_mib_eth_data *mib_eth_data;
1570 	struct qca8k_priv *priv = ds->priv;
1571 	const struct qca8k_mib_desc *mib;
1572 	struct mib_ethhdr *mib_ethhdr;
1573 	__le32 *data2;
1574 	u8 port;
1575 	int i;
1576 
1577 	mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
1578 	mib_eth_data = &priv->mib_eth_data;
1579 
1580 	/* The switch autocasts every port. Ignore other packets and
1581 	 * parse only the requested one.
1582 	 */
1583 	port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
1584 	if (port != mib_eth_data->req_port)
1585 		goto exit;
1586 
1587 	data2 = (__le32 *)skb->data;
1588 
1589 	for (i = 0; i < priv->info->mib_count; i++) {
1590 		mib = &ar8327_mib[i];
1591 
1592 		/* The first 3 mib counters are present in the skb head */
1593 		if (i < 3) {
1594 			mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
1595 			continue;
1596 		}
1597 
1598 		/* Some mib counters are 64 bit wide */
1599 		if (mib->size == 2)
1600 			mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
1601 		else
1602 			mib_eth_data->data[i] = get_unaligned_le32(data2);
1603 
1604 		data2 += mib->size;
1605 	}
1606 
1607 exit:
1608 	/* Complete on receiving all the mib packets */
1609 	if (refcount_dec_and_test(&mib_eth_data->port_parsed))
1610 		complete(&mib_eth_data->rw_done);
1611 }
1612 
1613 static int
1614 qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
1615 {
1616 	struct dsa_port *dp = dsa_to_port(ds, port);
1617 	struct qca8k_mib_eth_data *mib_eth_data;
1618 	struct qca8k_priv *priv = ds->priv;
1619 	int ret;
1620 
1621 	mib_eth_data = &priv->mib_eth_data;
1622 
1623 	mutex_lock(&mib_eth_data->mutex);
1624 
1625 	reinit_completion(&mib_eth_data->rw_done);
1626 
1627 	mib_eth_data->req_port = dp->index;
1628 	mib_eth_data->data = data;
1629 	refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
1630 
1631 	mutex_lock(&priv->reg_mutex);
1632 
1633 	/* Send mib autocast request */
1634 	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
1635 				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
1636 				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
1637 				 QCA8K_MIB_BUSY);
1638 
1639 	mutex_unlock(&priv->reg_mutex);
1640 
1641 	if (ret)
1642 		goto exit;
1643 
1644 	ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
1645 
1646 exit:
1647 	mutex_unlock(&mib_eth_data->mutex);
1648 
1649 	return ret;
1650 }
1651 
1652 static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
1653 {
1654 	struct qca8k_priv *priv = ds->priv;
1655 
1656 	/* Communicate the switch revision to the internal phy driver.
1657 	 * Based on the switch revision, different values need to be
1658 	 * set in the dbg and mmd regs of the phy.
1659 	 * The first 2 bits are used to communicate the switch revision
1660 	 * to the phy driver.
1661 	 */
1662 	if (port > 0 && port < 6)
1663 		return priv->switch_revision;
1664 
1665 	return 0;
1666 }
1667 
1668 static enum dsa_tag_protocol
1669 qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
1670 		       enum dsa_tag_protocol mp)
1671 {
1672 	return DSA_TAG_PROTO_QCA;
1673 }
1674 
1675 static void
1676 qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
1677 		    bool operational)
1678 {
1679 	struct dsa_port *dp = master->dsa_ptr;
1680 	struct qca8k_priv *priv = ds->priv;
1681 
1682 	/* Ethernet MIB/MDIO is only supported for CPU port 0 */
1683 	if (dp->index != 0)
1684 		return;
1685 
1686 	mutex_lock(&priv->mgmt_eth_data.mutex);
1687 	mutex_lock(&priv->mib_eth_data.mutex);
1688 
1689 	priv->mgmt_master = operational ? (struct net_device *)master : NULL;
1690 
1691 	mutex_unlock(&priv->mib_eth_data.mutex);
1692 	mutex_unlock(&priv->mgmt_eth_data.mutex);
1693 }
1694 
1695 static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
1696 				      enum dsa_tag_protocol proto)
1697 {
1698 	struct qca_tagger_data *tagger_data;
1699 
1700 	switch (proto) {
1701 	case DSA_TAG_PROTO_QCA:
1702 		tagger_data = ds->tagger_data;
1703 
1704 		tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
1705 		tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
1706 
1707 		break;
1708 	default:
1709 		return -EOPNOTSUPP;
1710 	}
1711 
1712 	return 0;
1713 }
1714 
1715 static int
1716 qca8k_setup(struct dsa_switch *ds)
1717 {
1718 	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1719 	int cpu_port, ret, i;
1720 	u32 mask;
1721 
1722 	cpu_port = qca8k_find_cpu_port(ds);
1723 	if (cpu_port < 0) {
1724 		dev_err(priv->dev, "No cpu port configured on either port0 or port6");
1725 		return cpu_port;
1726 	}
1727 
1728 	/* Parse the CPU port config to be used later in phylink mac_config */
1729 	ret = qca8k_parse_port_config(priv);
1730 	if (ret)
1731 		return ret;
1732 
1733 	ret = qca8k_setup_mdio_bus(priv);
1734 	if (ret)
1735 		return ret;
1736 
1737 	ret = qca8k_setup_of_pws_reg(priv);
1738 	if (ret)
1739 		return ret;
1740 
1741 	ret = qca8k_setup_mac_pwr_sel(priv);
1742 	if (ret)
1743 		return ret;
1744 
1745 	qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
1746 	qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
1747 
1748 	/* Make sure MAC06 is disabled */
1749 	ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
1750 				QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
1751 	if (ret) {
1752 		dev_err(priv->dev, "failed disabling MAC06 exchange");
1753 		return ret;
1754 	}
1755 
1756 	/* Enable CPU Port */
1757 	ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
1758 			      QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
1759 	if (ret) {
1760 		dev_err(priv->dev, "failed enabling CPU port");
1761 		return ret;
1762 	}
1763 
1764 	/* Enable MIB counters */
1765 	ret = qca8k_mib_init(priv);
1766 	if (ret)
1767 		dev_warn(priv->dev, "mib init failed");
1768 
1769 	/* Initial setup of all ports */
1770 	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
1771 		/* Disable forwarding by default on all ports */
1772 		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1773 				QCA8K_PORT_LOOKUP_MEMBER, 0);
1774 		if (ret)
1775 			return ret;
1776 
1777 		/* Enable QCA header mode on all cpu ports */
1778 		if (dsa_is_cpu_port(ds, i)) {
1779 			ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
1780 					  FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
1781 					  FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
1782 			if (ret) {
1783 				dev_err(priv->dev, "failed enabling QCA header mode");
1784 				return ret;
1785 			}
1786 		}
1787 
1788 		/* Disable MAC by default on all user ports */
1789 		if (dsa_is_user_port(ds, i))
1790 			qca8k_port_set_status(priv, i, 0);
1791 	}
1792 
1793 	/* Forward all unknown frames to the CPU port for Linux processing.
1794 	 * Notice that in a multi-cpu config only one port should be set
1795 	 * for igmp, unknown, multicast and broadcast packets
1796 	 */
1797 	ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
1798 			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
1799 			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
1800 			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
1801 			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
1802 	if (ret)
1803 		return ret;
1804 
1805 	/* Set up the connection between the CPU port and user ports.
1806 	 * Configure port-specific switch settings.
1807 	 */
1808 	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
1809 		/* CPU port gets connected to all user ports of the switch */
1810 		if (dsa_is_cpu_port(ds, i)) {
1811 			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1812 					QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
1813 			if (ret)
1814 				return ret;
1815 		}
1816 
1817 		/* Individual user ports get connected to CPU port only */
1818 		if (dsa_is_user_port(ds, i)) {
1819 			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
1820 					QCA8K_PORT_LOOKUP_MEMBER,
1821 					BIT(cpu_port));
1822 			if (ret)
1823 				return ret;
1824 
1825 			/* Enable ARP Auto-learning by default */
1826 			ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
1827 					      QCA8K_PORT_LOOKUP_LEARN);
1828 			if (ret)
1829 				return ret;
1830 
1831 			/* For port based vlans to work we need to set the
1832 			 * default egress vid
1833 			 */
1834 			ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
1835 					QCA8K_EGREES_VLAN_PORT_MASK(i),
1836 					QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
1837 			if (ret)
1838 				return ret;
1839 
1840 			ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
1841 					  QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
1842 					  QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
1843 			if (ret)
1844 				return ret;
1845 		}
1846 
1847 		/* Port 5 of the qca8337 has some problems in flood conditions. The
1848 		 * original legacy driver had some specific buffer and priority settings
1849 		 * for the different ports suggested by the QCA switch team. Add these
1850 		 * missing settings to improve switch stability under load conditions.
1851 		 * This problem is limited to the qca8337; other qca8k switches are not affected.
1852 		 */
1853 		if (priv->switch_id == QCA8K_ID_QCA8337) {
1854 			switch (i) {
1855 			/* The 2 CPU ports and port 5 require a different
1856 			 * priority than any other port.
1857 			 */
1858 			case 0:
1859 			case 5:
1860 			case 6:
1861 				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
1862 					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
1863 					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
1864 					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
1865 					QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
1866 					QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
1867 					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
1868 				break;
1869 			default:
1870 				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
1871 					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
1872 					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
1873 					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
1874 					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
1875 			}
1876 			qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
1877 
1878 			mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
1879 			QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
1880 			QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
1881 			QCA8K_PORT_HOL_CTRL1_WRED_EN;
1882 			qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
1883 				  QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
1884 				  QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
1885 				  QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
1886 				  QCA8K_PORT_HOL_CTRL1_WRED_EN,
1887 				  mask);
1888 		}
1889 	}
1890 
1891 	/* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
1892 	if (priv->switch_id == QCA8K_ID_QCA8327) {
1893 		mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
1894 		       QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
1895 		qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
1896 			  QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
1897 			  QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
1898 			  mask);
1899 	}
1900 
1901 	/* Set up our port MTUs to match the power-on defaults */
1902 	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
1903 	if (ret)
1904 		dev_warn(priv->dev, "failed setting MTU settings");
1905 
1906 	/* Flush the FDB table */
1907 	qca8k_fdb_flush(priv);
1908 
1909 	/* Set the min and max ageing values supported */
1910 	ds->ageing_time_min = 7000;
1911 	ds->ageing_time_max = 458745000;
1912 
1913 	/* Set max number of LAGs supported */
1914 	ds->num_lag_ids = QCA8K_NUM_LAGS;
1915 
1916 	return 0;
1917 }
1918 
1919 static const struct dsa_switch_ops qca8k_switch_ops = {
1920 	.get_tag_protocol	= qca8k_get_tag_protocol,
1921 	.setup			= qca8k_setup,
1922 	.get_strings		= qca8k_get_strings,
1923 	.get_ethtool_stats	= qca8k_get_ethtool_stats,
1924 	.get_sset_count		= qca8k_get_sset_count,
1925 	.set_ageing_time	= qca8k_set_ageing_time,
1926 	.get_mac_eee		= qca8k_get_mac_eee,
1927 	.set_mac_eee		= qca8k_set_mac_eee,
1928 	.port_enable		= qca8k_port_enable,
1929 	.port_disable		= qca8k_port_disable,
1930 	.port_change_mtu	= qca8k_port_change_mtu,
1931 	.port_max_mtu		= qca8k_port_max_mtu,
1932 	.port_stp_state_set	= qca8k_port_stp_state_set,
1933 	.port_bridge_join	= qca8k_port_bridge_join,
1934 	.port_bridge_leave	= qca8k_port_bridge_leave,
1935 	.port_fast_age		= qca8k_port_fast_age,
1936 	.port_fdb_add		= qca8k_port_fdb_add,
1937 	.port_fdb_del		= qca8k_port_fdb_del,
1938 	.port_fdb_dump		= qca8k_port_fdb_dump,
1939 	.port_mdb_add		= qca8k_port_mdb_add,
1940 	.port_mdb_del		= qca8k_port_mdb_del,
1941 	.port_mirror_add	= qca8k_port_mirror_add,
1942 	.port_mirror_del	= qca8k_port_mirror_del,
1943 	.port_vlan_filtering	= qca8k_port_vlan_filtering,
1944 	.port_vlan_add		= qca8k_port_vlan_add,
1945 	.port_vlan_del		= qca8k_port_vlan_del,
1946 	.phylink_get_caps	= qca8k_phylink_get_caps,
1947 	.phylink_mac_select_pcs	= qca8k_phylink_mac_select_pcs,
1948 	.phylink_mac_config	= qca8k_phylink_mac_config,
1949 	.phylink_mac_link_down	= qca8k_phylink_mac_link_down,
1950 	.phylink_mac_link_up	= qca8k_phylink_mac_link_up,
1951 	.get_phy_flags		= qca8k_get_phy_flags,
1952 	.port_lag_join		= qca8k_port_lag_join,
1953 	.port_lag_leave		= qca8k_port_lag_leave,
1954 	.master_state_change	= qca8k_master_change,
1955 	.connect_tag_protocol	= qca8k_connect_tag_protocol,
1956 };
1957 
1958 static int
1959 qca8k_sw_probe(struct mdio_device *mdiodev)
1960 {
1961 	struct qca8k_priv *priv;
1962 	int ret;
1963 
1964 	/* allocate the private data struct so that we can probe the switch's
1965 	 * ID register
1966 	 */
1967 	priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
1968 	if (!priv)
1969 		return -ENOMEM;
1970 
1971 	priv->bus = mdiodev->bus;
1972 	priv->dev = &mdiodev->dev;
1973 	priv->info = of_device_get_match_data(priv->dev);
1974 
1975 	priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
1976 						   GPIOD_ASIS);
1977 	if (IS_ERR(priv->reset_gpio))
1978 		return PTR_ERR(priv->reset_gpio);
1979 
1980 	if (priv->reset_gpio) {
1981 		gpiod_set_value_cansleep(priv->reset_gpio, 1);
1982 		/* The active low duration must be greater than 10 ms
1983 		 * and checkpatch.pl wants 20 ms.
1984 		 */
1985 		msleep(20);
1986 		gpiod_set_value_cansleep(priv->reset_gpio, 0);
1987 	}
1988 
1989 	/* Start by setting up the register mapping */
1990 	priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
1991 					&qca8k_regmap_config);
1992 	if (IS_ERR(priv->regmap)) {
1993 		dev_err(priv->dev, "regmap initialization failed");
1994 		return PTR_ERR(priv->regmap);
1995 	}
1996 
1997 	priv->mdio_cache.page = 0xffff;
1998 
1999 	/* Check the detected switch id */
2000 	ret = qca8k_read_switch_id(priv);
2001 	if (ret)
2002 		return ret;
2003 
2004 	priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
2005 	if (!priv->ds)
2006 		return -ENOMEM;
2007 
2008 	mutex_init(&priv->mgmt_eth_data.mutex);
2009 	init_completion(&priv->mgmt_eth_data.rw_done);
2010 
2011 	mutex_init(&priv->mib_eth_data.mutex);
2012 	init_completion(&priv->mib_eth_data.rw_done);
2013 
2014 	priv->ds->dev = &mdiodev->dev;
2015 	priv->ds->num_ports = QCA8K_NUM_PORTS;
2016 	priv->ds->priv = priv;
2017 	priv->ds->ops = &qca8k_switch_ops;
2018 	mutex_init(&priv->reg_mutex);
2019 	dev_set_drvdata(&mdiodev->dev, priv);
2020 
2021 	return dsa_register_switch(priv->ds);
2022 }
2023 
2024 static void
2025 qca8k_sw_remove(struct mdio_device *mdiodev)
2026 {
2027 	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
2028 	int i;
2029 
2030 	if (!priv)
2031 		return;
2032 
2033 	for (i = 0; i < QCA8K_NUM_PORTS; i++)
2034 		qca8k_port_set_status(priv, i, 0);
2035 
2036 	dsa_unregister_switch(priv->ds);
2037 }
2038 
2039 static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
2040 {
2041 	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
2042 
2043 	if (!priv)
2044 		return;
2045 
2046 	dsa_switch_shutdown(priv->ds);
2047 
2048 	dev_set_drvdata(&mdiodev->dev, NULL);
2049 }
2050 
2051 #ifdef CONFIG_PM_SLEEP
2052 static void
2053 qca8k_set_pm(struct qca8k_priv *priv, int enable)
2054 {
2055 	int port;
2056 
2057 	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
2058 		/* Do not enable on resume if the port was
2059 		 * disabled before.
2060 		 */
2061 		if (!(priv->port_enabled_map & BIT(port)))
2062 			continue;
2063 
2064 		qca8k_port_set_status(priv, port, enable);
2065 	}
2066 }
2067 
2068 static int qca8k_suspend(struct device *dev)
2069 {
2070 	struct qca8k_priv *priv = dev_get_drvdata(dev);
2071 
2072 	qca8k_set_pm(priv, 0);
2073 
2074 	return dsa_switch_suspend(priv->ds);
2075 }
2076 
2077 static int qca8k_resume(struct device *dev)
2078 {
2079 	struct qca8k_priv *priv = dev_get_drvdata(dev);
2080 
2081 	qca8k_set_pm(priv, 1);
2082 
2083 	return dsa_switch_resume(priv->ds);
2084 }
2085 #endif /* CONFIG_PM_SLEEP */
2086 
2087 static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
2088 			 qca8k_suspend, qca8k_resume);
2089 
2090 static const struct qca8k_info_ops qca8xxx_ops = {
2091 	.autocast_mib = qca8k_get_ethtool_stats_eth,
2092 	.read_eth = qca8k_read_eth,
2093 	.write_eth = qca8k_write_eth,
2094 };
2095 
2096 static const struct qca8k_match_data qca8327 = {
2097 	.id = QCA8K_ID_QCA8327,
2098 	.reduced_package = true,
2099 	.mib_count = QCA8K_QCA832X_MIB_COUNT,
2100 	.ops = &qca8xxx_ops,
2101 };
2102 
2103 static const struct qca8k_match_data qca8328 = {
2104 	.id = QCA8K_ID_QCA8327,
2105 	.mib_count = QCA8K_QCA832X_MIB_COUNT,
2106 	.ops = &qca8xxx_ops,
2107 };
2108 
2109 static const struct qca8k_match_data qca833x = {
2110 	.id = QCA8K_ID_QCA8337,
2111 	.mib_count = QCA8K_QCA833X_MIB_COUNT,
2112 	.ops = &qca8xxx_ops,
2113 };
2114 
2115 static const struct of_device_id qca8k_of_match[] = {
2116 	{ .compatible = "qca,qca8327", .data = &qca8327 },
2117 	{ .compatible = "qca,qca8328", .data = &qca8328 },
2118 	{ .compatible = "qca,qca8334", .data = &qca833x },
2119 	{ .compatible = "qca,qca8337", .data = &qca833x },
2120 	{ /* sentinel */ },
2121 };
2122 
2123 static struct mdio_driver qca8kmdio_driver = {
2124 	.probe  = qca8k_sw_probe,
2125 	.remove = qca8k_sw_remove,
2126 	.shutdown = qca8k_sw_shutdown,
2127 	.mdiodrv.driver = {
2128 		.name = "qca8k",
2129 		.of_match_table = qca8k_of_match,
2130 		.pm = &qca8k_pm_ops,
2131 	},
2132 };
2133 
2134 mdio_module_driver(qca8kmdio_driver);
2135 
2136 MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
2137 MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
2138 MODULE_LICENSE("GPL v2");
2139 MODULE_ALIAS("platform:qca8k");
2140