xref: /openbmc/linux/drivers/net/dsa/qca/qca8k-8xxx.c (revision c4a7b9b5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
4  * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
5  * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
6  * Copyright (c) 2016 John Crispin <john@phrozen.org>
7  */
8 
9 #include <linux/module.h>
10 #include <linux/phy.h>
11 #include <linux/netdevice.h>
12 #include <linux/bitfield.h>
13 #include <linux/regmap.h>
14 #include <net/dsa.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_platform.h>
18 #include <linux/mdio.h>
19 #include <linux/phylink.h>
20 #include <linux/gpio/consumer.h>
21 #include <linux/etherdevice.h>
22 #include <linux/dsa/tag_qca.h>
23 
24 #include "qca8k.h"
25 
26 static void
27 qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
28 {
29 	regaddr >>= 1;
30 	*r1 = regaddr & 0x1e;
31 
32 	regaddr >>= 5;
33 	*r2 = regaddr & 0x7;
34 
35 	regaddr >>= 3;
36 	*page = regaddr & 0x3ff;
37 }
38 
39 static int
40 qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
41 {
42 	u16 *cached_lo = &priv->mdio_cache.lo;
43 	struct mii_bus *bus = priv->bus;
44 	int ret;
45 
46 	if (lo == *cached_lo)
47 		return 0;
48 
49 	ret = bus->write(bus, phy_id, regnum, lo);
50 	if (ret < 0)
51 		dev_err_ratelimited(&bus->dev,
52 				    "failed to write qca8k 32bit lo register\n");
53 
54 	*cached_lo = lo;
55 	return 0;
56 }
57 
58 static int
59 qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
60 {
61 	u16 *cached_hi = &priv->mdio_cache.hi;
62 	struct mii_bus *bus = priv->bus;
63 	int ret;
64 
65 	if (hi == *cached_hi)
66 		return 0;
67 
68 	ret = bus->write(bus, phy_id, regnum, hi);
69 	if (ret < 0)
70 		dev_err_ratelimited(&bus->dev,
71 				    "failed to write qca8k 32bit hi register\n");
72 
73 	*cached_hi = hi;
74 	return 0;
75 }
76 
77 static int
78 qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
79 {
80 	int ret;
81 
82 	ret = bus->read(bus, phy_id, regnum);
83 	if (ret >= 0) {
84 		*val = ret;
85 		ret = bus->read(bus, phy_id, regnum + 1);
86 		*val |= ret << 16;
87 	}
88 
89 	if (ret < 0) {
90 		dev_err_ratelimited(&bus->dev,
91 				    "failed to read qca8k 32bit register\n");
92 		*val = 0;
93 		return ret;
94 	}
95 
96 	return 0;
97 }
98 
99 static void
100 qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
101 {
102 	u16 lo, hi;
103 	int ret;
104 
105 	lo = val & 0xffff;
106 	hi = (u16)(val >> 16);
107 
108 	ret = qca8k_set_lo(priv, phy_id, regnum, lo);
109 	if (ret >= 0)
110 		ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
111 }
112 
113 static int
114 qca8k_set_page(struct qca8k_priv *priv, u16 page)
115 {
116 	u16 *cached_page = &priv->mdio_cache.page;
117 	struct mii_bus *bus = priv->bus;
118 	int ret;
119 
120 	if (page == *cached_page)
121 		return 0;
122 
123 	ret = bus->write(bus, 0x18, 0, page);
124 	if (ret < 0) {
125 		dev_err_ratelimited(&bus->dev,
126 				    "failed to set qca8k page\n");
127 		return ret;
128 	}
129 
130 	*cached_page = page;
131 	usleep_range(1000, 2000);
132 	return 0;
133 }
134 
/* Handle an ack frame for a pending mgmt read/write request.
 *
 * NOTE(review): presumably invoked from the tag_qca receive path when a
 * management-header frame arrives - the registration is not visible in
 * this chunk, confirm against tag_qca.
 */
static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	struct qca8k_priv *priv = ds->priv;
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	u8 len, cmd;

	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
	mgmt_eth_data = &priv->mgmt_eth_data;

	cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
	len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);

	/* Make sure the seq match the requested packet */
	if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
		mgmt_eth_data->ack = true;

	if (cmd == MDIO_READ) {
		/* The first 4 byte of the payload are carried in the mgmt
		 * header itself.
		 */
		mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;

		/* Get the rest of the 12 byte of data.
		 * The read/write function will extract the requested data.
		 */
		if (len > QCA_HDR_MGMT_DATA1_LEN)
			memcpy(mgmt_eth_data->data + 1, skb->data,
			       QCA_HDR_MGMT_DATA2_LEN);
	}

	/* Wake the waiter only after all data has been copied out */
	complete(&mgmt_eth_data->rw_done);
}
165 
/* Build a mgmt Ethernet frame carrying a register read/write request.
 *
 * @cmd: MDIO_READ or MDIO_WRITE
 * @reg: switch register address
 * @val: value(s) to write (ignored for reads); for len >
 *       QCA_HDR_MGMT_DATA1_LEN the extra words are taken from val[1..]
 * @priority: value for the QCA header priority field
 * @len: payload length in byte (up to 16)
 *
 * Returns the allocated skb, or NULL on allocation failure. The caller
 * owns the skb.
 */
static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
					       int priority, unsigned int len)
{
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	unsigned int real_len;
	struct sk_buff *skb;
	u32 *data2;
	u16 hdr;

	skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
	if (!skb)
		return NULL;

	/* Max value for len reg is 15 (0xf) but the switch actually return 16 byte
	 * Actually for some reason the steps are:
	 * 0: nothing
	 * 1-4: first 4 byte
	 * 5-6: first 12 byte
	 * 7-15: all 16 byte
	 */
	if (len == 16)
		real_len = 15;
	else
		real_len = len;

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->len);

	mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);

	/* Compose the QCA tag: version, priority, from-CPU, destination
	 * port bitmap and the read/write-register frame type.
	 */
	hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
	hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
	hdr |= QCA_HDR_XMIT_FROM_CPU;
	hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
	hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);

	mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
	mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
	mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
	mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
					   QCA_HDR_MGMT_CHECK_CODE_VAL);

	/* First 4 byte of the payload travel inside the mgmt header */
	if (cmd == MDIO_WRITE)
		mgmt_ethhdr->mdio_data = *val;

	mgmt_ethhdr->hdr = htons(hdr);

	/* Remaining payload (up to 12 byte) plus padding follow the header */
	data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
	if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
		memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);

	return skb;
}
219 
220 static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
221 {
222 	struct qca_mgmt_ethhdr *mgmt_ethhdr;
223 
224 	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
225 	mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
226 }
227 
/* Read @len byte from switch register @reg over the Ethernet mgmt
 * protocol. Returns 0 on success, -ENOMEM/-EINVAL/-ETIMEDOUT on failure.
 */
static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	/* The mutex serializes requests: one outstanding seq at a time */
	mutex_lock(&mgmt_eth_data->mutex);

	/* Check mgmt_master if is operational */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the mdio pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	/* skb ownership passes to the network stack here */
	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	/* Copy the data out while still holding the mutex; it is only
	 * meaningful if ack is set and no timeout occurred (checked below).
	 */
	*val = mgmt_eth_data->data[0];
	if (len > QCA_HDR_MGMT_DATA1_LEN)
		memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}
279 
/* Write @len byte to switch register @reg over the Ethernet mgmt
 * protocol. Returns 0 on success, -ENOMEM/-EINVAL/-ETIMEDOUT on failure.
 */
static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	/* The mutex serializes requests: one outstanding seq at a time */
	mutex_lock(&mgmt_eth_data->mutex);

	/* Check mgmt_master if is operational */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the mdio pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	/* skb ownership passes to the network stack here */
	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}
327 
328 static int
329 qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
330 {
331 	u32 val = 0;
332 	int ret;
333 
334 	ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
335 	if (ret)
336 		return ret;
337 
338 	val &= ~mask;
339 	val |= write_val;
340 
341 	return qca8k_write_eth(priv, reg, &val, sizeof(val));
342 }
343 
344 static int
345 qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
346 {
347 	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
348 	struct mii_bus *bus = priv->bus;
349 	u16 r1, r2, page;
350 	int ret;
351 
352 	if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
353 		return 0;
354 
355 	qca8k_split_addr(reg, &r1, &r2, &page);
356 
357 	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
358 
359 	ret = qca8k_set_page(priv, page);
360 	if (ret < 0)
361 		goto exit;
362 
363 	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
364 
365 exit:
366 	mutex_unlock(&bus->mdio_lock);
367 	return ret;
368 }
369 
370 static int
371 qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
372 {
373 	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
374 	struct mii_bus *bus = priv->bus;
375 	u16 r1, r2, page;
376 	int ret;
377 
378 	if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
379 		return 0;
380 
381 	qca8k_split_addr(reg, &r1, &r2, &page);
382 
383 	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
384 
385 	ret = qca8k_set_page(priv, page);
386 	if (ret < 0)
387 		goto exit;
388 
389 	qca8k_mii_write32(priv, 0x10 | r2, r1, val);
390 
391 exit:
392 	mutex_unlock(&bus->mdio_lock);
393 	return ret;
394 }
395 
/* regmap update_bits op: try the Ethernet mgmt protocol first and fall
 * back to a read-modify-write over classic paged MDIO access.
 */
static int
qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
		return 0;

	qca8k_split_addr(reg, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret < 0)
		goto exit;

	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
	if (ret < 0)
		goto exit;

	/* Clear the masked bits and set the requested ones */
	val &= ~mask;
	val |= write_val;
	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

exit:
	mutex_unlock(&bus->mdio_lock);

	return ret;
}
429 
/* regmap over the custom read/write/update_bits ops above. 16bit
 * register addresses, 32bit values, 4 byte stride.
 */
static struct regmap_config qca8k_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x16ac, /* end MIB - Port6 range */
	.reg_read = qca8k_regmap_read,
	.reg_write = qca8k_regmap_write,
	.reg_update_bits = qca8k_regmap_update_bits,
	.rd_table = &qca8k_readable_table,
	.disable_locking = true, /* Locking is handled by qca8k read/write */
	.cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
};
442 
443 static int
444 qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
445 			struct sk_buff *read_skb, u32 *val)
446 {
447 	struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
448 	bool ack;
449 	int ret;
450 
451 	reinit_completion(&mgmt_eth_data->rw_done);
452 
453 	/* Increment seq_num and set it in the copy pkt */
454 	mgmt_eth_data->seq++;
455 	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
456 	mgmt_eth_data->ack = false;
457 
458 	dev_queue_xmit(skb);
459 
460 	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
461 					  QCA8K_ETHERNET_TIMEOUT);
462 
463 	ack = mgmt_eth_data->ack;
464 
465 	if (ret <= 0)
466 		return -ETIMEDOUT;
467 
468 	if (!ack)
469 		return -EINVAL;
470 
471 	*val = mgmt_eth_data->data[0];
472 
473 	return 0;
474 }
475 
/* Perform a PHY read or write through the switch MDIO master, driven
 * over the Ethernet mgmt protocol.
 *
 * @read: true for a PHY read, false for a write
 * @phy/@regnum: PHY address and register
 * @data: value to write (ignored for reads)
 *
 * Returns the (masked) read value for reads, 0 for successful writes,
 * or a negative errno.
 */
static int
qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
		      int regnum, u16 data)
{
	struct sk_buff *write_skb, *clear_skb, *read_skb;
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	u32 write_val, clear_val = 0, val;
	struct net_device *mgmt_master;
	int ret, ret1;
	bool ack;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	mgmt_eth_data = &priv->mgmt_eth_data;

	/* Command word for the MDIO master: busy + enable + target */
	write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
		    QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
		    QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	if (read) {
		write_val |= QCA8K_MDIO_MASTER_READ;
	} else {
		write_val |= QCA8K_MDIO_MASTER_WRITE;
		write_val |= QCA8K_MDIO_MASTER_DATA(data);
	}

	/* Prealloc all the needed skb before the lock */
	write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
	if (!write_skb)
		return -ENOMEM;

	clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	if (!clear_skb) {
		ret = -ENOMEM;
		goto err_clear_skb;
	}

	read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
					   QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	if (!read_skb) {
		ret = -ENOMEM;
		goto err_read_skb;
	}

	/* Actually start the request:
	 * 1. Send mdio master packet
	 * 2. Busy Wait for mdio master command
	 * 3. Get the data if we are reading
	 * 4. Reset the mdio master (even with error)
	 */
	mutex_lock(&mgmt_eth_data->mutex);

	/* Check if mgmt_master is operational */
	mgmt_master = priv->mgmt_master;
	if (!mgmt_master) {
		mutex_unlock(&mgmt_eth_data->mutex);
		ret = -EINVAL;
		goto err_mgmt_master;
	}

	read_skb->dev = mgmt_master;
	clear_skb->dev = mgmt_master;
	write_skb->dev = mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the write pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(write_skb);

	/* NOTE(review): other wait sites wrap QCA8K_ETHERNET_TIMEOUT in
	 * msecs_to_jiffies(); here it is passed raw - confirm intended unit.
	 */
	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  QCA8K_ETHERNET_TIMEOUT);

	ack = mgmt_eth_data->ack;

	if (ret <= 0) {
		ret = -ETIMEDOUT;
		kfree_skb(read_skb);
		goto exit;
	}

	if (!ack) {
		ret = -EINVAL;
		kfree_skb(read_skb);
		goto exit;
	}

	/* Poll until the MDIO master clears the busy bit; ret1 carries the
	 * last qca8k_phy_eth_busy_wait() result, ret the poll timeout state.
	 */
	ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
				!(val & QCA8K_MDIO_MASTER_BUSY), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				mgmt_eth_data, read_skb, &val);

	/* Report the underlying error rather than the poll timeout when
	 * the helper itself failed.
	 */
	if (ret < 0 && ret1 < 0) {
		ret = ret1;
		goto exit;
	}

	if (read) {
		reinit_completion(&mgmt_eth_data->rw_done);

		/* Increment seq_num and set it in the read pkt */
		mgmt_eth_data->seq++;
		qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
		mgmt_eth_data->ack = false;

		/* read_skb ownership passes to the stack here */
		dev_queue_xmit(read_skb);

		ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
						  QCA8K_ETHERNET_TIMEOUT);

		ack = mgmt_eth_data->ack;

		if (ret <= 0) {
			ret = -ETIMEDOUT;
			goto exit;
		}

		if (!ack) {
			ret = -EINVAL;
			goto exit;
		}

		ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
	} else {
		/* Writes never transmit read_skb; free it here */
		kfree_skb(read_skb);
	}
exit:
	/* Step 4: always reset the MDIO master, even on error */
	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the clear pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(clear_skb);

	wait_for_completion_timeout(&mgmt_eth_data->rw_done,
				    QCA8K_ETHERNET_TIMEOUT);

	mutex_unlock(&mgmt_eth_data->mutex);

	return ret;

	/* Error handling before lock */
err_mgmt_master:
	kfree_skb(read_skb);
err_read_skb:
	kfree_skb(clear_skb);
err_clear_skb:
	kfree_skb(write_skb);

	return ret;
}
635 
636 static u32
637 qca8k_port_to_phy(int port)
638 {
639 	/* From Andrew Lunn:
640 	 * Port 0 has no internal phy.
641 	 * Port 1 has an internal PHY at MDIO address 0.
642 	 * Port 2 has an internal PHY at MDIO address 1.
643 	 * ...
644 	 * Port 5 has an internal PHY at MDIO address 4.
645 	 * Port 6 has no internal PHY.
646 	 */
647 
648 	return port - 1;
649 }
650 
/* Poll @reg over paged MDIO until all bits in @mask are cleared, or the
 * busy-wait timeout expires. Caller must hold bus->mdio_lock and have
 * selected the right page.
 */
static int
qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{
	u16 r1, r2, page;
	u32 val;
	int ret, ret1;

	qca8k_split_addr(reg, &r1, &r2, &page);

	/* ret1 carries the last qca8k_mii_read32() result, ret the poll
	 * timeout state.
	 */
	ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				bus, 0x10 | r2, r1, &val);

	/* Check if qca8k_read has failed for a different reason
	 * before returning -ETIMEDOUT
	 */
	if (ret < 0 && ret1 < 0)
		return ret1;

	return ret;
}
672 
/* Write a PHY register through the switch MDIO master over paged MDIO.
 * Returns 0 on success or a negative errno.
 */
static int
qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	/* Command word: busy + enable + write + target + data */
	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
	      QCA8K_MDIO_MASTER_DATA(data);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);

exit:
	/* even if the busy_wait timeouts try to clear the MASTER_EN */
	qca8k_mii_write32(priv, 0x10 | r2, r1, 0);

	mutex_unlock(&bus->mdio_lock);

	return ret;
}
710 
/* Read a PHY register through the switch MDIO master over paged MDIO.
 * Returns the 16bit read value on success or a negative errno.
 */
static int
qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	/* Command word: busy + enable + read + target */
	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);
	if (ret)
		goto exit;

	/* The result is returned in the same control register */
	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);

exit:
	/* even if the busy_wait timeouts try to clear the MASTER_EN */
	qca8k_mii_write32(priv, 0x10 | r2, r1, 0);

	mutex_unlock(&bus->mdio_lock);

	if (ret >= 0)
		ret = val & QCA8K_MDIO_MASTER_DATA_MASK;

	return ret;
}
754 
755 static int
756 qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
757 {
758 	struct qca8k_priv *priv = slave_bus->priv;
759 	int ret;
760 
761 	/* Use mdio Ethernet when available, fallback to legacy one on error */
762 	ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
763 	if (!ret)
764 		return 0;
765 
766 	return qca8k_mdio_write(priv, phy, regnum, data);
767 }
768 
769 static int
770 qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
771 {
772 	struct qca8k_priv *priv = slave_bus->priv;
773 	int ret;
774 
775 	/* Use mdio Ethernet when available, fallback to legacy one on error */
776 	ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
777 	if (ret >= 0)
778 		return ret;
779 
780 	ret = qca8k_mdio_read(priv, phy, regnum);
781 
782 	if (ret < 0)
783 		return 0xffff;
784 
785 	return ret;
786 }
787 
788 static int
789 qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
790 {
791 	port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
792 
793 	return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
794 }
795 
796 static int
797 qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
798 {
799 	port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
800 
801 	return qca8k_internal_mdio_read(slave_bus, port, regnum);
802 }
803 
804 static int
805 qca8k_mdio_register(struct qca8k_priv *priv)
806 {
807 	struct dsa_switch *ds = priv->ds;
808 	struct device_node *mdio;
809 	struct mii_bus *bus;
810 
811 	bus = devm_mdiobus_alloc(ds->dev);
812 	if (!bus)
813 		return -ENOMEM;
814 
815 	bus->priv = (void *)priv;
816 	snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
817 		 ds->dst->index, ds->index);
818 	bus->parent = ds->dev;
819 	bus->phy_mask = ~ds->phys_mii_mask;
820 	ds->slave_mii_bus = bus;
821 
822 	/* Check if the devicetree declare the port:phy mapping */
823 	mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
824 	if (of_device_is_available(mdio)) {
825 		bus->name = "qca8k slave mii";
826 		bus->read = qca8k_internal_mdio_read;
827 		bus->write = qca8k_internal_mdio_write;
828 		return devm_of_mdiobus_register(priv->dev, bus, mdio);
829 	}
830 
831 	/* If a mapping can't be found the legacy mapping is used,
832 	 * using the qca8k_port_to_phy function
833 	 */
834 	bus->name = "qca8k-legacy slave mii";
835 	bus->read = qca8k_legacy_mdio_read;
836 	bus->write = qca8k_legacy_mdio_write;
837 	return devm_mdiobus_register(priv->dev, bus);
838 }
839 
/* Parse the ports node to decide between the internal MDIO master and
 * an external MDIO bus (mutually exclusive on this hardware), then
 * apply the chosen configuration.
 */
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
	u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
	struct device_node *ports, *port;
	phy_interface_t mode;
	int err;

	ports = of_get_child_by_name(priv->dev->of_node, "ports");
	if (!ports)
		ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");

	if (!ports)
		return -EINVAL;

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			/* Drop both refs before bailing out of the loop */
			of_node_put(port);
			of_node_put(ports);
			return err;
		}

		if (!dsa_is_user_port(priv->ds, reg))
			continue;

		/* NOTE(review): return value ignored - presumably
		 * of_get_phy_mode() leaves a sane default in mode on error;
		 * confirm against of_net.
		 */
		of_get_phy_mode(port, &mode);

		/* A phy-handle with a non-internal mode means the PHY sits
		 * on an external MDIO bus.
		 */
		if (of_property_read_bool(port, "phy-handle") &&
		    mode != PHY_INTERFACE_MODE_INTERNAL)
			external_mdio_mask |= BIT(reg);
		else
			internal_mdio_mask |= BIT(reg);
	}

	of_node_put(ports);
	if (!external_mdio_mask && !internal_mdio_mask) {
		dev_err(priv->dev, "no PHYs are defined.\n");
		return -EINVAL;
	}

	/* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
	 * the MDIO_MASTER register also _disconnects_ the external MDC
	 * passthrough to the internal PHYs. It's not possible to use both
	 * configurations at the same time!
	 *
	 * Because this came up during the review process:
	 * If the external mdio-bus driver is capable magically disabling
	 * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
	 * accessors for the time being, it would be possible to pull this
	 * off.
	 */
	if (!!external_mdio_mask && !!internal_mdio_mask) {
		dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
		return -EINVAL;
	}

	if (external_mdio_mask) {
		/* Make sure to disable the internal mdio bus in cases
		 * a dt-overlay and driver reload changed the configuration
		 */

		return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
					 QCA8K_MDIO_MASTER_EN);
	}

	return qca8k_mdio_register(priv);
}
908 
909 static int
910 qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
911 {
912 	u32 mask = 0;
913 	int ret = 0;
914 
915 	/* SoC specific settings for ipq8064.
916 	 * If more device require this consider adding
917 	 * a dedicated binding.
918 	 */
919 	if (of_machine_is_compatible("qcom,ipq8064"))
920 		mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
921 
922 	/* SoC specific settings for ipq8065 */
923 	if (of_machine_is_compatible("qcom,ipq8065"))
924 		mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
925 
926 	if (mask) {
927 		ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
928 				QCA8K_MAC_PWR_RGMII0_1_8V |
929 				QCA8K_MAC_PWR_RGMII1_1_8V,
930 				mask);
931 	}
932 
933 	return ret;
934 }
935 
936 static int qca8k_find_cpu_port(struct dsa_switch *ds)
937 {
938 	struct qca8k_priv *priv = ds->priv;
939 
940 	/* Find the connected cpu port. Valid port are 0 or 6 */
941 	if (dsa_is_cpu_port(ds, 0))
942 		return 0;
943 
944 	dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
945 
946 	if (dsa_is_cpu_port(ds, 6))
947 		return 6;
948 
949 	return -EINVAL;
950 }
951 
/* Configure the PWS register from devicetree properties (package
 * selection for QCA8327, power-on-sel override, LED open drain).
 */
static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{
	const struct qca8k_match_data *data = priv->info;
	struct device_node *node = priv->dev->of_node;
	u32 val = 0;
	int ret;

	/* QCA8327 require to set to the correct mode.
	 * His bigger brother QCA8328 have the 172 pin layout.
	 * Should be applied by default but we set this just to make sure.
	 */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		/* Set the correct package of 148 pin for QCA8327 */
		if (data->reduced_package)
			val |= QCA8327_PWS_PACKAGE148_EN;

		ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
				val);
		if (ret)
			return ret;
	}

	if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
		val |= QCA8K_PWS_POWER_ON_SEL;

	/* LED open drain only works together with the power-on-sel
	 * override, so reject the former without the latter.
	 */
	if (of_property_read_bool(node, "qca,led-open-drain")) {
		if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
			dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
			return -EINVAL;
		}

		val |= QCA8K_PWS_LED_OPEN_EN_CSR;
	}

	return qca8k_rmw(priv, QCA8K_REG_PWS,
			QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
			val);
}
991 
/* Parse the devicetree config of the two CPU ports (0 and 6): RGMII
 * tx/rx internal delays and the SGMII clock/PLL tweaks. Results are
 * stored in priv->ports_config. Always returns 0; unusable ports are
 * skipped.
 */
static int
qca8k_parse_port_config(struct qca8k_priv *priv)
{
	int port, cpu_port_index = -1, ret;
	struct device_node *port_dn;
	phy_interface_t mode;
	struct dsa_port *dp;
	u32 delay;

	/* We have 2 CPU port. Check them */
	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
		/* Skip every other port */
		if (port != 0 && port != 6)
			continue;

		dp = dsa_to_port(priv->ds, port);
		port_dn = dp->dn;
		/* Index advances per CPU port even when the node is
		 * unavailable, so port 6 always maps to index 1.
		 */
		cpu_port_index++;

		if (!of_device_is_available(port_dn))
			continue;

		ret = of_get_phy_mode(port_dn, &mode);
		if (ret)
			continue;

		switch (mode) {
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_SGMII:
			delay = 0;

			if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
				/* Switch regs accept value in ns, convert ps to ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_TXID)
				delay = 1;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;

			delay = 0;

			if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
				/* Switch regs accept value in ns, convert ps to ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_RXID)
				delay = 2;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;

			/* Skip sgmii parsing for rgmii* mode */
			if (mode == PHY_INTERFACE_MODE_RGMII ||
			    mode == PHY_INTERFACE_MODE_RGMII_ID ||
			    mode == PHY_INTERFACE_MODE_RGMII_TXID ||
			    mode == PHY_INTERFACE_MODE_RGMII_RXID)
				break;

			if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
				priv->ports_config.sgmii_tx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
				priv->ports_config.sgmii_rx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
				priv->ports_config.sgmii_enable_pll = true;

				if (priv->switch_id == QCA8K_ID_QCA8327) {
					dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
					priv->ports_config.sgmii_enable_pll = false;
				}

				if (priv->switch_revision < 2)
					dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
			}

			break;
		default:
			continue;
		}
	}

	return 0;
}
1089 
/* Program the RGMII tx/rx internal delay of a CPU port pad control
 * register from the values parsed by qca8k_parse_port_config().
 * A delay of 0 means "delay disabled": the enable bit is left clear.
 */
static void
qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
				      u32 reg)
{
	u32 delay, val = 0;
	int ret;

	/* Delay can be declared in 3 different way.
	 * Mode to rgmii and internal-delay standard binding defined
	 * rgmii-id or rgmii-tx/rx phy mode set.
	 * The parse logic set a delay different than 0 only when one
	 * of the 3 different way is used. In all other case delay is
	 * not enabled. With ID or TX/RXID delay is enabled and set
	 * to the default and recommended value.
	 */
	if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
		delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];

		val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
	}

	if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
		delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];

		val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
	}

	/* Set RGMII delay based on the selected values */
	ret = qca8k_rmw(priv, reg,
			QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
			val);
	if (ret)
		dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
			cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
}
1130 
1131 static struct phylink_pcs *
1132 qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
1133 			     phy_interface_t interface)
1134 {
1135 	struct qca8k_priv *priv = ds->priv;
1136 	struct phylink_pcs *pcs = NULL;
1137 
1138 	switch (interface) {
1139 	case PHY_INTERFACE_MODE_SGMII:
1140 	case PHY_INTERFACE_MODE_1000BASEX:
1141 		switch (port) {
1142 		case 0:
1143 			pcs = &priv->pcs_port_0.pcs;
1144 			break;
1145 
1146 		case 6:
1147 			pcs = &priv->pcs_port_6.pcs;
1148 			break;
1149 		}
1150 		break;
1151 
1152 	default:
1153 		break;
1154 	}
1155 
1156 	return pcs;
1157 }
1158 
1159 static void
1160 qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
1161 			 const struct phylink_link_state *state)
1162 {
1163 	struct qca8k_priv *priv = ds->priv;
1164 	int cpu_port_index;
1165 	u32 reg;
1166 
1167 	switch (port) {
1168 	case 0: /* 1st CPU port */
1169 		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1170 		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1171 		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1172 		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1173 		    state->interface != PHY_INTERFACE_MODE_SGMII)
1174 			return;
1175 
1176 		reg = QCA8K_REG_PORT0_PAD_CTRL;
1177 		cpu_port_index = QCA8K_CPU_PORT0;
1178 		break;
1179 	case 1:
1180 	case 2:
1181 	case 3:
1182 	case 4:
1183 	case 5:
1184 		/* Internal PHY, nothing to do */
1185 		return;
1186 	case 6: /* 2nd CPU port / external PHY */
1187 		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1188 		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1189 		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1190 		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1191 		    state->interface != PHY_INTERFACE_MODE_SGMII &&
1192 		    state->interface != PHY_INTERFACE_MODE_1000BASEX)
1193 			return;
1194 
1195 		reg = QCA8K_REG_PORT6_PAD_CTRL;
1196 		cpu_port_index = QCA8K_CPU_PORT6;
1197 		break;
1198 	default:
1199 		dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
1200 		return;
1201 	}
1202 
1203 	if (port != 6 && phylink_autoneg_inband(mode)) {
1204 		dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
1205 			__func__);
1206 		return;
1207 	}
1208 
1209 	switch (state->interface) {
1210 	case PHY_INTERFACE_MODE_RGMII:
1211 	case PHY_INTERFACE_MODE_RGMII_ID:
1212 	case PHY_INTERFACE_MODE_RGMII_TXID:
1213 	case PHY_INTERFACE_MODE_RGMII_RXID:
1214 		qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
1215 
1216 		/* Configure rgmii delay */
1217 		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1218 
1219 		/* QCA8337 requires to set rgmii rx delay for all ports.
1220 		 * This is enabled through PORT5_PAD_CTRL for all ports,
1221 		 * rather than individual port registers.
1222 		 */
1223 		if (priv->switch_id == QCA8K_ID_QCA8337)
1224 			qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
1225 				    QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
1226 		break;
1227 	case PHY_INTERFACE_MODE_SGMII:
1228 	case PHY_INTERFACE_MODE_1000BASEX:
1229 		/* Enable SGMII on the port */
1230 		qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
1231 		break;
1232 	default:
1233 		dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
1234 			phy_modes(state->interface), port);
1235 		return;
1236 	}
1237 }
1238 
1239 static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
1240 				   struct phylink_config *config)
1241 {
1242 	switch (port) {
1243 	case 0: /* 1st CPU port */
1244 		phy_interface_set_rgmii(config->supported_interfaces);
1245 		__set_bit(PHY_INTERFACE_MODE_SGMII,
1246 			  config->supported_interfaces);
1247 		break;
1248 
1249 	case 1:
1250 	case 2:
1251 	case 3:
1252 	case 4:
1253 	case 5:
1254 		/* Internal PHY */
1255 		__set_bit(PHY_INTERFACE_MODE_GMII,
1256 			  config->supported_interfaces);
1257 		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
1258 			  config->supported_interfaces);
1259 		break;
1260 
1261 	case 6: /* 2nd CPU port / external PHY */
1262 		phy_interface_set_rgmii(config->supported_interfaces);
1263 		__set_bit(PHY_INTERFACE_MODE_SGMII,
1264 			  config->supported_interfaces);
1265 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
1266 			  config->supported_interfaces);
1267 		break;
1268 	}
1269 
1270 	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1271 		MAC_10 | MAC_100 | MAC_1000FD;
1272 
1273 	config->legacy_pre_march2020 = false;
1274 }
1275 
/* phylink mac_link_down: disable the port MAC when the link drops
 * (0 = disabled, same convention as qca8k_set_pm()).
 */
static void
qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
			    phy_interface_t interface)
{
	struct qca8k_priv *priv = ds->priv;

	qca8k_port_set_status(priv, port, 0);
}
1284 
1285 static void
1286 qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
1287 			  phy_interface_t interface, struct phy_device *phydev,
1288 			  int speed, int duplex, bool tx_pause, bool rx_pause)
1289 {
1290 	struct qca8k_priv *priv = ds->priv;
1291 	u32 reg;
1292 
1293 	if (phylink_autoneg_inband(mode)) {
1294 		reg = QCA8K_PORT_STATUS_LINK_AUTO;
1295 	} else {
1296 		switch (speed) {
1297 		case SPEED_10:
1298 			reg = QCA8K_PORT_STATUS_SPEED_10;
1299 			break;
1300 		case SPEED_100:
1301 			reg = QCA8K_PORT_STATUS_SPEED_100;
1302 			break;
1303 		case SPEED_1000:
1304 			reg = QCA8K_PORT_STATUS_SPEED_1000;
1305 			break;
1306 		default:
1307 			reg = QCA8K_PORT_STATUS_LINK_AUTO;
1308 			break;
1309 		}
1310 
1311 		if (duplex == DUPLEX_FULL)
1312 			reg |= QCA8K_PORT_STATUS_DUPLEX;
1313 
1314 		if (rx_pause || dsa_is_cpu_port(ds, port))
1315 			reg |= QCA8K_PORT_STATUS_RXFLOW;
1316 
1317 		if (tx_pause || dsa_is_cpu_port(ds, port))
1318 			reg |= QCA8K_PORT_STATUS_TXFLOW;
1319 	}
1320 
1321 	reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
1322 
1323 	qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
1324 }
1325 
/* Map a generic phylink_pcs back to its enclosing qca8k_pcs. */
static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct qca8k_pcs, pcs);
}
1330 
1331 static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
1332 				struct phylink_link_state *state)
1333 {
1334 	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1335 	int port = pcs_to_qca8k_pcs(pcs)->port;
1336 	u32 reg;
1337 	int ret;
1338 
1339 	ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
1340 	if (ret < 0) {
1341 		state->link = false;
1342 		return;
1343 	}
1344 
1345 	state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
1346 	state->an_complete = state->link;
1347 	state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
1348 	state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
1349 							   DUPLEX_HALF;
1350 
1351 	switch (reg & QCA8K_PORT_STATUS_SPEED) {
1352 	case QCA8K_PORT_STATUS_SPEED_10:
1353 		state->speed = SPEED_10;
1354 		break;
1355 	case QCA8K_PORT_STATUS_SPEED_100:
1356 		state->speed = SPEED_100;
1357 		break;
1358 	case QCA8K_PORT_STATUS_SPEED_1000:
1359 		state->speed = SPEED_1000;
1360 		break;
1361 	default:
1362 		state->speed = SPEED_UNKNOWN;
1363 		break;
1364 	}
1365 
1366 	if (reg & QCA8K_PORT_STATUS_RXFLOW)
1367 		state->pause |= MLO_PAUSE_RX;
1368 	if (reg & QCA8K_PORT_STATUS_TXFLOW)
1369 		state->pause |= MLO_PAUSE_TX;
1370 }
1371 
1372 static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
1373 			    phy_interface_t interface,
1374 			    const unsigned long *advertising,
1375 			    bool permit_pause_to_mac)
1376 {
1377 	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1378 	int cpu_port_index, ret, port;
1379 	u32 reg, val;
1380 
1381 	port = pcs_to_qca8k_pcs(pcs)->port;
1382 	switch (port) {
1383 	case 0:
1384 		reg = QCA8K_REG_PORT0_PAD_CTRL;
1385 		cpu_port_index = QCA8K_CPU_PORT0;
1386 		break;
1387 
1388 	case 6:
1389 		reg = QCA8K_REG_PORT6_PAD_CTRL;
1390 		cpu_port_index = QCA8K_CPU_PORT6;
1391 		break;
1392 
1393 	default:
1394 		WARN_ON(1);
1395 		return -EINVAL;
1396 	}
1397 
1398 	/* Enable/disable SerDes auto-negotiation as necessary */
1399 	ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
1400 	if (ret)
1401 		return ret;
1402 	if (phylink_autoneg_inband(mode))
1403 		val &= ~QCA8K_PWS_SERDES_AEN_DIS;
1404 	else
1405 		val |= QCA8K_PWS_SERDES_AEN_DIS;
1406 	qca8k_write(priv, QCA8K_REG_PWS, val);
1407 
1408 	/* Configure the SGMII parameters */
1409 	ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
1410 	if (ret)
1411 		return ret;
1412 
1413 	val |= QCA8K_SGMII_EN_SD;
1414 
1415 	if (priv->ports_config.sgmii_enable_pll)
1416 		val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
1417 		       QCA8K_SGMII_EN_TX;
1418 
1419 	if (dsa_is_cpu_port(priv->ds, port)) {
1420 		/* CPU port, we're talking to the CPU MAC, be a PHY */
1421 		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1422 		val |= QCA8K_SGMII_MODE_CTRL_PHY;
1423 	} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1424 		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1425 		val |= QCA8K_SGMII_MODE_CTRL_MAC;
1426 	} else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
1427 		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1428 		val |= QCA8K_SGMII_MODE_CTRL_BASEX;
1429 	}
1430 
1431 	qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
1432 
1433 	/* From original code is reported port instability as SGMII also
1434 	 * require delay set. Apply advised values here or take them from DT.
1435 	 */
1436 	if (interface == PHY_INTERFACE_MODE_SGMII)
1437 		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1438 	/* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
1439 	 * falling edge is set writing in the PORT0 PAD reg
1440 	 */
1441 	if (priv->switch_id == QCA8K_ID_QCA8327 ||
1442 	    priv->switch_id == QCA8K_ID_QCA8337)
1443 		reg = QCA8K_REG_PORT0_PAD_CTRL;
1444 
1445 	val = 0;
1446 
1447 	/* SGMII Clock phase configuration */
1448 	if (priv->ports_config.sgmii_rx_clk_falling_edge)
1449 		val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
1450 
1451 	if (priv->ports_config.sgmii_tx_clk_falling_edge)
1452 		val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
1453 
1454 	if (val)
1455 		ret = qca8k_rmw(priv, reg,
1456 				QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
1457 				QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
1458 				val);
1459 
1460 	return 0;
1461 }
1462 
/* pcs_an_restart: required phylink hook, intentionally empty.
 * NOTE(review): presumably the SerDes restarts autoneg on its own or a
 * restart is not needed for this hardware — confirm against the
 * datasheet before adding anything here.
 */
static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
{
}
1466 
/* PCS operations shared by the SerDes PCS instances on ports 0 and 6 */
static const struct phylink_pcs_ops qca8k_pcs_ops = {
	.pcs_get_state = qca8k_pcs_get_state,
	.pcs_config = qca8k_pcs_config,
	.pcs_an_restart = qca8k_pcs_an_restart,
};
1472 
1473 static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
1474 			    int port)
1475 {
1476 	qpcs->pcs.ops = &qca8k_pcs_ops;
1477 
1478 	/* We don't have interrupts for link changes, so we need to poll */
1479 	qpcs->pcs.poll = true;
1480 	qpcs->priv = priv;
1481 	qpcs->port = port;
1482 }
1483 
/* Handler for MIB autocast packets received from the switch.
 *
 * Called by the QCA tagger for every autocast packet; the switch emits
 * one packet per port. Packets for ports other than the one requested in
 * mib_eth_data->req_port are only counted, not parsed. Once the packet of
 * every port has been seen (port_parsed drops to zero), rw_done is
 * completed to wake qca8k_get_ethtool_stats_eth().
 */
static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	const struct qca8k_mib_desc *mib;
	struct mib_ethhdr *mib_ethhdr;
	int i, mib_len, offset = 0;
	u64 *data;
	u8 port;

	mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
	mib_eth_data = &priv->mib_eth_data;

	/* The switch autocast every port. Ignore other packet and
	 * parse only the requested one.
	 */
	port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
	if (port != mib_eth_data->req_port)
		goto exit;

	data = mib_eth_data->data;

	for (i = 0; i < priv->info->mib_count; i++) {
		mib = &ar8327_mib[i];

		/* First 3 mib are present in the skb head */
		if (i < 3) {
			/* NOTE(review): direct assignment assumes the header
			 * data is suitably aligned and in host byte order —
			 * confirm on big-endian platforms.
			 */
			data[i] = mib_ethhdr->data[i];
			continue;
		}

		mib_len = sizeof(uint32_t);

		/* Some mib are 64 bit wide */
		if (mib->size == 2)
			mib_len = sizeof(uint64_t);

		/* Copy the mib value from the packet into the caller's buffer */
		memcpy(data + i, skb->data + offset, mib_len);

		/* Set the offset for the next mib */
		offset += mib_len;
	}

exit:
	/* Complete once the autocast packet of every port has been seen */
	if (refcount_dec_and_test(&mib_eth_data->port_parsed))
		complete(&mib_eth_data->rw_done);
}
1533 
/* Gather ethtool stats for @port through the MIB autocast Ethernet path.
 *
 * Triggers a MIB autocast request; qca8k_mib_autocast_handler() fills
 * @data from the received packets and completes rw_done after packets
 * from all QCA8K_NUM_PORTS ports have been seen.
 *
 * Returns a negative errno on regmap failure, and otherwise the value of
 * wait_for_completion_timeout(): 0 on timeout, positive (remaining
 * jiffies) on success — callers treat > 0 as success.
 */
static int
qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	int ret;

	mib_eth_data = &priv->mib_eth_data;

	/* Serialize autocast requests: only one stats read at a time */
	mutex_lock(&mib_eth_data->mutex);

	reinit_completion(&mib_eth_data->rw_done);

	/* Tell the handler which port to parse and where to store values */
	mib_eth_data->req_port = dp->index;
	mib_eth_data->data = data;
	refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);

	mutex_lock(&priv->reg_mutex);

	/* Send mib autocast request */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
				 QCA8K_MIB_BUSY);

	mutex_unlock(&priv->reg_mutex);

	if (ret)
		goto exit;

	ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);

exit:
	mutex_unlock(&mib_eth_data->mutex);

	return ret;
}
1572 
1573 static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
1574 {
1575 	struct qca8k_priv *priv = ds->priv;
1576 
1577 	/* Communicate to the phy internal driver the switch revision.
1578 	 * Based on the switch revision different values needs to be
1579 	 * set to the dbg and mmd reg on the phy.
1580 	 * The first 2 bit are used to communicate the switch revision
1581 	 * to the phy driver.
1582 	 */
1583 	if (port > 0 && port < 6)
1584 		return priv->switch_revision;
1585 
1586 	return 0;
1587 }
1588 
/* All qca8k switches use the QCA tagging protocol on every port. */
static enum dsa_tag_protocol
qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
		       enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_QCA;
}
1595 
1596 static void
1597 qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
1598 		    bool operational)
1599 {
1600 	struct dsa_port *dp = master->dsa_ptr;
1601 	struct qca8k_priv *priv = ds->priv;
1602 
1603 	/* Ethernet MIB/MDIO is only supported for CPU port 0 */
1604 	if (dp->index != 0)
1605 		return;
1606 
1607 	mutex_lock(&priv->mgmt_eth_data.mutex);
1608 	mutex_lock(&priv->mib_eth_data.mutex);
1609 
1610 	priv->mgmt_master = operational ? (struct net_device *)master : NULL;
1611 
1612 	mutex_unlock(&priv->mib_eth_data.mutex);
1613 	mutex_unlock(&priv->mgmt_eth_data.mutex);
1614 }
1615 
1616 static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
1617 				      enum dsa_tag_protocol proto)
1618 {
1619 	struct qca_tagger_data *tagger_data;
1620 
1621 	switch (proto) {
1622 	case DSA_TAG_PROTO_QCA:
1623 		tagger_data = ds->tagger_data;
1624 
1625 		tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
1626 		tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
1627 
1628 		break;
1629 	default:
1630 		return -EOPNOTSUPP;
1631 	}
1632 
1633 	return 0;
1634 }
1635 
/* Main DSA setup hook: bring the switch to a sane initial state.
 *
 * Parses the CPU port configuration, sets up the MDIO bus and power
 * selection, attaches the PCS instances, enables the CPU port and MIB
 * counters, isolates all ports, enables QCA header mode on CPU ports,
 * programs forwarding/VLAN defaults and chip-specific buffer/priority
 * tuning, then sets ageing and LAG limits on the dsa_switch.
 *
 * Returns 0 on success or a negative errno when a mandatory register
 * access fails; MIB-init and MTU failures are only warned about.
 */
static int
qca8k_setup(struct dsa_switch *ds)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	int cpu_port, ret, i;
	u32 mask;

	cpu_port = qca8k_find_cpu_port(ds);
	if (cpu_port < 0) {
		dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
		return cpu_port;
	}

	/* Parse CPU port config to be later used in phy_link mac_config */
	ret = qca8k_parse_port_config(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_mdio_bus(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_of_pws_reg(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_mac_pwr_sel(priv);
	if (ret)
		return ret;

	/* Attach the SerDes PCS instances for the two CPU-capable ports */
	qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
	qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);

	/* Make sure MAC06 is disabled */
	ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
				QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
	if (ret) {
		dev_err(priv->dev, "failed disabling MAC06 exchange");
		return ret;
	}

	/* Enable CPU Port */
	ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
			      QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
	if (ret) {
		dev_err(priv->dev, "failed enabling CPU port");
		return ret;
	}

	/* Enable MIB counters */
	ret = qca8k_mib_init(priv);
	if (ret)
		dev_warn(priv->dev, "mib init failed");

	/* Initial setup of all ports */
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		/* Disable forwarding by default on all ports */
		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
				QCA8K_PORT_LOOKUP_MEMBER, 0);
		if (ret)
			return ret;

		/* Enable QCA header mode on all cpu ports */
		if (dsa_is_cpu_port(ds, i)) {
			ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
					  FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
					  FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
			if (ret) {
				dev_err(priv->dev, "failed enabling QCA header mode");
				return ret;
			}
		}

		/* Disable MAC by default on all user ports */
		if (dsa_is_user_port(ds, i))
			qca8k_port_set_status(priv, i, 0);
	}

	/* Forward all unknown frames to CPU port for Linux processing
	 * Notice that in multi-cpu config only one port should be set
	 * for igmp, unknown, multicast and broadcast packet
	 */
	ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
	if (ret)
		return ret;

	/* Setup connection between CPU port & user ports
	 * Configure specific switch configuration for ports
	 */
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		/* CPU port gets connected to all user ports of the switch */
		if (dsa_is_cpu_port(ds, i)) {
			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
					QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
			if (ret)
				return ret;
		}

		/* Individual user ports get connected to CPU port only */
		if (dsa_is_user_port(ds, i)) {
			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
					QCA8K_PORT_LOOKUP_MEMBER,
					BIT(cpu_port));
			if (ret)
				return ret;

			/* Enable ARP Auto-learning by default */
			ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
					      QCA8K_PORT_LOOKUP_LEARN);
			if (ret)
				return ret;

			/* For port based vlans to work we need to set the
			 * default egress vid
			 */
			ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
					QCA8K_EGREES_VLAN_PORT_MASK(i),
					QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
			if (ret)
				return ret;

			ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
					  QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
					  QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
			if (ret)
				return ret;
		}

		/* Port 5 of the qca8337 has problems under flood conditions.
		 * The original legacy driver had specific buffer and priority
		 * settings for the different ports suggested by the QCA switch
		 * team. Add these missing settings to improve switch stability
		 * under load. This problem is limited to qca8337; other qca8k
		 * switches are not affected.
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337) {
			switch (i) {
			/* The 2 CPU ports and port 5 require different
			 * priorities than any other port.
			 */
			case 0:
			case 5:
			case 6:
				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
				break;
			default:
				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
			}
			qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);

			mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
			QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
			QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
			QCA8K_PORT_HOL_CTRL1_WRED_EN;
			qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
				  QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
				  QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
				  QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
				  QCA8K_PORT_HOL_CTRL1_WRED_EN,
				  mask);
		}
	}

	/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
		       QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
		qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
			  QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
			  QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
			  mask);
	}

	/* Setup our port MTUs to match power on defaults */
	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		dev_warn(priv->dev, "failed setting MTU settings");

	/* Flush the FDB table */
	qca8k_fdb_flush(priv);

	/* Set min and max ageing values supported */
	ds->ageing_time_min = 7000;
	ds->ageing_time_max = 458745000;

	/* Set max number of LAGs supported */
	ds->num_lag_ids = QCA8K_NUM_LAGS;

	return 0;
}
1839 
/* DSA operations table; most handlers live in the shared qca8k common code */
static const struct dsa_switch_ops qca8k_switch_ops = {
	.get_tag_protocol	= qca8k_get_tag_protocol,
	.setup			= qca8k_setup,
	.get_strings		= qca8k_get_strings,
	.get_ethtool_stats	= qca8k_get_ethtool_stats,
	.get_sset_count		= qca8k_get_sset_count,
	.set_ageing_time	= qca8k_set_ageing_time,
	.get_mac_eee		= qca8k_get_mac_eee,
	.set_mac_eee		= qca8k_set_mac_eee,
	.port_enable		= qca8k_port_enable,
	.port_disable		= qca8k_port_disable,
	.port_change_mtu	= qca8k_port_change_mtu,
	.port_max_mtu		= qca8k_port_max_mtu,
	.port_stp_state_set	= qca8k_port_stp_state_set,
	.port_bridge_join	= qca8k_port_bridge_join,
	.port_bridge_leave	= qca8k_port_bridge_leave,
	.port_fast_age		= qca8k_port_fast_age,
	.port_fdb_add		= qca8k_port_fdb_add,
	.port_fdb_del		= qca8k_port_fdb_del,
	.port_fdb_dump		= qca8k_port_fdb_dump,
	.port_mdb_add		= qca8k_port_mdb_add,
	.port_mdb_del		= qca8k_port_mdb_del,
	.port_mirror_add	= qca8k_port_mirror_add,
	.port_mirror_del	= qca8k_port_mirror_del,
	.port_vlan_filtering	= qca8k_port_vlan_filtering,
	.port_vlan_add		= qca8k_port_vlan_add,
	.port_vlan_del		= qca8k_port_vlan_del,
	.phylink_get_caps	= qca8k_phylink_get_caps,
	.phylink_mac_select_pcs	= qca8k_phylink_mac_select_pcs,
	.phylink_mac_config	= qca8k_phylink_mac_config,
	.phylink_mac_link_down	= qca8k_phylink_mac_link_down,
	.phylink_mac_link_up	= qca8k_phylink_mac_link_up,
	.get_phy_flags		= qca8k_get_phy_flags,
	.port_lag_join		= qca8k_port_lag_join,
	.port_lag_leave		= qca8k_port_lag_leave,
	.master_state_change	= qca8k_master_change,
	.connect_tag_protocol	= qca8k_connect_tag_protocol,
};
1878 
/* mdio probe: optionally hard-reset the switch via GPIO, set up regmap
 * access, identify the chip, then allocate and register the DSA switch.
 */
static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv;
	int ret;

	/* allocate the private data struct so that we can probe the switches
	 * ID register
	 */
	priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->bus = mdiodev->bus;
	priv->dev = &mdiodev->dev;
	/* NOTE(review): of_device_get_match_data() can return NULL when no
	 * DT match data is found; users of priv->info appear to assume it is
	 * valid — confirm this cannot happen on supported probe paths.
	 */
	priv->info = of_device_get_match_data(priv->dev);

	priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
						   GPIOD_ASIS);
	if (IS_ERR(priv->reset_gpio))
		return PTR_ERR(priv->reset_gpio);

	if (priv->reset_gpio) {
		gpiod_set_value_cansleep(priv->reset_gpio, 1);
		/* The active low duration must be greater than 10 ms
		 * and checkpatch.pl wants 20 ms.
		 */
		msleep(20);
		gpiod_set_value_cansleep(priv->reset_gpio, 0);
	}

	/* Start by setting up the register mapping */
	priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
					&qca8k_regmap_config);
	if (IS_ERR(priv->regmap)) {
		dev_err(priv->dev, "regmap initialization failed");
		return PTR_ERR(priv->regmap);
	}

	/* Poison the MDIO caches so the first access always programs the
	 * page/lo/hi registers instead of trusting stale values.
	 */
	priv->mdio_cache.page = 0xffff;
	priv->mdio_cache.lo = 0xffff;
	priv->mdio_cache.hi = 0xffff;

	/* Check the detected switch id */
	ret = qca8k_read_switch_id(priv);
	if (ret)
		return ret;

	priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
	if (!priv->ds)
		return -ENOMEM;

	/* Locks and completions used by the Ethernet mgmt/MIB offload paths */
	mutex_init(&priv->mgmt_eth_data.mutex);
	init_completion(&priv->mgmt_eth_data.rw_done);

	mutex_init(&priv->mib_eth_data.mutex);
	init_completion(&priv->mib_eth_data.rw_done);

	priv->ds->dev = &mdiodev->dev;
	priv->ds->num_ports = QCA8K_NUM_PORTS;
	priv->ds->priv = priv;
	priv->ds->ops = &qca8k_switch_ops;
	mutex_init(&priv->reg_mutex);
	dev_set_drvdata(&mdiodev->dev, priv);

	return dsa_register_switch(priv->ds);
}
1946 
1947 static void
1948 qca8k_sw_remove(struct mdio_device *mdiodev)
1949 {
1950 	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
1951 	int i;
1952 
1953 	if (!priv)
1954 		return;
1955 
1956 	for (i = 0; i < QCA8K_NUM_PORTS; i++)
1957 		qca8k_port_set_status(priv, i, 0);
1958 
1959 	dsa_unregister_switch(priv->ds);
1960 }
1961 
/* mdio shutdown: tear down the DSA switch and clear drvdata so a
 * subsequent remove() becomes a no-op.
 */
static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);

	if (!priv)
		return;

	dsa_switch_shutdown(priv->ds);

	dev_set_drvdata(&mdiodev->dev, NULL);
}
1973 
1974 #ifdef CONFIG_PM_SLEEP
1975 static void
1976 qca8k_set_pm(struct qca8k_priv *priv, int enable)
1977 {
1978 	int port;
1979 
1980 	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
1981 		/* Do not enable on resume if the port was
1982 		 * disabled before.
1983 		 */
1984 		if (!(priv->port_enabled_map & BIT(port)))
1985 			continue;
1986 
1987 		qca8k_port_set_status(priv, port, enable);
1988 	}
1989 }
1990 
/* PM suspend: stop the previously-enabled port MACs, then suspend DSA. */
static int qca8k_suspend(struct device *dev)
{
	struct qca8k_priv *priv = dev_get_drvdata(dev);

	qca8k_set_pm(priv, 0);

	return dsa_switch_suspend(priv->ds);
}
1999 
/* PM resume: re-enable the port MACs that were up before suspend,
 * then resume DSA.
 */
static int qca8k_resume(struct device *dev)
{
	struct qca8k_priv *priv = dev_get_drvdata(dev);

	qca8k_set_pm(priv, 1);

	return dsa_switch_resume(priv->ds);
}
2008 #endif /* CONFIG_PM_SLEEP */
2009 
/* The suspend/resume callbacks are only referenced under CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
			 qca8k_suspend, qca8k_resume);
2012 
/* Ethernet-offload register/MIB access ops shared by all qca8k variants */
static const struct qca8k_info_ops qca8xxx_ops = {
	.autocast_mib = qca8k_get_ethtool_stats_eth,
	.read_eth = qca8k_read_eth,
	.write_eth = qca8k_write_eth,
};
2018 
/* QCA8327: reduced-package variant of the QCA832x family */
static const struct qca8k_match_data qca8327 = {
	.id = QCA8K_ID_QCA8327,
	.reduced_package = true,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};
2025 
/* QCA8328: full-package variant; reports the same switch ID as QCA8327 */
static const struct qca8k_match_data qca8328 = {
	.id = QCA8K_ID_QCA8327,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};
2031 
/* QCA8334/QCA8337: share the QCA8337 switch ID and MIB layout */
static const struct qca8k_match_data qca833x = {
	.id = QCA8K_ID_QCA8337,
	.mib_count = QCA8K_QCA833X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};
2037 
/* Device-tree match table mapping compatibles to per-chip match data */
static const struct of_device_id qca8k_of_match[] = {
	{ .compatible = "qca,qca8327", .data = &qca8327 },
	{ .compatible = "qca,qca8328", .data = &qca8328 },
	{ .compatible = "qca,qca8334", .data = &qca833x },
	{ .compatible = "qca,qca8337", .data = &qca833x },
	{ /* sentinel */ },
};
2045 
/* The switch is managed over MDIO, so it probes as an mdio_device */
static struct mdio_driver qca8kmdio_driver = {
	.probe  = qca8k_sw_probe,
	.remove = qca8k_sw_remove,
	.shutdown = qca8k_sw_shutdown,
	.mdiodrv.driver = {
		.name = "qca8k",
		.of_match_table = qca8k_of_match,
		.pm = &qca8k_pm_ops,
	},
};
2056 
mdio_module_driver(qca8kmdio_driver);

/* Module metadata */
MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qca8k");
2063