xref: /openbmc/linux/drivers/net/phy/dp83640.c (revision 3a9a231d)
1 /*
2  * Driver for the National Semiconductor DP83640 PHYTER
3  *
4  * Copyright (C) 2010 OMICRON electronics GmbH
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, write to the Free Software
18  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20 #include <linux/ethtool.h>
21 #include <linux/kernel.h>
22 #include <linux/list.h>
23 #include <linux/mii.h>
24 #include <linux/module.h>
25 #include <linux/net_tstamp.h>
26 #include <linux/netdevice.h>
27 #include <linux/phy.h>
28 #include <linux/ptp_classify.h>
29 #include <linux/ptp_clock_kernel.h>
30 
31 #include "dp83640_reg.h"
32 
33 #define DP83640_PHY_ID	0x20005ce1
34 #define PAGESEL		0x13
35 #define LAYER4		0x02
36 #define LAYER2		0x01
37 #define MAX_RXTS	64
38 #define N_EXT_TS	6
39 #define PSF_PTPVER	2
40 #define PSF_EVNT	0x4000
41 #define PSF_RX		0x2000
42 #define PSF_TX		0x1000
43 #define EXT_EVENT	1
44 #define CAL_EVENT	7
45 #define CAL_TRIGGER	7
46 #define PER_TRIGGER	6
47 
48 /* phyter seems to miss the mark by 16 ns */
49 #define ADJTIME_FIX	16
50 
51 #if defined(__BIG_ENDIAN)
52 #define ENDIAN_FLAG	0
53 #elif defined(__LITTLE_ENDIAN)
54 #define ENDIAN_FLAG	PSF_ENDIAN
55 #endif
56 
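/*
 * The PTP class of a deferred receive frame is stashed in skb->cb while the
 * frame sits on rx_queue waiting to be matched with a received time stamp
 * (see dp83640_rxtstamp() and rx_timestamp_work() below).
 */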
57 #define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
58 
59 struct phy_rxts {
60 	u16 ns_lo;   /* ns[15:0] */
61 	u16 ns_hi;   /* overflow[1:0], ns[29:16] */
62 	u16 sec_lo;  /* sec[15:0] */
63 	u16 sec_hi;  /* sec[31:16] */
64 	u16 seqid;   /* sequenceId[15:0] */
65 	u16 msgtype; /* messageType[3:0], hash[11:0] */
66 };
67 
68 struct phy_txts {
69 	u16 ns_lo;   /* ns[15:0] */
70 	u16 ns_hi;   /* overflow[1:0], ns[29:16] */
71 	u16 sec_lo;  /* sec[15:0] */
72 	u16 sec_hi;  /* sec[31:16] */
73 };
74 
75 struct rxts {
76 	struct list_head list;
77 	unsigned long tmo;
78 	u64 ns;
79 	u16 seqid;
80 	u8  msgtype;
81 	u16 hash;
82 };
83 
84 struct dp83640_clock;
85 
86 struct dp83640_private {
87 	struct list_head list;
88 	struct dp83640_clock *clock;
89 	struct phy_device *phydev;
90 	struct work_struct ts_work;
91 	int hwts_tx_en;
92 	int hwts_rx_en;
93 	int layer;
94 	int version;
95 	/* remember state of cfg0 during calibration */
96 	int cfg0;
97 	/* remember the last event time stamp */
98 	struct phy_txts edata;
99 	/* list of rx timestamps */
100 	struct list_head rxts;
101 	struct list_head rxpool;
102 	struct rxts rx_pool_data[MAX_RXTS];
103 	/* protects above three fields from concurrent access */
104 	spinlock_t rx_lock;
105 	/* queues of incoming and outgoing packets */
106 	struct sk_buff_head rx_queue;
107 	struct sk_buff_head tx_queue;
108 };
109 
110 struct dp83640_clock {
111 	/* keeps the instance in the 'phyter_clocks' list */
112 	struct list_head list;
113 	/* we create one clock instance per MII bus */
114 	struct mii_bus *bus;
115 	/* protects extended registers from concurrent access */
116 	struct mutex extreg_lock;
117 	/* remembers which page was last selected */
118 	int page;
119 	/* our advertised capabilities */
120 	struct ptp_clock_info caps;
121 	/* protects the three fields below from concurrent access */
122 	struct mutex clock_lock;
123 	/* the one phyter from which we shall read */
124 	struct dp83640_private *chosen;
125 	/* list of the other attached phyters, not chosen */
126 	struct list_head phylist;
127 	/* reference to our PTP hardware clock */
128 	struct ptp_clock *ptp_clock;
129 };
130 
131 /* globals */
132 
133 enum {
134 	CALIBRATE_GPIO,
135 	PEROUT_GPIO,
136 	EXTTS0_GPIO,
137 	EXTTS1_GPIO,
138 	EXTTS2_GPIO,
139 	EXTTS3_GPIO,
140 	EXTTS4_GPIO,
141 	EXTTS5_GPIO,
142 	GPIO_TABLE_SIZE
143 };
144 
145 static int chosen_phy = -1;
146 static ushort gpio_tab[GPIO_TABLE_SIZE] = {
147 	1, 2, 3, 4, 8, 9, 10, 11
148 };
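/*
 * Default GPIO assignment: GPIO 1 drives the calibration event, GPIO 2 the
 * periodic output, and GPIOs 3, 4, 8, 9, 10 and 11 serve the six external
 * time stamp channels.  These are only defaults; presumably they must be
 * overridden via gpio_tab= to match the actual board wiring.
 */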
149 
150 module_param(chosen_phy, int, 0444);
151 module_param_array(gpio_tab, ushort, NULL, 0444);
152 
153 MODULE_PARM_DESC(chosen_phy,
154 	"The address of the PHY to use for the ancillary clock features");
155 MODULE_PARM_DESC(gpio_tab,
156 	"Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6");
157 
158 /* a list of clocks and a mutex to protect it */
159 static LIST_HEAD(phyter_clocks);
160 static DEFINE_MUTEX(phyter_clocks_lock);
161 
162 static void rx_timestamp_work(struct work_struct *work);
163 
164 /* extended register access functions */
165 
166 #define BROADCAST_ADDR 31
167 
168 static inline int broadcast_write(struct mii_bus *bus, u32 regnum, u16 val)
169 {
170 	return mdiobus_write(bus, BROADCAST_ADDR, regnum, val);
171 }
172 
173 /* Caller must hold extreg_lock. */
174 static int ext_read(struct phy_device *phydev, int page, u32 regnum)
175 {
176 	struct dp83640_private *dp83640 = phydev->priv;
177 	int val;
178 
179 	if (dp83640->clock->page != page) {
180 		broadcast_write(phydev->bus, PAGESEL, page);
181 		dp83640->clock->page = page;
182 	}
183 	val = phy_read(phydev, regnum);
184 
185 	return val;
186 }
187 
188 /* Caller must hold extreg_lock. */
189 static void ext_write(int broadcast, struct phy_device *phydev,
190 		      int page, u32 regnum, u16 val)
191 {
192 	struct dp83640_private *dp83640 = phydev->priv;
193 
194 	if (dp83640->clock->page != page) {
195 		broadcast_write(phydev->bus, PAGESEL, page);
196 		dp83640->clock->page = page;
197 	}
198 	if (broadcast)
199 		broadcast_write(phydev->bus, regnum, val);
200 	else
201 		phy_write(phydev, regnum, val);
202 }
203 
204 /* Caller must hold extreg_lock. */
205 static int tdr_write(int bc, struct phy_device *dev,
206 		     const struct timespec *ts, u16 cmd)
207 {
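	/*
	 * The time value is shifted into PTP_TDR 16 bits at a time, low word
	 * first; the PTP_CTL command written afterwards (PTP_LOAD_CLK or
	 * PTP_STEP_CLK) then consumes whatever was staged in the TDR.
	 */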
208 	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec & 0xffff);/* ns[15:0]  */
209 	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec >> 16);   /* ns[31:16] */
210 	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec & 0xffff); /* sec[15:0] */
211 	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec >> 16);    /* sec[31:16]*/
212 
213 	ext_write(bc, dev, PAGE4, PTP_CTL, cmd);
214 
215 	return 0;
216 }
217 
218 /* convert phy timestamps into driver timestamps */
219 
220 static void phy2rxts(struct phy_rxts *p, struct rxts *rxts)
221 {
222 	u32 sec;
223 
224 	sec = p->sec_lo;
225 	sec |= p->sec_hi << 16;
226 
227 	rxts->ns = p->ns_lo;
228 	rxts->ns |= (p->ns_hi & 0x3fff) << 16;
229 	rxts->ns += ((u64)sec) * 1000000000ULL;
230 	rxts->seqid = p->seqid;
231 	rxts->msgtype = (p->msgtype >> 12) & 0xf;
232 	rxts->hash = p->msgtype & 0x0fff;
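	/* Give the matching receive frame roughly two jiffies to show up
	 * before prune_rx_ts() recycles this entry back into the pool.
	 */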
233 	rxts->tmo = jiffies + 2;
234 }
235 
236 static u64 phy2txts(struct phy_txts *p)
237 {
238 	u64 ns;
239 	u32 sec;
240 
241 	sec = p->sec_lo;
242 	sec |= p->sec_hi << 16;
243 
244 	ns = p->ns_lo;
245 	ns |= (p->ns_hi & 0x3fff) << 16;
246 	ns += ((u64)sec) * 1000000000ULL;
247 
248 	return ns;
249 }
250 
251 static void periodic_output(struct dp83640_clock *clock,
252 			    struct ptp_clock_request *clkreq, bool on)
253 {
254 	struct dp83640_private *dp83640 = clock->chosen;
255 	struct phy_device *phydev = dp83640->phydev;
256 	u32 sec, nsec, period;
257 	u16 gpio, ptp_trig, trigger, val;
258 
259 	gpio = on ? gpio_tab[PEROUT_GPIO] : 0;
260 	trigger = PER_TRIGGER;
261 
262 	ptp_trig = TRIG_WR |
263 		(trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT |
264 		(gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT |
265 		TRIG_PER |
266 		TRIG_PULSE;
267 
268 	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
269 
270 	if (!on) {
271 		val |= TRIG_DIS;
272 		mutex_lock(&clock->extreg_lock);
273 		ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
274 		ext_write(0, phydev, PAGE4, PTP_CTL, val);
275 		mutex_unlock(&clock->extreg_lock);
276 		return;
277 	}
278 
279 	sec = clkreq->perout.start.sec;
280 	nsec = clkreq->perout.start.nsec;
281 	period = clkreq->perout.period.sec * 1000000000UL;
282 	period += clkreq->perout.period.nsec;
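	/* Note: period is a u32 of nanoseconds, so requested periods longer
	 * than about 4.29 seconds will wrap; presumably the hardware period
	 * register is only 32 bits wide anyway.
	 */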
283 
284 	mutex_lock(&clock->extreg_lock);
285 
286 	ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
287 
288 	/* load trigger */
289 	val |= TRIG_LOAD;
290 	ext_write(0, phydev, PAGE4, PTP_CTL, val);
291 	ext_write(0, phydev, PAGE4, PTP_TDR, nsec & 0xffff);   /* ns[15:0] */
292 	ext_write(0, phydev, PAGE4, PTP_TDR, nsec >> 16);      /* ns[31:16] */
293 	ext_write(0, phydev, PAGE4, PTP_TDR, sec & 0xffff);    /* sec[15:0] */
294 	ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16);       /* sec[31:16] */
295 	ext_write(0, phydev, PAGE4, PTP_TDR, period & 0xffff); /* period[15:0] */
296 	ext_write(0, phydev, PAGE4, PTP_TDR, period >> 16);    /* period[31:16] */
297 
298 	/* enable trigger */
299 	val &= ~TRIG_LOAD;
300 	val |= TRIG_EN;
301 	ext_write(0, phydev, PAGE4, PTP_CTL, val);
302 
303 	mutex_unlock(&clock->extreg_lock);
304 }
305 
306 /* ptp clock methods */
307 
308 static int ptp_dp83640_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
309 {
310 	struct dp83640_clock *clock =
311 		container_of(ptp, struct dp83640_clock, caps);
312 	struct phy_device *phydev = clock->chosen->phydev;
313 	u64 rate;
314 	int neg_adj = 0;
315 	u16 hi, lo;
316 
317 	if (ppb < 0) {
318 		neg_adj = 1;
319 		ppb = -ppb;
320 	}
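	/*
	 * Scale parts-per-billion into the hardware rate format.  Assuming
	 * the 8 ns (125 MHz) PTP reference clock, the rate word is added to
	 * the clock in units of 2^-32 ns every 8 ns, so:
	 *
	 *   rate = ppb * 8e-9 * 2^32 = ppb * 2^35 / 10^9 = ppb * 2^26 / 1953125
	 *
	 * which also explains caps.max_adj = 1953124, the largest ppb value
	 * whose scaled result still fits in the 26 bit rate field.
	 */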
321 	rate = ppb;
322 	rate <<= 26;
323 	rate = div_u64(rate, 1953125);
324 
325 	hi = (rate >> 16) & PTP_RATE_HI_MASK;
326 	if (neg_adj)
327 		hi |= PTP_RATE_DIR;
328 
329 	lo = rate & 0xffff;
330 
331 	mutex_lock(&clock->extreg_lock);
332 
333 	ext_write(1, phydev, PAGE4, PTP_RATEH, hi);
334 	ext_write(1, phydev, PAGE4, PTP_RATEL, lo);
335 
336 	mutex_unlock(&clock->extreg_lock);
337 
338 	return 0;
339 }
340 
341 static int ptp_dp83640_adjtime(struct ptp_clock_info *ptp, s64 delta)
342 {
343 	struct dp83640_clock *clock =
344 		container_of(ptp, struct dp83640_clock, caps);
345 	struct phy_device *phydev = clock->chosen->phydev;
346 	struct timespec ts;
347 	int err;
348 
349 	delta += ADJTIME_FIX;
350 
351 	ts = ns_to_timespec(delta);
352 
353 	mutex_lock(&clock->extreg_lock);
354 
355 	err = tdr_write(1, phydev, &ts, PTP_STEP_CLK);
356 
357 	mutex_unlock(&clock->extreg_lock);
358 
359 	return err;
360 }
361 
362 static int ptp_dp83640_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
363 {
364 	struct dp83640_clock *clock =
365 		container_of(ptp, struct dp83640_clock, caps);
366 	struct phy_device *phydev = clock->chosen->phydev;
367 	unsigned int val[4];
368 
369 	mutex_lock(&clock->extreg_lock);
370 
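	/* PTP_RD_CLK presumably latches the running time, so that the four
	 * 16 bit PTP_TDR reads below return one coherent snapshot.
	 */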
371 	ext_write(0, phydev, PAGE4, PTP_CTL, PTP_RD_CLK);
372 
373 	val[0] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[15:0] */
374 	val[1] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[31:16] */
375 	val[2] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[15:0] */
376 	val[3] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[31:16] */
377 
378 	mutex_unlock(&clock->extreg_lock);
379 
380 	ts->tv_nsec = val[0] | (val[1] << 16);
381 	ts->tv_sec  = val[2] | (val[3] << 16);
382 
383 	return 0;
384 }
385 
386 static int ptp_dp83640_settime(struct ptp_clock_info *ptp,
387 			       const struct timespec *ts)
388 {
389 	struct dp83640_clock *clock =
390 		container_of(ptp, struct dp83640_clock, caps);
391 	struct phy_device *phydev = clock->chosen->phydev;
392 	int err;
393 
394 	mutex_lock(&clock->extreg_lock);
395 
396 	err = tdr_write(1, phydev, ts, PTP_LOAD_CLK);
397 
398 	mutex_unlock(&clock->extreg_lock);
399 
400 	return err;
401 }
402 
403 static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
404 			      struct ptp_clock_request *rq, int on)
405 {
406 	struct dp83640_clock *clock =
407 		container_of(ptp, struct dp83640_clock, caps);
408 	struct phy_device *phydev = clock->chosen->phydev;
409 	int index;
410 	u16 evnt, event_num, gpio_num;
411 
412 	switch (rq->type) {
413 	case PTP_CLK_REQ_EXTTS:
414 		index = rq->extts.index;
415 		if (index < 0 || index >= N_EXT_TS)
416 			return -EINVAL;
417 		event_num = EXT_EVENT + index;
418 		evnt = EVNT_WR | (event_num & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
419 		if (on) {
420 			gpio_num = gpio_tab[EXTTS0_GPIO + index];
421 			evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
422 			evnt |= EVNT_RISE;
423 		}
424 		ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
425 		return 0;
426 
427 	case PTP_CLK_REQ_PEROUT:
428 		if (rq->perout.index != 0)
429 			return -EINVAL;
430 		periodic_output(clock, rq, on);
431 		return 0;
432 
433 	default:
434 		break;
435 	}
436 
437 	return -EOPNOTSUPP;
438 }
439 
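/*
 * Destination and source MAC addresses used by the PHY for its status
 * frames.  The destination is the standard IEEE 1588 layer 2 multicast
 * address 01:1B:19:00:00:00 (hence the dev_mc_add()/dev_mc_del() calls in
 * enable_status_frames()); the source appears to be a fixed address that
 * the hardware stamps on the frames it generates.
 */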
440 static u8 status_frame_dst[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
441 static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
442 
443 static void enable_status_frames(struct phy_device *phydev, bool on)
444 {
445 	u16 cfg0 = 0, ver;
446 
447 	if (on)
448 		cfg0 = PSF_EVNT_EN | PSF_RXTS_EN | PSF_TXTS_EN | ENDIAN_FLAG;
449 
450 	ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;
451 
452 	ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
453 	ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
454 
455 	if (!phydev->attached_dev) {
456 		pr_warning("dp83640: expected to find an attached netdevice\n");
457 		return;
458 	}
459 
460 	if (on) {
461 		if (dev_mc_add(phydev->attached_dev, status_frame_dst))
462 			pr_warning("dp83640: failed to add mc address\n");
463 	} else {
464 		if (dev_mc_del(phydev->attached_dev, status_frame_dst))
465 			pr_warning("dp83640: failed to delete mc address\n");
466 	}
467 }
468 
469 static bool is_status_frame(struct sk_buff *skb, int type)
470 {
471 	struct ethhdr *h = eth_hdr(skb);
472 
473 	if (PTP_CLASS_V2_L2 == type &&
474 	    !memcmp(h->h_source, status_frame_src, sizeof(status_frame_src)))
475 		return true;
476 	else
477 		return false;
478 }
479 
480 static int expired(struct rxts *rxts)
481 {
482 	return time_after(jiffies, rxts->tmo);
483 }
484 
485 /* Caller must hold rx_lock. */
486 static void prune_rx_ts(struct dp83640_private *dp83640)
487 {
488 	struct list_head *this, *next;
489 	struct rxts *rxts;
490 
491 	list_for_each_safe(this, next, &dp83640->rxts) {
492 		rxts = list_entry(this, struct rxts, list);
493 		if (expired(rxts)) {
494 			list_del_init(&rxts->list);
495 			list_add(&rxts->list, &dp83640->rxpool);
496 		}
497 	}
498 }
499 
500 /* synchronize the phyters so they act as one clock */
501 
502 static void enable_broadcast(struct phy_device *phydev, int init_page, int on)
503 {
504 	int val;
505 	phy_write(phydev, PAGESEL, 0);
506 	val = phy_read(phydev, PHYCR2);
507 	if (on)
508 		val |= BC_WRITE;
509 	else
510 		val &= ~BC_WRITE;
511 	phy_write(phydev, PHYCR2, val);
512 	phy_write(phydev, PAGESEL, init_page);
513 }
514 
515 static void recalibrate(struct dp83640_clock *clock)
516 {
517 	s64 now, diff;
518 	struct phy_txts event_ts;
519 	struct timespec ts;
520 	struct list_head *this;
521 	struct dp83640_private *tmp;
522 	struct phy_device *master = clock->chosen->phydev;
523 	u16 cal_gpio, cfg0, evnt, ptp_trig, trigger, val;
524 
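	/*
	 * Calibration works by firing a trigger on the chosen ("master")
	 * phyter and capturing the resulting event time stamp on every
	 * phyter at once; each slave is then stepped by its measured offset
	 * from the master.  This assumes the calibration GPIO
	 * (gpio_tab[CALIBRATE_GPIO]) is wired in common to all of the
	 * phyters on the bus.
	 */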
525 	trigger = CAL_TRIGGER;
526 	cal_gpio = gpio_tab[CALIBRATE_GPIO];
527 
528 	mutex_lock(&clock->extreg_lock);
529 
530 	/*
531 	 * enable broadcast, disable status frames, enable ptp clock
532 	 */
533 	list_for_each(this, &clock->phylist) {
534 		tmp = list_entry(this, struct dp83640_private, list);
535 		enable_broadcast(tmp->phydev, clock->page, 1);
536 		tmp->cfg0 = ext_read(tmp->phydev, PAGE5, PSF_CFG0);
537 		ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, 0);
538 		ext_write(0, tmp->phydev, PAGE4, PTP_CTL, PTP_ENABLE);
539 	}
540 	enable_broadcast(master, clock->page, 1);
541 	cfg0 = ext_read(master, PAGE5, PSF_CFG0);
542 	ext_write(0, master, PAGE5, PSF_CFG0, 0);
543 	ext_write(0, master, PAGE4, PTP_CTL, PTP_ENABLE);
544 
545 	/*
546 	 * enable an event timestamp
547 	 */
548 	evnt = EVNT_WR | EVNT_RISE | EVNT_SINGLE;
549 	evnt |= (CAL_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
550 	evnt |= (cal_gpio & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
551 
552 	list_for_each(this, &clock->phylist) {
553 		tmp = list_entry(this, struct dp83640_private, list);
554 		ext_write(0, tmp->phydev, PAGE5, PTP_EVNT, evnt);
555 	}
556 	ext_write(0, master, PAGE5, PTP_EVNT, evnt);
557 
558 	/*
559 	 * configure a trigger
560 	 */
561 	ptp_trig = TRIG_WR | TRIG_IF_LATE | TRIG_PULSE;
562 	ptp_trig |= (trigger  & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT;
563 	ptp_trig |= (cal_gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT;
564 	ext_write(0, master, PAGE5, PTP_TRIG, ptp_trig);
565 
566 	/* load trigger */
567 	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
568 	val |= TRIG_LOAD;
569 	ext_write(0, master, PAGE4, PTP_CTL, val);
570 
571 	/* enable trigger */
572 	val &= ~TRIG_LOAD;
573 	val |= TRIG_EN;
574 	ext_write(0, master, PAGE4, PTP_CTL, val);
575 
576 	/* disable trigger */
577 	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
578 	val |= TRIG_DIS;
579 	ext_write(0, master, PAGE4, PTP_CTL, val);
580 
581 	/*
582 	 * read out and correct offsets
583 	 */
584 	val = ext_read(master, PAGE4, PTP_STS);
585 	pr_info("master PTP_STS  0x%04hx\n", val);
586 	val = ext_read(master, PAGE4, PTP_ESTS);
587 	pr_info("master PTP_ESTS 0x%04hx\n", val);
588 	event_ts.ns_lo  = ext_read(master, PAGE4, PTP_EDATA);
589 	event_ts.ns_hi  = ext_read(master, PAGE4, PTP_EDATA);
590 	event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
591 	event_ts.sec_hi = ext_read(master, PAGE4, PTP_EDATA);
592 	now = phy2txts(&event_ts);
593 
594 	list_for_each(this, &clock->phylist) {
595 		tmp = list_entry(this, struct dp83640_private, list);
596 		val = ext_read(tmp->phydev, PAGE4, PTP_STS);
597 		pr_info("slave  PTP_STS  0x%04hx\n", val);
598 		val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
599 		pr_info("slave  PTP_ESTS 0x%04hx\n", val);
600 		event_ts.ns_lo  = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
601 		event_ts.ns_hi  = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
602 		event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
603 		event_ts.sec_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
604 		diff = now - (s64) phy2txts(&event_ts);
605 		pr_info("slave offset %lld nanoseconds\n", diff);
606 		diff += ADJTIME_FIX;
607 		ts = ns_to_timespec(diff);
608 		tdr_write(0, tmp->phydev, &ts, PTP_STEP_CLK);
609 	}
610 
611 	/*
612 	 * restore status frames
613 	 */
614 	list_for_each(this, &clock->phylist) {
615 		tmp = list_entry(this, struct dp83640_private, list);
616 		ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, tmp->cfg0);
617 	}
618 	ext_write(0, master, PAGE5, PSF_CFG0, cfg0);
619 
620 	mutex_unlock(&clock->extreg_lock);
621 }
622 
623 /* time stamping methods */
624 
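/*
 * Each external event channel appears to occupy two bits in the extended
 * status word of an event status frame; this returns the bit that flags a
 * time stamp for the given channel.
 */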
625 static inline u16 exts_chan_to_edata(int ch)
626 {
627 	return 1 << ((ch + EXT_EVENT) * 2);
628 }
629 
630 static int decode_evnt(struct dp83640_private *dp83640,
631 		       void *data, u16 ests)
632 {
633 	struct phy_txts *phy_txts;
634 	struct ptp_clock_event event;
635 	int i, parsed;
636 	int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
637 	u16 ext_status = 0;
638 
639 	if (ests & MULT_EVNT) {
640 		ext_status = *(u16 *) data;
641 		data += sizeof(ext_status);
642 	}
643 
644 	phy_txts = data;
645 
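	/* The status frame apparently carries only the time stamp words that
	 * changed since the previous event, so any missing high-order words
	 * are reused from the previously saved edata.
	 */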
646 	switch (words) { /* fall through in every case */
647 	case 3:
648 		dp83640->edata.sec_hi = phy_txts->sec_hi;
649 	case 2:
650 		dp83640->edata.sec_lo = phy_txts->sec_lo;
651 	case 1:
652 		dp83640->edata.ns_hi = phy_txts->ns_hi;
653 	case 0:
654 		dp83640->edata.ns_lo = phy_txts->ns_lo;
655 	}
656 
657 	if (ext_status) {
658 		parsed = words + 2;
659 	} else {
660 		parsed = words + 1;
661 		i = ((ests >> EVNT_NUM_SHIFT) & EVNT_NUM_MASK) - EXT_EVENT;
662 		ext_status = exts_chan_to_edata(i);
663 	}
664 
665 	event.type = PTP_CLOCK_EXTTS;
666 	event.timestamp = phy2txts(&dp83640->edata);
667 
668 	for (i = 0; i < N_EXT_TS; i++) {
669 		if (ext_status & exts_chan_to_edata(i)) {
670 			event.index = i;
671 			ptp_clock_event(dp83640->clock->ptp_clock, &event);
672 		}
673 	}
674 
675 	return parsed * sizeof(u16);
676 }
677 
678 static void decode_rxts(struct dp83640_private *dp83640,
679 			struct phy_rxts *phy_rxts)
680 {
681 	struct rxts *rxts;
682 	unsigned long flags;
683 
684 	spin_lock_irqsave(&dp83640->rx_lock, flags);
685 
686 	prune_rx_ts(dp83640);
687 
688 	if (list_empty(&dp83640->rxpool)) {
689 		pr_debug("dp83640: rx timestamp pool is empty\n");
690 		goto out;
691 	}
692 	rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
693 	list_del_init(&rxts->list);
694 	phy2rxts(phy_rxts, rxts);
695 	list_add_tail(&rxts->list, &dp83640->rxts);
696 out:
697 	spin_unlock_irqrestore(&dp83640->rx_lock, flags);
698 }
699 
700 static void decode_txts(struct dp83640_private *dp83640,
701 			struct phy_txts *phy_txts)
702 {
703 	struct skb_shared_hwtstamps shhwtstamps;
704 	struct sk_buff *skb;
705 	u64 ns;
706 
707 	/* We must already have the skb that triggered this. */
708 
709 	skb = skb_dequeue(&dp83640->tx_queue);
710 
711 	if (!skb) {
712 		pr_debug("dp83640: have timestamp but tx_queue empty\n");
713 		return;
714 	}
715 	ns = phy2txts(phy_txts);
716 	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
717 	shhwtstamps.hwtstamp = ns_to_ktime(ns);
718 	skb_complete_tx_timestamp(skb, &shhwtstamps);
719 }
720 
721 static void decode_status_frame(struct dp83640_private *dp83640,
722 				struct sk_buff *skb)
723 {
724 	struct phy_rxts *phy_rxts;
725 	struct phy_txts *phy_txts;
726 	u8 *ptr;
727 	int len, size;
728 	u16 ests, type;
729 
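	/* Walk the records in the status frame payload: each one starts with
	 * a 16 bit word whose upper nibble selects the record type (event,
	 * rx or tx time stamp) and whose lower 12 bits carry extra event
	 * status, followed by the time stamp words themselves.
	 */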
730 	ptr = skb->data + 2;
731 
732 	for (len = skb_headlen(skb) - 2; len > sizeof(type); len -= size) {
733 
734 		type = *(u16 *)ptr;
735 		ests = type & 0x0fff;
736 		type = type & 0xf000;
737 		len -= sizeof(type);
738 		ptr += sizeof(type);
739 
740 		if (PSF_RX == type && len >= sizeof(*phy_rxts)) {
741 
742 			phy_rxts = (struct phy_rxts *) ptr;
743 			decode_rxts(dp83640, phy_rxts);
744 			size = sizeof(*phy_rxts);
745 
746 		} else if (PSF_TX == type && len >= sizeof(*phy_txts)) {
747 
748 			phy_txts = (struct phy_txts *) ptr;
749 			decode_txts(dp83640, phy_txts);
750 			size = sizeof(*phy_txts);
751 
752 		} else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) {
753 
754 			size = decode_evnt(dp83640, ptr, ests);
755 
756 		} else {
757 			size = 0;
758 			break;
759 		}
760 		ptr += size;
761 	}
762 }
763 
764 static int is_sync(struct sk_buff *skb, int type)
765 {
766 	u8 *data = skb->data, *msgtype;
767 	unsigned int offset = 0;
768 
769 	switch (type) {
770 	case PTP_CLASS_V1_IPV4:
771 	case PTP_CLASS_V2_IPV4:
772 		offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
773 		break;
774 	case PTP_CLASS_V1_IPV6:
775 	case PTP_CLASS_V2_IPV6:
776 		offset = OFF_PTP6;
777 		break;
778 	case PTP_CLASS_V2_L2:
779 		offset = ETH_HLEN;
780 		break;
781 	case PTP_CLASS_V2_VLAN:
782 		offset = ETH_HLEN + VLAN_HLEN;
783 		break;
784 	default:
785 		return 0;
786 	}
787 
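	/* PTPv1 keeps the message type in the control field, while PTPv2
	 * carries it in the low nibble of the first header byte.
	 */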
788 	if (type & PTP_CLASS_V1)
789 		offset += OFF_PTP_CONTROL;
790 
791 	if (skb->len < offset + 1)
792 		return 0;
793 
794 	msgtype = data + offset;
795 
796 	return (*msgtype & 0xf) == 0;
797 }
798 
799 static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
800 {
801 	u16 *seqid;
802 	unsigned int offset;
803 	u8 *msgtype, *data = skb_mac_header(skb);
804 
805 	/* Check the sequenceID and messageType; the 12 bit hash of offset
805 	 * 20-29 is stored in rxts but not compared here. */
806 
807 	switch (type) {
808 	case PTP_CLASS_V1_IPV4:
809 	case PTP_CLASS_V2_IPV4:
810 		offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
811 		break;
812 	case PTP_CLASS_V1_IPV6:
813 	case PTP_CLASS_V2_IPV6:
814 		offset = OFF_PTP6;
815 		break;
816 	case PTP_CLASS_V2_L2:
817 		offset = ETH_HLEN;
818 		break;
819 	case PTP_CLASS_V2_VLAN:
820 		offset = ETH_HLEN + VLAN_HLEN;
821 		break;
822 	default:
823 		return 0;
824 	}
825 
826 	if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
827 		return 0;
828 
829 	if (unlikely(type & PTP_CLASS_V1))
830 		msgtype = data + offset + OFF_PTP_CONTROL;
831 	else
832 		msgtype = data + offset;
833 
834 	seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
835 
836 	return (rxts->msgtype == (*msgtype & 0xf) &&
837 		rxts->seqid   == ntohs(*seqid));
838 }
839 
840 static void dp83640_free_clocks(void)
841 {
842 	struct dp83640_clock *clock;
843 	struct list_head *this, *next;
844 
845 	mutex_lock(&phyter_clocks_lock);
846 
847 	list_for_each_safe(this, next, &phyter_clocks) {
848 		clock = list_entry(this, struct dp83640_clock, list);
849 		if (!list_empty(&clock->phylist)) {
850 			pr_warning("phy list non-empty while unloading\n");
851 			BUG();
852 		}
853 		list_del(&clock->list);
854 		mutex_destroy(&clock->extreg_lock);
855 		mutex_destroy(&clock->clock_lock);
856 		put_device(&clock->bus->dev);
857 		kfree(clock);
858 	}
859 
860 	mutex_unlock(&phyter_clocks_lock);
861 }
862 
863 static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
864 {
865 	INIT_LIST_HEAD(&clock->list);
866 	clock->bus = bus;
867 	mutex_init(&clock->extreg_lock);
868 	mutex_init(&clock->clock_lock);
869 	INIT_LIST_HEAD(&clock->phylist);
870 	clock->caps.owner = THIS_MODULE;
871 	sprintf(clock->caps.name, "dp83640 timer");
872 	clock->caps.max_adj	= 1953124;
873 	clock->caps.n_alarm	= 0;
874 	clock->caps.n_ext_ts	= N_EXT_TS;
875 	clock->caps.n_per_out	= 1;
876 	clock->caps.pps		= 0;
877 	clock->caps.adjfreq	= ptp_dp83640_adjfreq;
878 	clock->caps.adjtime	= ptp_dp83640_adjtime;
879 	clock->caps.gettime	= ptp_dp83640_gettime;
880 	clock->caps.settime	= ptp_dp83640_settime;
881 	clock->caps.enable	= ptp_dp83640_enable;
882 	/*
883 	 * Get a reference to this bus instance.
884 	 */
885 	get_device(&bus->dev);
886 }
887 
888 static int choose_this_phy(struct dp83640_clock *clock,
889 			   struct phy_device *phydev)
890 {
891 	if (chosen_phy == -1 && !clock->chosen)
892 		return 1;
893 
894 	if (chosen_phy == phydev->addr)
895 		return 1;
896 
897 	return 0;
898 }
899 
900 static struct dp83640_clock *dp83640_clock_get(struct dp83640_clock *clock)
901 {
902 	if (clock)
903 		mutex_lock(&clock->clock_lock);
904 	return clock;
905 }
906 
907 /*
908  * Look up and lock a clock by bus instance.
909  * If there is no clock for this bus, then create it first.
910  */
911 static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
912 {
913 	struct dp83640_clock *clock = NULL, *tmp;
914 	struct list_head *this;
915 
916 	mutex_lock(&phyter_clocks_lock);
917 
918 	list_for_each(this, &phyter_clocks) {
919 		tmp = list_entry(this, struct dp83640_clock, list);
920 		if (tmp->bus == bus) {
921 			clock = tmp;
922 			break;
923 		}
924 	}
925 	if (clock)
926 		goto out;
927 
928 	clock = kzalloc(sizeof(struct dp83640_clock), GFP_KERNEL);
929 	if (!clock)
930 		goto out;
931 
932 	dp83640_clock_init(clock, bus);
933 	list_add_tail(&clock->list, &phyter_clocks);
934 out:
935 	mutex_unlock(&phyter_clocks_lock);
936 
937 	return dp83640_clock_get(clock);
938 }
939 
940 static void dp83640_clock_put(struct dp83640_clock *clock)
941 {
942 	mutex_unlock(&clock->clock_lock);
943 }
944 
945 static int dp83640_probe(struct phy_device *phydev)
946 {
947 	struct dp83640_clock *clock;
948 	struct dp83640_private *dp83640;
949 	int err = -ENOMEM, i;
950 
951 	if (phydev->addr == BROADCAST_ADDR)
952 		return 0;
953 
954 	clock = dp83640_clock_get_bus(phydev->bus);
955 	if (!clock)
956 		goto no_clock;
957 
958 	dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
959 	if (!dp83640)
960 		goto no_memory;
961 
962 	dp83640->phydev = phydev;
963 	INIT_WORK(&dp83640->ts_work, rx_timestamp_work);
964 
965 	INIT_LIST_HEAD(&dp83640->rxts);
966 	INIT_LIST_HEAD(&dp83640->rxpool);
967 	for (i = 0; i < MAX_RXTS; i++)
968 		list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);
969 
970 	phydev->priv = dp83640;
971 
972 	spin_lock_init(&dp83640->rx_lock);
973 	skb_queue_head_init(&dp83640->rx_queue);
974 	skb_queue_head_init(&dp83640->tx_queue);
975 
976 	dp83640->clock = clock;
977 
978 	if (choose_this_phy(clock, phydev)) {
979 		clock->chosen = dp83640;
980 		clock->ptp_clock = ptp_clock_register(&clock->caps);
981 		if (IS_ERR(clock->ptp_clock)) {
982 			err = PTR_ERR(clock->ptp_clock);
983 			goto no_register;
984 		}
985 	} else
986 		list_add_tail(&dp83640->list, &clock->phylist);
987 
988 	if (clock->chosen && !list_empty(&clock->phylist))
989 		recalibrate(clock);
990 	else
991 		enable_broadcast(dp83640->phydev, clock->page, 1);
992 
993 	dp83640_clock_put(clock);
994 	return 0;
995 
996 no_register:
997 	clock->chosen = NULL;
998 	kfree(dp83640);
999 no_memory:
1000 	dp83640_clock_put(clock);
1001 no_clock:
1002 	return err;
1003 }
1004 
1005 static void dp83640_remove(struct phy_device *phydev)
1006 {
1007 	struct dp83640_clock *clock;
1008 	struct list_head *this, *next;
1009 	struct dp83640_private *tmp, *dp83640 = phydev->priv;
1010 	struct sk_buff *skb;
1011 
1012 	if (phydev->addr == BROADCAST_ADDR)
1013 		return;
1014 
1015 	enable_status_frames(phydev, false);
1016 	cancel_work_sync(&dp83640->ts_work);
1017 
1018 	while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL)
1019 		kfree_skb(skb);
1020 
1021 	while ((skb = skb_dequeue(&dp83640->tx_queue)) != NULL)
1022 		skb_complete_tx_timestamp(skb, NULL);
1023 
1024 	clock = dp83640_clock_get(dp83640->clock);
1025 
1026 	if (dp83640 == clock->chosen) {
1027 		ptp_clock_unregister(clock->ptp_clock);
1028 		clock->chosen = NULL;
1029 	} else {
1030 		list_for_each_safe(this, next, &clock->phylist) {
1031 			tmp = list_entry(this, struct dp83640_private, list);
1032 			if (tmp == dp83640) {
1033 				list_del_init(&tmp->list);
1034 				break;
1035 			}
1036 		}
1037 	}
1038 
1039 	dp83640_clock_put(clock);
1040 	kfree(dp83640);
1041 }
1042 
1043 static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
1044 {
1045 	struct dp83640_private *dp83640 = phydev->priv;
1046 	struct hwtstamp_config cfg;
1047 	u16 txcfg0, rxcfg0;
1048 
1049 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1050 		return -EFAULT;
1051 
1052 	if (cfg.flags) /* reserved for future extensions */
1053 		return -EINVAL;
1054 
1055 	if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ONESTEP_SYNC)
1056 		return -ERANGE;
1057 
1058 	dp83640->hwts_tx_en = cfg.tx_type;
1059 
1060 	switch (cfg.rx_filter) {
1061 	case HWTSTAMP_FILTER_NONE:
1062 		dp83640->hwts_rx_en = 0;
1063 		dp83640->layer = 0;
1064 		dp83640->version = 0;
1065 		break;
1066 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1067 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1068 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1069 		dp83640->hwts_rx_en = 1;
1070 		dp83640->layer = LAYER4;
1071 		dp83640->version = 1;
1072 		break;
1073 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1074 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1075 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1076 		dp83640->hwts_rx_en = 1;
1077 		dp83640->layer = LAYER4;
1078 		dp83640->version = 2;
1079 		break;
1080 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1081 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1082 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1083 		dp83640->hwts_rx_en = 1;
1084 		dp83640->layer = LAYER2;
1085 		dp83640->version = 2;
1086 		break;
1087 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1088 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1089 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1090 		dp83640->hwts_rx_en = 1;
1091 		dp83640->layer = LAYER4|LAYER2;
1092 		dp83640->version = 2;
1093 		break;
1094 	default:
1095 		return -ERANGE;
1096 	}
1097 
1098 	txcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;
1099 	rxcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;
1100 
1101 	if (dp83640->layer & LAYER2) {
1102 		txcfg0 |= TX_L2_EN;
1103 		rxcfg0 |= RX_L2_EN;
1104 	}
1105 	if (dp83640->layer & LAYER4) {
1106 		txcfg0 |= TX_IPV6_EN | TX_IPV4_EN;
1107 		rxcfg0 |= RX_IPV6_EN | RX_IPV4_EN;
1108 	}
1109 
1110 	if (dp83640->hwts_tx_en)
1111 		txcfg0 |= TX_TS_EN;
1112 
1113 	if (dp83640->hwts_tx_en == HWTSTAMP_TX_ONESTEP_SYNC)
1114 		txcfg0 |= SYNC_1STEP | CHK_1STEP;
1115 
1116 	if (dp83640->hwts_rx_en)
1117 		rxcfg0 |= RX_TS_EN;
1118 
1119 	mutex_lock(&dp83640->clock->extreg_lock);
1120 
1121 	if (dp83640->hwts_tx_en || dp83640->hwts_rx_en) {
1122 		enable_status_frames(phydev, true);
1123 		ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
1124 	}
1125 
1126 	ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
1127 	ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
1128 
1129 	mutex_unlock(&dp83640->clock->extreg_lock);
1130 
1131 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1132 }
1133 
1134 static void rx_timestamp_work(struct work_struct *work)
1135 {
1136 	struct dp83640_private *dp83640 =
1137 		container_of(work, struct dp83640_private, ts_work);
1138 	struct list_head *this, *next;
1139 	struct rxts *rxts;
1140 	struct skb_shared_hwtstamps *shhwtstamps;
1141 	struct sk_buff *skb;
1142 	unsigned int type;
1143 	unsigned long flags;
1144 
1145 	/* Deliver each deferred packet, with or without a time stamp. */
1146 
1147 	while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL) {
1148 		type = SKB_PTP_TYPE(skb);
1149 		spin_lock_irqsave(&dp83640->rx_lock, flags);
1150 		list_for_each_safe(this, next, &dp83640->rxts) {
1151 			rxts = list_entry(this, struct rxts, list);
1152 			if (match(skb, type, rxts)) {
1153 				shhwtstamps = skb_hwtstamps(skb);
1154 				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
1155 				shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
1156 				list_del_init(&rxts->list);
1157 				list_add(&rxts->list, &dp83640->rxpool);
1158 				break;
1159 			}
1160 		}
1161 		spin_unlock_irqrestore(&dp83640->rx_lock, flags);
1162 		netif_rx(skb);
1163 	}
1164 
1165 	/* Clear out expired time stamps. */
1166 
1167 	spin_lock_irqsave(&dp83640->rx_lock, flags);
1168 	prune_rx_ts(dp83640);
1169 	spin_unlock_irqrestore(&dp83640->rx_lock, flags);
1170 }
1171 
1172 static bool dp83640_rxtstamp(struct phy_device *phydev,
1173 			     struct sk_buff *skb, int type)
1174 {
1175 	struct dp83640_private *dp83640 = phydev->priv;
1176 
1177 	if (!dp83640->hwts_rx_en)
1178 		return false;
1179 
1180 	if (is_status_frame(skb, type)) {
1181 		decode_status_frame(dp83640, skb);
1182 		kfree_skb(skb);
1183 		return true;
1184 	}
1185 
1186 	SKB_PTP_TYPE(skb) = type;
1187 	skb_queue_tail(&dp83640->rx_queue, skb);
1188 	schedule_work(&dp83640->ts_work);
1189 
1190 	return true;
1191 }
1192 
1193 static void dp83640_txtstamp(struct phy_device *phydev,
1194 			     struct sk_buff *skb, int type)
1195 {
1196 	struct dp83640_private *dp83640 = phydev->priv;
1197 
1198 	switch (dp83640->hwts_tx_en) {
1199 
1200 	case HWTSTAMP_TX_ONESTEP_SYNC:
1201 		if (is_sync(skb, type)) {
1202 			skb_complete_tx_timestamp(skb, NULL);
1203 			return;
1204 		}
1205 		/* fall through */
1206 	case HWTSTAMP_TX_ON:
1207 		skb_queue_tail(&dp83640->tx_queue, skb);
1208 		schedule_work(&dp83640->ts_work);
1209 		break;
1210 
1211 	case HWTSTAMP_TX_OFF:
1212 	default:
1213 		skb_complete_tx_timestamp(skb, NULL);
1214 		break;
1215 	}
1216 }
1217 
1218 static struct phy_driver dp83640_driver = {
1219 	.phy_id		= DP83640_PHY_ID,
1220 	.phy_id_mask	= 0xfffffff0,
1221 	.name		= "NatSemi DP83640",
1222 	.features	= PHY_BASIC_FEATURES,
1223 	.flags		= 0,
1224 	.probe		= dp83640_probe,
1225 	.remove		= dp83640_remove,
1226 	.config_aneg	= genphy_config_aneg,
1227 	.read_status	= genphy_read_status,
1228 	.hwtstamp	= dp83640_hwtstamp,
1229 	.rxtstamp	= dp83640_rxtstamp,
1230 	.txtstamp	= dp83640_txtstamp,
1231 	.driver		= {.owner = THIS_MODULE,}
1232 };
1233 
1234 static int __init dp83640_init(void)
1235 {
1236 	return phy_driver_register(&dp83640_driver);
1237 }
1238 
1239 static void __exit dp83640_exit(void)
1240 {
1241 	dp83640_free_clocks();
1242 	phy_driver_unregister(&dp83640_driver);
1243 }
1244 
1245 MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
1246 MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
1247 MODULE_LICENSE("GPL");
1248 
1249 module_init(dp83640_init);
1250 module_exit(dp83640_exit);
1251 
1252 static struct mdio_device_id __maybe_unused dp83640_tbl[] = {
1253 	{ DP83640_PHY_ID, 0xfffffff0 },
1254 	{ }
1255 };
1256 
1257 MODULE_DEVICE_TABLE(mdio, dp83640_tbl);
1258