// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation
 */

#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"
#include "stmmac_ptp.h"

#define INTEL_MGBE_ADHOC_ADDR	0x15
#define INTEL_MGBE_XPCS_ADDR	0x16

/* PTP clock frequency selection for PSE & PCH GbE */
#define PSE_PTP_CLK_FREQ_MASK		(GMAC_GPO0 | GMAC_GPO3)
#define PSE_PTP_CLK_FREQ_19_2MHZ	(GMAC_GPO0)
#define PSE_PTP_CLK_FREQ_200MHZ		(GMAC_GPO0 | GMAC_GPO3)
#define PSE_PTP_CLK_FREQ_256MHZ		(0)
#define PCH_PTP_CLK_FREQ_MASK		(GMAC_GPO0)
#define PCH_PTP_CLK_FREQ_19_2MHZ	(GMAC_GPO0)
#define PCH_PTP_CLK_FREQ_200MHZ		(0)

/* Cross-timestamping defines */
#define ART_CPUID_LEAF		0x15
#define EHL_PSE_ART_MHZ		19200000
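/* Note: despite the "MHZ" suffix, EHL_PSE_ART_MHZ appears to be the 19.2 MHz
 * PSE ART reference expressed in Hz; it is used as the divisor when deriving
 * crossts_adj in intel_mgbe_pse_crossts_adj() below.
 */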

struct intel_priv_data {
	int mdio_adhoc_addr;	/* mdio address for serdes & etc */
	unsigned long crossts_adj;
	bool is_pse;
};

/* This struct is used to associate a PCI function of the MAC controller on a
 * board, discovered via DMI, with the address of the PHY connected to that
 * MAC. A negative address value means the MAC controller is not connected to
 * a PHY.
 */
struct stmmac_pci_func_data {
	unsigned int func;
	int phy_addr;
};

struct stmmac_pci_dmi_data {
	const struct stmmac_pci_func_data *func;
	size_t nfuncs;
};

struct stmmac_pci_info {
	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};

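/* Look up the PHY address for this PCI function on DMI-described boards:
 * match the running system against dmi_list, then walk the board's
 * per-function table until an entry with a matching PCI function number is
 * found. Returns the PHY address, or -ENODEV if the board or function is
 * unknown.
 */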
static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
				    const struct dmi_system_id *dmi_list)
{
	const struct stmmac_pci_func_data *func_data;
	const struct stmmac_pci_dmi_data *dmi_data;
	const struct dmi_system_id *dmi_id;
	int func = PCI_FUNC(pdev->devfn);
	size_t n;

	dmi_id = dmi_first_match(dmi_list);
	if (!dmi_id)
		return -ENODEV;

	dmi_data = dmi_id->driver_data;
	func_data = dmi_data->func;

	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
		if (func_data->func == func)
			return func_data->phy_addr;

	return -ENODEV;
}

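/* Poll a SerDes register over the ad-hoc MDIO address until the bits in
 * mask read back as val. Retries up to 10 times with a delay of
 * POLL_DELAY_US between reads; returns 0 on match or -ETIMEDOUT.
 */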
static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
			      int phyreg, u32 mask, u32 val)
{
	unsigned int retries = 10;
	int val_rd;

	do {
		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
		if ((val_rd & mask) == (val & mask))
			return 0;
		udelay(POLL_DELAY_US);
	} while (--retries);

	return -ETIMEDOUT;
}

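/* SerDes power-up sequence used by the SGMII configurations: request the PLL
 * clock and wait for the ack, assert the lane reset and wait for it to be
 * reflected, then move the lane to power state P0. Each step is confirmed by
 * polling the corresponding bits in SERDES_GSR0. On PSE instances the SGMII
 * PHY Rx clock is additionally ungated at the end.
 */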
static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
{
	struct intel_priv_data *intel_priv = priv_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack assertion */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk request timeout\n");
		return data;
	}

	/* assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes assert lane reset timeout\n");
		return data;
	}

	/* move power state to P0 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P0 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P0 timeout.\n");
		return data;
	}

	/* PSE only - ungate SGMII PHY Rx Clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       0, SERDES_PHY_RX_CLK);

	return 0;
}

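/* SerDes power-down sequence, the reverse of intel_serdes_powerup(): gate the
 * SGMII PHY Rx clock (PSE only), move the lane to power state P3, release the
 * PLL clock request and de-assert the lane reset, polling SERDES_GSR0 after
 * each step.
 */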
static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* PSE only - gate SGMII PHY Rx Clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       SERDES_PHY_RX_CLK, 0);

	/* move power state to P3 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P3 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P3 timeout\n");
		return;
	}

	/* de-assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack de-assert */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  (u32)~SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
		return;
	}

	/* de-assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for de-assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  (u32)~SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
		return;
	}
}

/* Program the PTP clock frequency for the different variants of
 * Intel mGbE, which have slightly different GPO mappings
 */
static void intel_mgbe_ptp_clk_freq_config(void *npriv)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)npriv;
	struct intel_priv_data *intel_priv;
	u32 gpio_value;

	intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;

	gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);

	if (intel_priv->is_pse) {
		/* For PSE GbE, use 200MHz */
		gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
		gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
	} else {
		/* For PCH GbE, use 200MHz */
		gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
		gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
	}

	writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
}

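/* Read the 64-bit ART time: the PMC_ART_VALUE3..PMC_ART_VALUE0 words are
 * fetched over the ad-hoc MDIO address and concatenated, most significant
 * word first, shifting by GMAC4_ART_TIME_SHIFT between reads.
 */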
static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
			u64 *art_time)
{
	u64 ns;

	ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);

	*art_time = ns;
}

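/* Cross-timestamp callback (plat->crosststamp): arm the internal auxiliary
 * snapshot selected by int_snapshot_num, clear the snapshot FIFO, then
 * generate a rising edge on GPO1 to trigger the snapshot. After polling
 * GMAC_INT_STATUS for GMAC_INT_TSIE, read the PTP time from the snapshot
 * FIFO and the corresponding ART time via get_arttime(); the ART value is
 * converted to TSC cycles and scaled by crossts_adj (1 except on PSE).
 */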
static int intel_crosststamp(ktime_t *device,
			     struct system_counterval_t *system,
			     void *ctx)
{
	struct intel_priv_data *intel_priv;

	struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
	void __iomem *ptpaddr = priv->ptpaddr;
	void __iomem *ioaddr = priv->hw->pcsr;
	unsigned long flags;
	u64 art_time = 0;
	u64 ptp_time = 0;
	u32 num_snapshot;
	u32 gpio_value;
	u32 acr_value;
	int ret;
	u32 v;
	int i;

	if (!boot_cpu_has(X86_FEATURE_ART))
		return -EOPNOTSUPP;

	intel_priv = priv->plat->bsp_priv;

	/* Internal cross-timestamping and external triggered event
	 * timestamping cannot run concurrently.
	 */
	if (priv->plat->ext_snapshot_en)
		return -EBUSY;

	mutex_lock(&priv->aux_ts_lock);
	/* Enable Internal snapshot trigger */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value &= ~PTP_ACR_MASK;
	switch (priv->plat->int_snapshot_num) {
	case AUX_SNAPSHOT0:
		acr_value |= PTP_ACR_ATSEN0;
		break;
	case AUX_SNAPSHOT1:
		acr_value |= PTP_ACR_ATSEN1;
		break;
	case AUX_SNAPSHOT2:
		acr_value |= PTP_ACR_ATSEN2;
		break;
	case AUX_SNAPSHOT3:
		acr_value |= PTP_ACR_ATSEN3;
		break;
	default:
		mutex_unlock(&priv->aux_ts_lock);
		return -EINVAL;
	}
	writel(acr_value, ptpaddr + PTP_ACR);

	/* Clear FIFO */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value |= PTP_ACR_ATSFC;
	writel(acr_value, ptpaddr + PTP_ACR);
	/* Release the mutex */
	mutex_unlock(&priv->aux_ts_lock);

	/* Trigger the internal snapshot signal: create a rising edge by
	 * toggling GPO1 low and then back to high.
	 */
	gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
	gpio_value &= ~GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
	gpio_value |= GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);

	/* Poll for time sync operation done */
	ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v,
				 (v & GMAC_INT_TSIE), 100, 10000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: Wait for time sync operation timeout\n", __func__);
		return ret;
	}

	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
			GMAC_TIMESTAMP_ATSNS_MASK) >>
			GMAC_TIMESTAMP_ATSNS_SHIFT;

	/* Repeat until the timestamps are from the last FIFO segment */
	for (i = 0; i < num_snapshot; i++) {
		spin_lock_irqsave(&priv->ptp_lock, flags);
		stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
		*device = ns_to_ktime(ptp_time);
		spin_unlock_irqrestore(&priv->ptp_lock, flags);
		get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
		*system = convert_art_to_tsc(art_time);
	}

	system->cycles *= intel_priv->crossts_adj;

	return 0;
}

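/* Compute the cross-timestamp adjustment factor applied in
 * intel_crosststamp(): the ART crystal frequency reported in ECX of CPUID
 * leaf 0x15 divided by base, the PSE ART rate in Hz.
 */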
static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
				       int base)
{
	if (boot_cpu_has(X86_FEATURE_ART)) {
		unsigned int art_freq;

		/* On systems that support ART, ART frequency can be obtained
		 * from ECX register of CPUID leaf (0x15).
		 */
		art_freq = cpuid_ecx(ART_CPUID_LEAF);
		do_div(art_freq, base);
		intel_priv->crossts_adj = art_freq;
	}
}

static void common_default_data(struct plat_stmmacenet_data *plat)
{
	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
	plat->has_gmac = 1;
	plat->force_sf_dma_mode = 1;

	plat->mdio_bus_data->needs_reset = true;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	/* Set default number of RX and TX queues to use */
	plat->tx_queues_to_use = 1;
	plat->rx_queues_to_use = 1;

	/* Disable Priority config by default */
	plat->tx_queues_cfg[0].use_prio = false;
	plat->rx_queues_cfg[0].use_prio = false;

	/* Disable RX queues routing by default */
	plat->rx_queues_cfg[0].pkt_route = 0x0;
}

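/* Defaults shared by all Intel mGbE (EHL/TGL/ADL-S) instances: GMAC4 with DCB
 * queues and WRR TX scheduling, AXI and DMA configuration, a fixed-rate
 * "stmmac" clock registered from clk_ptp_rate, XPCS for SGMII, the
 * cross-timestamp hooks and the MSI vector layout.
 */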
static int intel_mgbe_common_data(struct pci_dev *pdev,
				  struct plat_stmmacenet_data *plat)
{
	char clk_name[20];
	int ret;
	int i;

	plat->pdev = pdev;
	plat->phy_addr = -1;
	plat->clk_csr = 5;
	plat->has_gmac = 0;
	plat->has_gmac4 = 1;
	plat->force_sf_dma_mode = 0;
	plat->tso_en = 1;

	/* Multiplying factor applied to the clk_eee_i clock period to bring
	 * it close to 100 ns. The value should be programmed such that
	 * clk_eee_time_period * (MULT_FACT_100NS + 1) falls within 80 ns to
	 * 120 ns:
	 * clk_eee frequency is 19.2 MHz
	 * clk_eee_time_period is 52 ns
	 * 52 ns * (1 + 1) = 104 ns
	 * MULT_FACT_100NS = 1
	 */
	plat->mult_fact_100ns = 1;

	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	for (i = 0; i < plat->rx_queues_to_use; i++) {
		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
		plat->rx_queues_cfg[i].chan = i;

		/* Disable Priority config by default */
		plat->rx_queues_cfg[i].use_prio = false;

		/* Disable RX queues routing by default */
		plat->rx_queues_cfg[i].pkt_route = 0x0;
	}

	for (i = 0; i < plat->tx_queues_to_use; i++) {
		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;

		/* Disable Priority config by default */
		plat->tx_queues_cfg[i].use_prio = false;
		/* Default TX Q0 to use TSO and the rest of the TX queues for TBS */
		if (i > 0)
			plat->tx_queues_cfg[i].tbs_en = 1;
	}

	/* FIFO size is 4096 bytes for 1 tx/rx queue */
	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;

	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	plat->tx_queues_cfg[0].weight = 0x09;
	plat->tx_queues_cfg[1].weight = 0x0A;
	plat->tx_queues_cfg[2].weight = 0x0B;
	plat->tx_queues_cfg[3].weight = 0x0C;
	plat->tx_queues_cfg[4].weight = 0x0D;
	plat->tx_queues_cfg[5].weight = 0x0E;
	plat->tx_queues_cfg[6].weight = 0x0F;
	plat->tx_queues_cfg[7].weight = 0x10;

	plat->dma_cfg->pbl = 32;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 0;
	plat->dma_cfg->mixed_burst = 0;
	plat->dma_cfg->aal = 0;
	plat->dma_cfg->dche = true;

	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
				 GFP_KERNEL);
	if (!plat->axi)
		return -ENOMEM;

	plat->axi->axi_lpi_en = 0;
	plat->axi->axi_xit_frm = 0;
	plat->axi->axi_wr_osr_lmt = 1;
	plat->axi->axi_rd_osr_lmt = 1;
	plat->axi->axi_blen[0] = 4;
	plat->axi->axi_blen[1] = 8;
	plat->axi->axi_blen[2] = 16;

	plat->ptp_max_adj = plat->clk_ptp_rate;
	plat->eee_usecs_rate = plat->clk_ptp_rate;
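	/* eee_usecs_rate is later used in intel_eth_pci_probe() to program
	 * GMAC_1US_TIC_COUNTER with (rate / 1000000) - 1, e.g. 199 for the
	 * 200 MHz clk_ptp_rate used by the EHL/TGL configurations.
	 */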

	/* Set system clock */
	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));

	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
						   clk_name, NULL, 0,
						   plat->clk_ptp_rate);

	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Failed to register stmmac-clk\n");
		plat->stmmac_clk = NULL;
	}

	ret = clk_prepare_enable(plat->stmmac_clk);
	if (ret) {
		clk_unregister_fixed_rate(plat->stmmac_clk);
		return ret;
	}

	plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	plat->vlan_fail_q_en = true;

	/* Use the last Rx queue */
	plat->vlan_fail_q = plat->rx_queues_to_use - 1;

	/* Intel mGbE SGMII interface uses pcs-xpcs */
	if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		plat->mdio_bus_data->has_xpcs = true;
		plat->mdio_bus_data->xpcs_an_inband = true;
	}

	/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
	plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
	plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;

	plat->int_snapshot_num = AUX_SNAPSHOT1;
	plat->ext_snapshot_num = AUX_SNAPSHOT0;

	plat->has_crossts = true;
	plat->crosststamp = intel_crosststamp;

	/* Setup MSI vector offset specific to Intel mGbE controller */
	plat->msi_mac_vec = 29;
	plat->msi_lpi_vec = 28;
	plat->msi_sfty_ce_vec = 27;
	plat->msi_sfty_ue_vec = 26;
	plat->msi_rx_base_vec = 0;
	plat->msi_tx_base_vec = 1;
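	/* With rx_base_vec 0 and tx_base_vec 1, stmmac_config_multi_msi()
	 * assigns vector 2 * i to RX queue i and vector 2 * i + 1 to TX
	 * queue i, while vectors 26-29 carry the safety, LPI and MAC events.
	 */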

	return 0;
}

static int ehl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 8;
	plat->tx_queues_to_use = 8;
	plat->clk_ptp_rate = 200000000;

	plat->safety_feat_cfg->tsoee = 1;
	plat->safety_feat_cfg->mrxpee = 1;
	plat->safety_feat_cfg->mestee = 1;
	plat->safety_feat_cfg->mrxee = 1;
	plat->safety_feat_cfg->mtxee = 1;
	plat->safety_feat_cfg->epsi = 0;
	plat->safety_feat_cfg->edpp = 0;
	plat->safety_feat_cfg->prtyen = 0;
	plat->safety_feat_cfg->tmouten = 0;

	return intel_mgbe_common_data(pdev, plat);
}

static int ehl_sgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_sgmii1g_info = {
	.setup = ehl_sgmii_data,
};

static int ehl_rgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_rgmii1g_info = {
	.setup = ehl_rgmii_data,
};

static int ehl_pse0_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	intel_priv->is_pse = true;
	plat->bus_id = 2;
	plat->addr64 = 32;

	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);

	return ehl_common_data(pdev, plat);
}

static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
	.setup = ehl_pse0_rgmii1g_data,
};

static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
	.setup = ehl_pse0_sgmii1g_data,
};

static int ehl_pse1_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	intel_priv->is_pse = true;
	plat->bus_id = 3;
	plat->addr64 = 32;

	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);

	return ehl_common_data(pdev, plat);
}

static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
	.setup = ehl_pse1_rgmii1g_data,
};

static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
	.setup = ehl_pse1_sgmii1g_data,
};

static int tgl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 6;
	plat->tx_queues_to_use = 4;
	plat->clk_ptp_rate = 200000000;

	plat->safety_feat_cfg->tsoee = 1;
	plat->safety_feat_cfg->mrxpee = 0;
	plat->safety_feat_cfg->mestee = 1;
	plat->safety_feat_cfg->mrxee = 1;
	plat->safety_feat_cfg->mtxee = 1;
	plat->safety_feat_cfg->epsi = 0;
	plat->safety_feat_cfg->edpp = 0;
	plat->safety_feat_cfg->prtyen = 0;
	plat->safety_feat_cfg->tmouten = 0;

	return intel_mgbe_common_data(pdev, plat);
}

static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
			       struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
	.setup = tgl_sgmii_phy0_data,
};

static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
			       struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
	.setup = tgl_sgmii_phy1_data,
};

static int adls_sgmii_phy0_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	/* SerDes power up and power down are done in BIOS for ADL */

	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
	.setup = adls_sgmii_phy0_data,
};

static int adls_sgmii_phy1_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	/* SerDes power up and power down are done in BIOS for ADL */

	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
	.setup = adls_sgmii_phy1_data,
};

static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
	.func = galileo_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
};

static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
	{
		.func = 7,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
	.func = iot2040_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
};

static const struct dmi_system_id quark_pci_dmi[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
	 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
	 * has only one pci network device while other asset tags are
	 * for IOT2040 which has two.
	 */
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
					"6ES7647-0AA00-0YA2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
		},
		.driver_data = (void *)&iot2040_stmmac_dmi_data,
	},
	{}
};

static int quark_default_data(struct pci_dev *pdev,
			      struct plat_stmmacenet_data *plat)
{
	int ret;

	/* Set common default data first */
	common_default_data(plat);

	/* Refuse to load the driver and register net device if MAC controller
	 * does not connect to any PHY interface.
	 */
	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
	if (ret < 0) {
		/* Return error to the caller on DMI enabled boards. */
		if (dmi_get_system_info(DMI_BOARD_NAME))
			return ret;

		/* Galileo boards with old firmware don't support DMI. We always
		 * use 1 here as PHY address, so at least the first found MAC
		 * controller would be probed.
		 */
		ret = 1;
	}

	plat->bus_id = pci_dev_id(pdev);
	plat->phy_addr = ret;
	plat->phy_interface = PHY_INTERFACE_MODE_RMII;

	plat->dma_cfg->pbl = 16;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 1;
	/* AXI (TODO) */

	return 0;
}

static const struct stmmac_pci_info quark_info = {
	.setup = quark_default_data,
};

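/* Fallback used by intel_eth_pci_probe() when stmmac_config_multi_msi()
 * fails: allocate a single interrupt of any type, use it for both the MAC
 * and WoL interrupts and clear multi_msi_en.
 */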
static int stmmac_config_single_msi(struct pci_dev *pdev,
				    struct plat_stmmacenet_data *plat,
				    struct stmmac_resources *res)
{
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0) {
		dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
			 __func__);
		return ret;
	}

	res->irq = pci_irq_vector(pdev, 0);
	res->wol_irq = res->irq;
	plat->multi_msi_en = 0;
	dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
		 __func__);

	return 0;
}

static int stmmac_config_multi_msi(struct pci_dev *pdev,
				   struct plat_stmmacenet_data *plat,
				   struct stmmac_resources *res)
{
	int ret;
	int i;

	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
			 __func__);
		return -1;
	}

	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
			 __func__);
		return ret;
	}

	/* For RX MSI */
	for (i = 0; i < plat->rx_queues_to_use; i++) {
		res->rx_irq[i] = pci_irq_vector(pdev,
						plat->msi_rx_base_vec + i * 2);
	}

	/* For TX MSI */
	for (i = 0; i < plat->tx_queues_to_use; i++) {
		res->tx_irq[i] = pci_irq_vector(pdev,
						plat->msi_tx_base_vec + i * 2);
	}

	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);

	plat->multi_msi_en = 1;
	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);

	return 0;
}

/**
 * intel_eth_pci_probe
 *
 * @pdev: pci device pointer
 * @id: pointer to table of device id/id's.
 *
 * Description: This probing function gets called for all PCI devices which
 * match the ID table and are not "owned" by another driver yet. This function
 * gets passed a "struct pci_dev *" for each device whose entry in the ID table
 * matches the device. The probe function returns zero when the driver chooses
 * to take "ownership" of the device or a negative error code otherwise.
 */
static int intel_eth_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
	struct intel_priv_data *intel_priv;
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res;
	int ret;

	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
	if (!intel_priv)
		return -ENOMEM;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
					   sizeof(*plat->mdio_bus_data),
					   GFP_KERNEL);
	if (!plat->mdio_bus_data)
		return -ENOMEM;

	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
				     GFP_KERNEL);
	if (!plat->dma_cfg)
		return -ENOMEM;

	plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
					     sizeof(*plat->safety_feat_cfg),
					     GFP_KERNEL);
	if (!plat->safety_feat_cfg)
		return -ENOMEM;

	/* Enable pci device */
	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
			__func__);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	plat->bsp_priv = intel_priv;
	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
	intel_priv->crossts_adj = 1;

	/* Initialize all MSI vectors to invalid so that they can be set
	 * according to the platform data settings below.
	 * Note: MSI vectors take values from 0 up to 31 (STMMAC_MSI_VEC_MAX)
	 */
	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;

	ret = info->setup(pdev, plat);
	if (ret)
		return ret;

	memset(&res, 0, sizeof(res));
	res.addr = pcim_iomap_table(pdev)[0];

	if (plat->eee_usecs_rate > 0) {
		u32 tx_lpi_usec;

		tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
		writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
	}

	ret = stmmac_config_multi_msi(pdev, plat, &res);
	if (ret) {
		ret = stmmac_config_single_msi(pdev, plat, &res);
		if (ret) {
			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
				__func__);
			goto err_alloc_irq;
		}
	}

	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
	if (ret)
		goto err_dvr_probe;

	return 0;

err_dvr_probe:
	pci_free_irq_vectors(pdev);
err_alloc_irq:
	clk_disable_unprepare(plat->stmmac_clk);
	clk_unregister_fixed_rate(plat->stmmac_clk);
	return ret;
}

/**
 * intel_eth_pci_remove
 *
 * @pdev: pci device pointer
 * Description: this function calls the main driver removal routine to free
 * the net resources and releases the PCI resources.
 */
static void intel_eth_pci_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	stmmac_dvr_remove(&pdev->dev);

	clk_unregister_fixed_rate(priv->plat->stmmac_clk);

	pcim_iounmap_regions(pdev, BIT(0));
}

static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = stmmac_suspend(dev);
	if (ret)
		return ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	pci_wake_from_d3(pdev, true);
	return 0;
}

static int __maybe_unused intel_eth_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	return stmmac_resume(dev);
}

static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
			 intel_eth_pci_resume);

#define PCI_DEVICE_ID_INTEL_QUARK		0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G		0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G		0x4b31
#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5	0x4b32
/* Intel(R) Programmable Services Engine (Intel(R) PSE) consists of 2 MACs
 * which are named PSE0 and PSE1
 */
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G	0x4ba0
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G	0x4ba1
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5	0x4ba2
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G	0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G	0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5	0x4bb2
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0	0x43ac
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1	0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G		0xa0ac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0	0x7aac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1	0x7aad

static const struct pci_device_id intel_eth_pci_id_table[] = {
	{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
	{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);

static struct pci_driver intel_eth_pci_driver = {
	.name = "intel-eth-pci",
	.id_table = intel_eth_pci_id_table,
	.probe = intel_eth_pci_probe,
	.remove = intel_eth_pci_remove,
	.driver         = {
		.pm     = &intel_eth_pm_ops,
	},
};

module_pci_driver(intel_eth_pci_driver);

MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
MODULE_LICENSE("GPL v2");