// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
 */

/* File aq_pci_func.c: Definition of PCI functions. */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "aq_main.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
#include "aq_filters.h"
#include "aq_drvinfo.h"
#include "aq_macsec.h"

static const struct pci_device_id aq_pci_tbl[] = {
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },

	{}
};

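/* Board lookup table: maps a PCI device ID and hardware revision to the
 * matching HW ops and capability descriptors. AQ_HWREV_ANY entries match
 * any revision of that device.
 */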
static const struct aq_board_revision_s hw_atl_boards[] = {
	{ AQ_DEVICE_ID_0001,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },

	{ AQ_DEVICE_ID_0001,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },

	{ AQ_DEVICE_ID_AQC100,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_AQC107,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_AQC108,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_AQC109,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
	{ AQ_DEVICE_ID_AQC111,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
	{ AQ_DEVICE_ID_AQC112,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },

	{ AQ_DEVICE_ID_AQC100S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
	{ AQ_DEVICE_ID_AQC107S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
	{ AQ_DEVICE_ID_AQC108S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
	{ AQ_DEVICE_ID_AQC109S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
	{ AQ_DEVICE_ID_AQC111S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
	{ AQ_DEVICE_ID_AQC112S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
};

MODULE_DEVICE_TABLE(pci, aq_pci_tbl);

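/* Look up the HW ops and capabilities for the probed device by walking
 * hw_atl_boards[]; returns -EINVAL if the device/revision is unknown.
 */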
static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
				     const struct aq_hw_ops **ops,
				     const struct aq_hw_caps_s **caps)
{
	int i;

	if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
		if (hw_atl_boards[i].devid == pdev->device &&
		    (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
		     hw_atl_boards[i].revision == pdev->revision)) {
			*ops = hw_atl_boards[i].ops;
			*caps = hw_atl_boards[i].caps;
			break;
		}
	}

	if (i == ARRAY_SIZE(hw_atl_boards))
		return -EINVAL;

	return 0;
}

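/* Basic PCI-level init: configure the DMA masks (64-bit with a 32-bit
 * fallback), claim the MMIO regions and enable bus mastering.
 */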
int aq_pci_func_init(struct pci_dev *pdev)
{
	int err;

	/* Prefer 64-bit DMA addressing, fall back to 32-bit. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}
	if (err) {
		err = -ENOSR;
		goto err_exit;
	}

	err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
	if (err < 0)
		goto err_exit;

	pci_set_master(pdev);

	return 0;

err_exit:
	return err;
}

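/* Request the IRQ for vector index i. With MSI/MSI-X enabled the caller's
 * handler is installed directly; otherwise the shared legacy ISR is used.
 * On success the vector is recorded in msix_entry_mask and, for MSI-X,
 * the optional affinity hint is applied.
 */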
int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
			  char *name, irq_handler_t irq_handler,
			  void *irq_arg, cpumask_t *affinity_mask)
{
	struct pci_dev *pdev = self->pdev;
	int err;

	if (pdev->msix_enabled || pdev->msi_enabled)
		err = request_irq(pci_irq_vector(pdev, i), irq_handler, 0,
				  name, irq_arg);
	else
		err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
				  IRQF_SHARED, name, irq_arg);

	if (err >= 0) {
		self->msix_entry_mask |= (1U << i);

		if (pdev->msix_enabled && affinity_mask)
			irq_set_affinity_hint(pci_irq_vector(pdev, i),
					      affinity_mask);
	}

	return err;
}

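/* Release every IRQ recorded in msix_entry_mask, clearing any affinity
 * hints that were set for MSI-X vectors.
 */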
void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
	struct pci_dev *pdev = self->pdev;
	unsigned int i;
	void *irq_data;

	for (i = 32U; i--;) {
		if (!((1U << i) & self->msix_entry_mask))
			continue;
		if (self->aq_nic_cfg.link_irq_vec &&
		    i == self->aq_nic_cfg.link_irq_vec)
			irq_data = self;
		else if (i < AQ_CFG_VECS_MAX)
			irq_data = self->aq_vec[i];
		else
			continue;

		if (pdev->msix_enabled)
			irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
		free_irq(pci_irq_vector(pdev, i), irq_data);
		self->msix_entry_mask &= ~(1U << i);
	}
}

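/* Report which interrupt mode (MSI-X, MSI or legacy INTx) is in use. */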
unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
{
	if (self->pdev->msix_enabled)
		return AQ_HW_IRQ_MSIX;
	if (self->pdev->msi_enabled)
		return AQ_HW_IRQ_MSI;

	return AQ_HW_IRQ_LEGACY;
}

static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
{
	pci_free_irq_vectors(self->pdev);
}

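/* Main probe path: enable the device, identify the hardware, map the
 * register BAR, allocate interrupt vectors and register the net_device.
 */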
static int aq_pci_probe(struct pci_dev *pdev,
			const struct pci_device_id *pci_id)
{
	struct net_device *ndev;
	resource_size_t mmio_pa;
	struct aq_nic_s *self;
	u32 numvecs;
	u32 bar;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = aq_pci_func_init(pdev);
	if (err)
		goto err_pci_func;

	ndev = aq_ndev_alloc();
	if (!ndev) {
		err = -ENOMEM;
		goto err_ndev;
	}

	self = netdev_priv(ndev);
	self->pdev = pdev;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	pci_set_drvdata(pdev, self);

	mutex_init(&self->fwreq_mutex);

	err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
					&aq_nic_get_cfg(self)->aq_hw_caps);
	if (err)
		goto err_ioremap;

	self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
	if (!self->aq_hw) {
		err = -ENOMEM;
		goto err_ioremap;
	}
	self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);

	/* Map the first memory BAR that is large enough to hold the
	 * register space.
	 */
	for (bar = 0; bar < 4; ++bar) {
		if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
			resource_size_t reg_sz;

			mmio_pa = pci_resource_start(pdev, bar);
			if (mmio_pa == 0U) {
				err = -EIO;
				goto err_free_aq_hw;
			}

			reg_sz = pci_resource_len(pdev, bar);
			if (reg_sz <= 24 /*ATL_REGS_SIZE*/) {
				err = -EIO;
				goto err_free_aq_hw;
			}

			self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
			if (!self->aq_hw->mmio) {
				err = -EIO;
				goto err_free_aq_hw;
			}
			break;
		}
	}

	if (bar == 4) {
		err = -EIO;
		goto err_free_aq_hw;
	}

	numvecs = min((u8)AQ_CFG_VECS_DEF,
		      aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
	numvecs = min(numvecs, num_online_cpus());
	/* Request IRQ vector for PTP */
	numvecs += 1;

	numvecs += AQ_HW_SERVICE_IRQS;
	/* enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
	err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
				    PCI_IRQ_MSIX | PCI_IRQ_MSI |
				    PCI_IRQ_LEGACY);

	if (err < 0)
		goto err_hwinit;
	numvecs = err;
#endif
	self->irqvecs = numvecs;

	/* net device init */
	aq_nic_cfg_start(self);

	aq_nic_ndev_init(self);

	err = aq_nic_ndev_register(self);
	if (err < 0)
		goto err_register;

	aq_drvinfo_init(ndev);

	return 0;

err_register:
	aq_nic_free_vectors(self);
	aq_pci_free_irq_vectors(self);
err_hwinit:
	iounmap(self->aq_hw->mmio);
err_free_aq_hw:
	kfree(self->aq_hw);
err_ioremap:
	free_netdev(ndev);
err_ndev:
	pci_release_regions(pdev);
err_pci_func:
	pci_disable_device(pdev);

	return err;
}

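/* Tear down everything set up in aq_pci_probe(): unregister the netdev,
 * free vectors and IRQ vectors, unmap the registers and release the
 * PCI regions.
 */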
static void aq_pci_remove(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	if (self->ndev) {
		aq_clear_rxnfc_all_rules(self);
		if (self->ndev->reg_state == NETREG_REGISTERED)
			unregister_netdev(self->ndev);

#if IS_ENABLED(CONFIG_MACSEC)
		aq_macsec_free(self);
#endif
		aq_nic_free_vectors(self);
		aq_pci_free_irq_vectors(self);
		iounmap(self->aq_hw->mmio);
		kfree(self->aq_hw);
		pci_release_regions(pdev);
		free_netdev(self->ndev);
	}

	pci_disable_device(pdev);
}

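/* Shutdown hook: quiesce the NIC and, on system power-off, disable wake
 * and put the device into D3hot.
 */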
static void aq_pci_shutdown(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	aq_nic_shutdown(self);

	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

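/* Common suspend path. When deep is true the NIC is deinitialized
 * (keeping the link only if Wake-on-LAN is configured) and switched
 * to its low-power state.
 */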
static int aq_suspend_common(struct device *dev, bool deep)
{
	struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	nic->power_state = AQ_HW_POWER_STATE_D3;
	netif_device_detach(nic->ndev);
	netif_tx_stop_all_queues(nic->ndev);

	if (netif_running(nic->ndev))
		aq_nic_stop(nic);

	if (deep) {
		aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
		aq_nic_set_power(nic);
	}

	rtnl_unlock();

	return 0;
}

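/* Common resume path: restore PCI state, reinitialize the NIC when
 * returning from a deep suspend, then restart the queues.
 */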
static int atl_resume_common(struct device *dev, bool deep)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct aq_nic_s *nic;
	int ret = 0;

	nic = pci_get_drvdata(pdev);

	rtnl_lock();

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (deep) {
		ret = aq_nic_init(nic);
		if (ret)
			goto err_exit;
	}

	if (netif_running(nic->ndev)) {
		ret = aq_nic_start(nic);
		if (ret)
			goto err_exit;
	}

	netif_device_attach(nic->ndev);
	netif_tx_start_all_queues(nic->ndev);

err_exit:
	rtnl_unlock();

	return ret;
}

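/* dev_pm_ops callbacks: freeze/thaw keep the NIC initialized, while
 * suspend/poweroff and resume/restore go through the deep paths.
 */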
static int aq_pm_freeze(struct device *dev)
{
	return aq_suspend_common(dev, false);
}

static int aq_pm_suspend_poweroff(struct device *dev)
{
	return aq_suspend_common(dev, true);
}

static int aq_pm_thaw(struct device *dev)
{
	return atl_resume_common(dev, false);
}

static int aq_pm_resume_restore(struct device *dev)
{
	return atl_resume_common(dev, true);
}

static const struct dev_pm_ops aq_pm_ops = {
	.suspend = aq_pm_suspend_poweroff,
	.poweroff = aq_pm_suspend_poweroff,
	.freeze = aq_pm_freeze,
	.resume = aq_pm_resume_restore,
	.restore = aq_pm_resume_restore,
	.thaw = aq_pm_thaw,
};

static struct pci_driver aq_pci_ops = {
	.name = AQ_CFG_DRV_NAME,
	.id_table = aq_pci_tbl,
	.probe = aq_pci_probe,
	.remove = aq_pci_remove,
	.shutdown = aq_pci_shutdown,
#ifdef CONFIG_PM
	.driver.pm = &aq_pm_ops,
#endif
};

int aq_pci_func_register_driver(void)
{
	return pci_register_driver(&aq_pci_ops);
}

void aq_pci_func_unregister_driver(void)
{
	pci_unregister_driver(&aq_pci_ops);
}