/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_pci_func.c: Definition of PCI functions. */

#include "aq_pci_func.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include <linux/interrupt.h>

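/* Per PCI function context: the owning pci_dev, the mapped register BAR,
 * the per-port NIC instances and the MSI-X vector bookkeeping.
 */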
struct aq_pci_func_s {
	struct pci_dev *pdev;
	struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS];
	void __iomem *mmio;
	void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS];
	resource_size_t mmio_pa;
	unsigned int msix_entry_mask;
	unsigned int ports;
	bool is_pci_enabled;
	bool is_regions;
	bool is_pci_using_dac;
	struct aq_hw_caps_s aq_hw_caps;
};

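/* Allocate the PCI function context, query the hardware capabilities and
 * cold-allocate one aq_nic instance per port reported by the hardware.
 * Returns the new context, or NULL on failure.
 */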
struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
					struct pci_dev *pdev,
					const struct net_device_ops *ndev_ops,
					const struct ethtool_ops *eth_ops)
{
	struct aq_pci_func_s *self = NULL;
	int err = 0;
	unsigned int port = 0U;

	if (!aq_hw_ops) {
		err = -EFAULT;
		goto err_exit;
	}
	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

	pci_set_drvdata(pdev, self);
	self->pdev = pdev;

	err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps);
	if (err < 0)
		goto err_exit;

	self->ports = self->aq_hw_caps.ports;

	for (port = 0; port < self->ports; ++port) {
		struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
							    &pdev->dev, self,
							    port, aq_hw_ops);

		if (!aq_nic) {
			err = -ENOMEM;
			goto err_exit;
		}
		self->port[port] = aq_nic;
	}

err_exit:
	if (err < 0) {
		if (self)
			aq_pci_func_free(self);
		self = NULL;
	}

	return self;
}

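/* Bring up the PCI function: enable the device, pick a DMA mask, map the
 * register BAR, allocate interrupt vectors and register a net_device for
 * each port. On failure the function is torn down via aq_pci_func_deinit().
 */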
int aq_pci_func_init(struct aq_pci_func_s *self)
{
	int err = 0;
	unsigned int bar = 0U;
	unsigned int port = 0U;
	unsigned int numvecs = 0U;

	err = pci_enable_device(self->pdev);
	if (err < 0)
		goto err_exit;

	self->is_pci_enabled = true;

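	/* Prefer 64-bit DMA addressing; fall back to a 32-bit mask if the
	 * platform cannot provide it.
	 */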
	err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64));
		self->is_pci_using_dac = true;
	}
	if (err) {
		err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(self->pdev,
							  DMA_BIT_MASK(32));
		self->is_pci_using_dac = false;
	}
	if (err != 0) {
		err = -ENOSR;
		goto err_exit;
	}

	err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio");
	if (err < 0)
		goto err_exit;

	self->is_regions = true;

	pci_set_master(self->pdev);

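	/* Map the first memory BAR; it holds the device register space. */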
	for (bar = 0; bar < 4; ++bar) {
		if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) {
			resource_size_t reg_sz;

			self->mmio_pa = pci_resource_start(self->pdev, bar);
			if (self->mmio_pa == 0U) {
				err = -EIO;
				goto err_exit;
			}

			reg_sz = pci_resource_len(self->pdev, bar);
			if (reg_sz <= 24 /*ATL_REGS_SIZE*/) {
				err = -EIO;
				goto err_exit;
			}

			self->mmio = ioremap_nocache(self->mmio_pa, reg_sz);
			if (!self->mmio) {
				err = -EIO;
				goto err_exit;
			}
			break;
		}
	}

	/* No usable memory BAR was found. */
	if (!self->mmio) {
		err = -EIO;
		goto err_exit;
	}

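	/* Use no more interrupt vectors than the driver default, the number
	 * the hardware supports, or the number of online CPUs.
	 */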
	numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs);
	numvecs = min(numvecs, num_online_cpus());

	/* enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
	err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX);

	if (err < 0) {
		err = pci_alloc_irq_vectors(self->pdev, 1, 1,
					    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (err < 0)
			goto err_exit;
	}
#endif /* AQ_CFG_FORCE_LEGACY_INT */

	/* net device init */
	for (port = 0; port < self->ports; ++port) {
		if (!self->port[port])
			continue;

		err = aq_nic_cfg_start(self->port[port]);
		if (err < 0)
			goto err_exit;

		err = aq_nic_ndev_init(self->port[port]);
		if (err < 0)
			goto err_exit;

		err = aq_nic_ndev_register(self->port[port]);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	if (err < 0)
		aq_pci_func_deinit(self);
	return err;
}

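/* Request the IRQ for vector i, binding aq_vec as the handler context. The
 * shared legacy handler is used when neither MSI-X nor MSI is enabled; the
 * CPU affinity hint is set only in MSI-X mode.
 */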
int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
			  char *name, void *aq_vec, cpumask_t *affinity_mask)
{
	struct pci_dev *pdev = self->pdev;
	int err = 0;

	if (pdev->msix_enabled || pdev->msi_enabled)
		err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
				  name, aq_vec);
	else
		err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
				  IRQF_SHARED, name, aq_vec);

	if (err >= 0) {
		self->msix_entry_mask |= (1 << i);
		self->aq_vec[i] = aq_vec;

		if (pdev->msix_enabled)
			irq_set_affinity_hint(pci_irq_vector(pdev, i),
					      affinity_mask);
	}

	return err;
}

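/* Free every IRQ recorded in msix_entry_mask, clearing the MSI-X affinity
 * hint before releasing the line.
 */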
void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
{
	struct pci_dev *pdev = self->pdev;
	unsigned int i = 0U;

	for (i = 32U; i--;) {
		if (!((1U << i) & self->msix_entry_mask))
			continue;

		if (pdev->msix_enabled)
			irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
		free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
		self->msix_entry_mask &= ~(1U << i);
	}
}

void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self)
{
	return self->mmio;
}

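/* Report which interrupt mode is actually enabled on the PCI device. */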
unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
{
	if (self->pdev->msix_enabled)
		return AQ_HW_IRQ_MSIX;
	if (self->pdev->msi_enabled)
		return AQ_HW_IRQ_MSI;
	return AQ_HW_IRQ_LEGACY;
}

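/* Undo aq_pci_func_init(): free IRQs and interrupt vectors, then release the
 * PCI regions and disable the device if they were set up. Safe to call on a
 * partially initialized context.
 */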
void aq_pci_func_deinit(struct aq_pci_func_s *self)
{
	if (!self)
		goto err_exit;

	aq_pci_func_free_irqs(self);
	pci_free_irq_vectors(self->pdev);

	if (self->is_regions)
		pci_release_regions(self->pdev);

	if (self->is_pci_enabled)
		pci_disable_device(self->pdev);

err_exit:;
}

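/* Free the per-port net_device instances, unmap the register BAR and release
 * the PCI function context itself.
 */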
void aq_pci_func_free(struct aq_pci_func_s *self)
{
	unsigned int port = 0U;

	if (!self)
		goto err_exit;

	for (port = 0; port < self->ports; ++port) {
		if (!self->port[port])
			continue;

		aq_nic_ndev_free(self->port[port]);
	}

	if (self->mmio)
		iounmap(self->mmio);

	kfree(self);

err_exit:;
}

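/* Propagate a power-management state change (suspend/resume) to every port. */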
int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
				pm_message_t *pm_msg)
{
	int err = 0;
	unsigned int port = 0U;

	if (!self) {
		err = -EFAULT;
		goto err_exit;
	}
	for (port = 0; port < self->ports; ++port) {
		if (!self->port[port])
			continue;

		(void)aq_nic_change_pm_state(self->port[port], pm_msg);
	}

err_exit:
	return err;
}