// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3

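/**
 * cdns_pcie_get_fn_from_vfn() - Convert a (PF, VF) pair into the absolute
 *				 function number used for register accesses
 * @pcie: Cadence PCIe controller
 * @fn: physical function number
 * @vfn: virtual function number (0 means the PF itself)
 *
 * Per the PF's SR-IOV capability, VF n is addressed as
 * fn + First VF Offset + (n - 1) * VF Stride. As an illustration (the
 * values are hypothetical), with First VF Offset = 4 and VF Stride = 1,
 * VF 2 of PF 0 maps to function 5.
 */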
static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
{
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	u32 first_vf_offset, stride;

	if (vfn == 0)
		return fn;

	first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
	stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
	fn = fn + first_vf_offset + ((vfn - 1) * stride);

	return fn;
}

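/**
 * cdns_pcie_ep_write_header() - Program a function's standard configuration
 *				 header
 * @epc: endpoint controller
 * @fn: physical function number
 * @vfn: virtual function number (0 for the PF)
 * @hdr: header values supplied by the endpoint function driver
 *
 * All VFs of a PF share a single Device ID, exposed through the PF's SR-IOV
 * capability (PCI_SRIOV_VF_DID), so only vfn == 1 may program it and higher
 * VFs are rejected with -EINVAL.
 */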
static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u32 reg;

	if (vfn > 1) {
		dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
		return -EINVAL;
	} else if (vfn == 1) {
		reg = cap + PCI_SRIOV_VF_DID;
		cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
		return 0;
	}

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * The Vendor ID can only be modified from function 0; all other
	 * functions use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

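/**
 * cdns_pcie_ep_set_bar() - Configure a BAR and program its inbound address
 *			    translation
 * @epc: endpoint controller
 * @fn: physical function number
 * @vfn: virtual function number (0 for the PF)
 * @epf_bar: BAR number, size, flags and backing physical address
 *
 * The controller encodes the BAR size as an aperture exponent:
 * size = 2^(aperture + 7), so the 128-byte minimum encodes as 0 and a
 * 1 MiB BAR yields aperture = ilog2(SZ_1M) - 7 = 13.
 */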
static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = sz > SZ_2G;

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	if (vfn == 0 || vfn == 1) {
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
			CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = epf_bar;

	return 0;
}

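/**
 * cdns_pcie_ep_clear_bar() - Undo cdns_pcie_ep_set_bar(): disable the BAR
 *			      and clear its inbound translation
 * @epc: endpoint controller
 * @fn: physical function number
 * @vfn: virtual function number (0 for the PF)
 * @epf_bar: BAR to tear down
 */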
static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	if (vfn == 0 || vfn == 1) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = NULL;
}

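/**
 * cdns_pcie_ep_map_addr() - Map a CPU address to a PCI bus address through a
 *			     free outbound region
 * @epc: endpoint controller
 * @fn: physical function number
 * @vfn: virtual function number (0 for the PF)
 * @addr: CPU physical address of the outbound window
 * @pci_addr: PCI bus address to map to
 * @size: size of the mapping
 *
 * Region 0 is reserved at setup time for IRQ writes, so its bit is already
 * set in ep->ob_region_map and the search below starts from region 1.
 */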
static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				 phys_addr_t addr, u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

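/**
 * cdns_pcie_ep_unmap_addr() - Release the outbound region backing @addr
 * @epc: endpoint controller
 * @fn: physical function number
 * @vfn: virtual function number (0 for the PF)
 * @addr: CPU physical address previously passed to cdns_pcie_ep_map_addr()
 */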
static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

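/**
 * cdns_pcie_ep_set_msi() - Advertise how many MSI vectors the function
 *			    supports
 * @epc: endpoint controller
 * @fn: physical function number
 * @vfn: virtual function number (0 for the PF)
 * @mmc: Multiple Message Capable encoding, i.e. log2 of the vector count
 *
 * MMC occupies bits 3:1 of the MSI Message Control register, hence the
 * (mmc << 1) below; e.g. mmc = 3 advertises 2^3 = 8 vectors.
 */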
static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/*
	 * Set the Multiple Message Capable bitfield of the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

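/**
 * cdns_pcie_ep_get_msi() - Read back how many MSI vectors the host enabled
 * @epc: endpoint controller
 * @fn: physical function number
 * @vfn: virtual function number (0 for the PF)
 *
 * Return: the Multiple Message Enable field (bits 6:4 of Message Control,
 * log2 of the enabled vector count), or -EINVAL if MSI is disabled.
 */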
static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;

	return mme;
}

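/**
 * cdns_pcie_ep_get_msix() - Read back the MSI-X table size
 * @epc: endpoint controller
 * @func_no: physical function number
 * @vfunc_no: virtual function number (0 for the PF)
 *
 * Return: the Table Size field of the MSI-X Message Control register
 * (encoded as N - 1), or -EINVAL if MSI-X is disabled.
 */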
static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val;
}

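/**
 * cdns_pcie_ep_set_msix() - Program the MSI-X capability: the table size and
 *			     the BAR/offset of both the table and the PBA
 * @epc: endpoint controller
 * @fn: physical function number
 * @vfn: virtual function number (0 for the PF)
 * @interrupts: number of MSI-X table entries, encoded as N - 1
 * @bir: BAR holding the MSI-X table
 * @offset: offset of the table within that BAR
 *
 * The PBA is placed in the same BAR, directly after the table (each table
 * entry is PCI_MSIX_ENTRY_SIZE bytes).
 */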
static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
				 u16 interrupts, enum pci_barno bir,
				 u32 offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);

	/* Set MSIX BAR and offset */
	reg = cap + PCI_MSIX_TABLE;
	val = offset | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	/* Set PBA BAR and offset. BAR must match MSIX BAR */
	reg = cap + PCI_MSIX_PBA;
	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	return 0;
}

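/**
 * cdns_pcie_ep_assert_intx() - Assert or deassert an INTx line by sending
 *				the corresponding PCIe message
 * @ep: Cadence endpoint controller
 * @fn: physical function number
 * @intx: INTx line (0..3 for INTA..INTD)
 * @is_asserted: true to assert, false to deassert
 *
 * Legacy interrupts are emulated with Assert_INTx/Deassert_INTx messages
 * sent through the outbound region reserved for IRQ writes; the Interrupt
 * Status bit in config space is kept consistent with the pending mask under
 * ep->lock.
 */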
static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
				     bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	unsigned long flags;
	u32 offset;
	u16 status;
	u8 msg_code;

	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = MSG_CODE_DEASSERT_INTA + intx;
	}

	spin_lock_irqsave(&ep->lock, flags);
	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
		 CDNS_PCIE_MSG_NO_DATA;
	writel(0, ep->irq_cpu_addr + offset);
}

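/**
 * cdns_pcie_ep_send_legacy_irq() - Raise a legacy (INTx) interrupt as an
 *				    assert/deassert pulse
 * @ep: Cadence endpoint controller
 * @fn: physical function number
 * @vfn: virtual function number (unused; INTx is PF-only)
 * @intx: INTx line to pulse
 */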
static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
					u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq().
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}

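/**
 * cdns_pcie_ep_send_msi_irq() - Raise an MSI by writing the message data to
 *				 the host-programmed message address
 * @ep: Cadence endpoint controller
 * @fn: physical function number
 * @vfn: virtual function number (0 for the PF)
 * @interrupt_num: 1-based MSI vector number
 *
 * The low log2(vector count) bits of the message data select the vector:
 * e.g. with 8 vectors enabled, vector 3 is signalled by writing
 * (data & ~0x7) | 2.
 */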
static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs. */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get the PCI address to which the data must be written. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}

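/**
 * cdns_pcie_ep_map_msi_irq() - Map the host MSI address region so that the
 *				endpoint can trigger MSIs by direct writes
 * @epc: endpoint controller
 * @fn: physical function number
 * @vfn: virtual function number (0 for the PF)
 * @addr: CPU physical address to map the MSI address region to
 * @interrupt_num: number of consecutive vectors to map
 * @entry_size: spacing between successive mappings
 * @msi_data: filled with the message data for the first vector
 * @msi_addr_offset: filled with the MSI address offset within the mapping
 */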
static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr, u8 interrupt_num,
				    u32 entry_size, u32 *msi_data,
				    u32 *msi_addr_offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u64 pci_addr, pci_addr_mask = 0xff;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	int ret;
	int i;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs. */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = data & ~data_mask;

	/* Get the PCI address to which the data must be written. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	for (i = 0; i < interrupt_num; i++) {
		ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
					    pci_addr & ~pci_addr_mask,
					    entry_size);
		if (ret)
			return ret;
		addr = addr + entry_size;
	}

	*msi_data = data;
	*msi_addr_offset = pci_addr & pci_addr_mask;

	return 0;
}

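/**
 * cdns_pcie_ep_send_msix_irq() - Raise an MSI-X interrupt using the entry in
 *				  the function's MSI-X table
 * @ep: Cadence endpoint controller
 * @fn: physical function number
 * @vfn: virtual function number (0 for the PF)
 * @interrupt_num: 1-based MSI-X vector number
 *
 * The message address and data are read from the MSI-X table the host
 * programmed into one of the function's BARs.
 */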
static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				      u16 interrupt_num)
{
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 tbl_offset, msg_data, reg;
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf_msix_tbl *msix_tbl;
	struct cdns_pcie_epf *epf;
	u64 pci_addr_mask = 0xff;
	u64 msg_addr;
	u16 flags;
	u8 bir;

	epf = &ep->epf[fn];
	if (vfn > 0)
		epf = &epf->epf[vfn - 1];

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI-X feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
	if (!(flags & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	reg = cap + PCI_MSIX_TABLE;
	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
	bir = tbl_offset & PCI_MSIX_TABLE_BIR;
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;

	/* Set the outbound region if needed. */
	if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
	    ep->irq_pci_fn != fn) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      msg_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));

	return 0;
}

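/**
 * cdns_pcie_ep_raise_irq() - pci_epc_ops::raise_irq callback dispatching to
 *			      the legacy, MSI or MSI-X helpers
 * @epc: endpoint controller
 * @fn: physical function number
 * @vfn: virtual function number (0 for the PF)
 * @type: interrupt type to raise
 * @interrupt_num: 1-based vector number (unused for legacy interrupts)
 */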
static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				  enum pci_epc_irq_type type,
				  u16 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		if (vfn > 0) {
			dev_err(dev, "Cannot raise legacy interrupts for VF\n");
			return -EINVAL;
		}
		return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);

	case PCI_EPC_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);

	case PCI_EPC_IRQ_MSIX:
		return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}

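/**
 * cdns_pcie_ep_start() - Enable the configured endpoint functions and bring
 *			  up the link
 * @epc: endpoint controller
 */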
static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;
	int max_epfs = sizeof(epc->function_num_map) * 8;
	int ret, value, epf;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);

	if (ep->quirk_disable_flr) {
		for (epf = 0; epf < max_epfs; epf++) {
			if (!(epc->function_num_map & BIT(epf)))
				continue;

			value = cdns_pcie_ep_fn_readl(pcie, epf,
					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
					PCI_EXP_DEVCAP);
			value &= ~PCI_EXP_DEVCAP_FLR;
			cdns_pcie_ep_fn_writel(pcie, epf,
					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
					PCI_EXP_DEVCAP, value);
		}
	}

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	return 0;
}

static const struct pci_epc_features cdns_pcie_epc_vf_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 65536,
};

static const struct pci_epc_features cdns_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 256,
};

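/**
 * cdns_pcie_ep_get_features() - Report controller capabilities to the EPF
 *				 core; VFs require a 64 KiB BAR alignment,
 *				 PFs only 256 bytes
 * @epc: endpoint controller
 * @func_no: physical function number
 * @vfunc_no: virtual function number (0 for the PF)
 */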
static const struct pci_epc_features*
cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	if (!vfunc_no)
		return &cdns_pcie_epc_features;

	return &cdns_pcie_epc_vf_features;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header = cdns_pcie_ep_write_header,
	.set_bar = cdns_pcie_ep_set_bar,
	.clear_bar = cdns_pcie_ep_clear_bar,
	.map_addr = cdns_pcie_ep_map_addr,
	.unmap_addr = cdns_pcie_ep_unmap_addr,
	.set_msi = cdns_pcie_ep_set_msi,
	.get_msi = cdns_pcie_ep_get_msi,
	.set_msix = cdns_pcie_ep_set_msix,
	.get_msix = cdns_pcie_ep_get_msix,
	.raise_irq = cdns_pcie_ep_raise_irq,
	.map_msi_irq = cdns_pcie_ep_map_msi_irq,
	.start = cdns_pcie_ep_start,
	.get_features = cdns_pcie_ep_get_features,
};

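/**
 * cdns_pcie_ep_setup() - Probe-time initialization of the endpoint
 *			  controller
 * @ep: Cadence endpoint controller to initialize
 *
 * Maps the "reg" and "mem" resources, allocates the per-function state,
 * creates the EPC device and initializes its memory window, and reserves
 * outbound region 0 together with a 128 KiB window used to raise
 * interrupts.
 */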
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	struct device *dev = ep->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct cdns_pcie *pcie = &ep->pcie;
	struct cdns_pcie_epf *epf;
	struct resource *res;
	struct pci_epc *epc;
	int ret;
	int i;

	pcie->is_rc = false;

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ep->max_regions = CDNS_PCIE_MAX_OB;
	of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions);

	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		return PTR_ERR(epc);
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
			       GFP_KERNEL);
	if (!ep->epf)
		return -ENOMEM;

	epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
				    sizeof(*epc->max_vfs), GFP_KERNEL);
	if (!epc->max_vfs)
		return -ENOMEM;

	ret = of_property_read_u8_array(np, "max-virtual-functions",
					epc->max_vfs, epc->max_functions);
	if (ret == 0) {
		for (i = 0; i < epc->max_functions; i++) {
			epf = &ep->epf[i];
			if (epc->max_vfs[i] == 0)
				continue;
			epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
						sizeof(*ep->epf), GFP_KERNEL);
			if (!epf->epf)
				return -ENOMEM;
		}
	}

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res), PAGE_SIZE);
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		return ret;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs */
	set_bit(0, &ep->ob_region_map);

	if (ep->quirk_detect_quiet_flag)
		cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);

	spin_lock_init(&ep->lock);

	return 0;

 free_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}