/*
 * Copyright 2014-2016 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/msi.h>
#include <asm/pci-bridge.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <misc/cxl.h>

#include "pci.h"

int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int rc;

	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	pe_info(pe, "Switching PHB to CXL\n");

	rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
	if (rc == OPAL_UNSUPPORTED)
		dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n");
	else if (rc)
		dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);

	return rc;
}
EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
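
/*
 * Illustrative caller sketch (hypothetical; no caller lives in this file):
 * the cxl driver is expected to pass one of the OPAL_PHB_CAPI_MODE_*
 * constants, e.g.:
 *
 *	rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI);
 *	if (rc)
 *		return rc;
 */
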
/*
 * Find the PHB for the cxl dev and allocate MSI hwirqs.
 * Returns the absolute hardware IRQ number.
 */
int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);

	if (hwirq < 0) {
		dev_warn(&dev->dev, "Failed to find a free MSI\n");
		return -ENOSPC;
	}

	return phb->msi_base + hwirq;
}
EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);
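
/*
 * Minimal usage sketch (hypothetical caller): the returned value is an
 * absolute hwirq number, and allocations are handed back with the same
 * count via pnv_cxl_release_hwirqs():
 *
 *	int hwirq = pnv_cxl_alloc_hwirqs(dev, 1);
 *
 *	if (hwirq < 0)
 *		return hwirq;
 *	...
 *	pnv_cxl_release_hwirqs(dev, hwirq, 1);
 */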

void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
}
EXPORT_SYMBOL(pnv_cxl_release_hwirqs);

void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
				  struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int i, hwirq;

	for (i = 1; i < CXL_IRQ_RANGES; i++) {
		if (!irqs->range[i])
			continue;
		pr_devel("cxl release irq range 0x%x: offset: 0x%lx  limit: %ld\n",
			 i, irqs->offset[i],
			 irqs->range[i]);
		hwirq = irqs->offset[i] - phb->msi_base;
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
				       irqs->range[i]);
	}
}
EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);

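/*
 * Allocate up to 'num' hwirqs, spread across the available ranges. Each
 * range takes the largest contiguous chunk the bitmap can satisfy (halving
 * the request on failure) and the loop continues with the remainder, so a
 * request for 7 irqs may, for example, come back as a range of 3 followed
 * by a range of 4.
 */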
int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
			       struct pci_dev *dev, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int i, hwirq, try;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	/* 0 is reserved for the multiplexed PSL DSI interrupt */
	for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
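		/* Take the largest chunk the bitmap can satisfy, halving on failure */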
		while (try) {
			hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
			if (hwirq >= 0)
				break;
			try /= 2;
		}
		if (!try)
			goto fail;

		irqs->offset[i] = phb->msi_base + hwirq;
		irqs->range[i] = try;
		pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx  limit: %li\n",
			 i, irqs->offset[i], irqs->range[i]);
		num -= try;
	}
	if (num)
		goto fail;

	return 0;
fail:
	pnv_cxl_release_hwirq_ranges(irqs, dev);
	return -ENOSPC;
}
EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);

int pnv_cxl_get_irq_count(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	return phb->msi_bmp.irq_count;
}
EXPORT_SYMBOL(pnv_cxl_get_irq_count);

int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
			   unsigned int virq)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	unsigned int xive_num = hwirq - phb->msi_base;
	struct pnv_ioda_pe *pe;
	int rc;

	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
			"hwirq 0x%x XIVE 0x%x PE\n",
			pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
		return -EIO;
	}
	pnv_set_msi_irq_chip(phb, virq);

	return 0;
}
EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);

#if IS_MODULE(CONFIG_CXL)
static inline int get_cxl_module(void)
{
	struct module *cxl_module;

	mutex_lock(&module_mutex);

	cxl_module = find_module("cxl");
	if (cxl_module)
		__module_get(cxl_module);

	mutex_unlock(&module_mutex);

	if (!cxl_module)
		return -ENODEV;

	return 0;
}
#else
static inline int get_cxl_module(void) { return 0; }
#endif

/*
 * Sets flags and switches the controller ops to enable the cxl kernel API.
 * Originally the cxl kernel API operated on a virtual PHB, but certain cards
 * such as the Mellanox CX4 use a peer model instead, and for these cards the
 * cxl kernel API will operate on the real PHB.
 */
int pnv_cxl_enable_phb_kernel_api(struct pci_controller *hose, bool enable)
{
	struct pnv_phb *phb = hose->private_data;
	int rc;

	if (!enable) {
		/*
		 * Once cxl mode is enabled on the PHB, there is currently no
		 * known safe method to disable it again, and trying risks a
		 * checkstop. If we can find a way to safely disable cxl mode
		 * in the future we can revisit this, but for now the only sane
		 * thing to do is to refuse to disable cxl mode:
		 */
		return -EPERM;
	}

	/*
	 * Hold a reference to the cxl module since several PHB operations now
	 * depend on it, and it would be insane to allow it to be removed so
	 * long as we are in this mode (and since we can't safely disable this
	 * mode once enabled...).
	 */
	rc = get_cxl_module();
	if (rc)
		return rc;

	phb->flags |= PNV_PHB_FLAG_CXL;
	hose->controller_ops = pnv_cxl_cx4_ioda_controller_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_cxl_enable_phb_kernel_api);
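
/*
 * Illustrative caller sketch (hypothetical; the real caller lives in the cxl
 * driver): since enabling is a one-way switch, a caller only ever passes
 * true:
 *
 *	struct pci_controller *hose = pci_bus_to_host(dev->bus);
 *
 *	rc = pnv_cxl_enable_phb_kernel_api(hose, true);
 *	if (rc)
 *		return rc;
 */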

bool pnv_pci_on_cxl_phb(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	return !!(phb->flags & PNV_PHB_FLAG_CXL);
}
EXPORT_SYMBOL_GPL(pnv_pci_on_cxl_phb);

struct cxl_afu *pnv_cxl_phb_to_afu(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;

	return (struct cxl_afu *)phb->cxl_afu;
}
EXPORT_SYMBOL_GPL(pnv_cxl_phb_to_afu);

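/*
 * Called by the cxl driver to stash its peer AFU on the PHB, so that
 * pnv_cxl_enable_device_hook() below can associate other functions on the
 * device with it.
 */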
void pnv_cxl_phb_set_peer_afu(struct pci_dev *dev, struct cxl_afu *afu)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	phb->cxl_afu = afu;
}
EXPORT_SYMBOL_GPL(pnv_cxl_phb_set_peer_afu);

/*
 * In the peer cxl model, the XSL/PSL is physical function 0, and will be used
 * by other functions on the device for memory access and interrupts. When the
 * other functions are enabled, we explicitly take a reference on the cxl
 * function since they will use it, and allocate a default context associated
 * with that function, just like the vPHB model of the cxl kernel API.
 */
bool pnv_cxl_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct cxl_afu *afu = phb->cxl_afu;

	if (!pnv_pci_enable_device_hook(dev))
		return false;

	/* No special handling for the cxl function, which is always PF 0 */
	if (PCI_FUNC(dev->devfn) == 0)
		return true;

	if (!afu) {
		dev_WARN(&dev->dev, "Attempted to enable function > 0 on CXL PHB without a peer AFU\n");
		return false;
	}

	dev_info(&dev->dev, "Enabling function on CXL enabled PHB with peer AFU\n");

	/* Make sure the peer AFU can't go away while this device is active */
	cxl_afu_get(afu);

	return cxl_pci_associate_default_context(dev, afu);
}

void pnv_cxl_disable_device(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct cxl_afu *afu = phb->cxl_afu;

	/* No special handling for the cxl function: */
	if (PCI_FUNC(dev->devfn) == 0)
		return;

	cxl_pci_disable_device(dev);
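	/* Pairs with the cxl_afu_get() taken in pnv_cxl_enable_device_hook() */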
	cxl_afu_put(afu);
}

/*
 * This is a special version of pnv_setup_msi_irqs for cards in cxl mode. This
 * function handles setting up the IVTE entries for the XSL to use.
 *
 * We are currently not filling out the MSI-X table, since the only currently
 * supported adapter (CX4) uses a custom MSI-X table format in cxl mode and it
 * is up to its driver to fill that out. In the future we may fill out the
 * MSI-X table (and change the IVTE entries to be an index to the MSI-X table)
 * for adapters implementing the Full MSI-X mode described in the CAIA.
 */
int pnv_cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct cxl_context *ctx = NULL;
	unsigned int virq;
	int hwirq;
	int afu_irq = 0;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	rc = cxl_cx4_setup_msi_irqs(pdev, nvec, type);
	if (rc)
		return rc;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}

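		/* The cxl driver supplies the hwirq backing this MSI vector */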
		hwirq = cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
		if (WARN_ON(hwirq <= 0))
			return (hwirq ? hwirq : -ENOMEM);

		virq = irq_create_mapping(NULL, hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map cxl mode MSI to linux irq\n",
				pci_name(pdev));
			return -ENOMEM;
		}

		rc = pnv_cxl_ioda_msi_setup(pdev, hwirq, virq);
		if (rc) {
			pr_warn("%s: Failed to setup cxl mode MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			return rc;
		}

		irq_set_msi_desc(virq, entry);
	}

	return 0;
}

void pnv_cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
	}

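	/* The cxl driver releases the hwirqs it supplied during setup */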
	cxl_cx4_teardown_msi_irqs(pdev);
}