1 /*
2  * Copyright 2014-2016 IBM Corp.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version
7  * 2 of the License, or (at your option) any later version.
8  */
9 
10 #include <linux/module.h>
11 #include <asm/pci-bridge.h>
12 #include <asm/pnv-pci.h>
13 #include <asm/opal.h>
14 #include <misc/cxl.h>
15 
16 #include "pci.h"
17 
18 int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
19 {
20 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
21 	struct pnv_phb *phb = hose->private_data;
22 	struct pnv_ioda_pe *pe;
23 	int rc;
24 
25 	pe = pnv_ioda_get_pe(dev);
26 	if (!pe)
27 		return -ENODEV;
28 
29 	pe_info(pe, "Switching PHB to CXL\n");
30 
31 	rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
32 	if (rc == OPAL_UNSUPPORTED)
33 		dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n");
34 	else if (rc)
35 		dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);
36 
37 	return rc;
38 }
39 EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
40 
41 /* Find PHB for cxl dev and allocate MSI hwirqs?
42  * Returns the absolute hardware IRQ number
43  */
44 int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
45 {
46 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
47 	struct pnv_phb *phb = hose->private_data;
48 	int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);
49 
50 	if (hwirq < 0) {
51 		dev_warn(&dev->dev, "Failed to find a free MSI\n");
52 		return -ENOSPC;
53 	}
54 
55 	return phb->msi_base + hwirq;
56 }
57 EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);
58 
59 void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
60 {
61 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
62 	struct pnv_phb *phb = hose->private_data;
63 
64 	msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
65 }
66 EXPORT_SYMBOL(pnv_cxl_release_hwirqs);
67 
68 void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
69 				  struct pci_dev *dev)
70 {
71 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
72 	struct pnv_phb *phb = hose->private_data;
73 	int i, hwirq;
74 
75 	for (i = 1; i < CXL_IRQ_RANGES; i++) {
76 		if (!irqs->range[i])
77 			continue;
78 		pr_devel("cxl release irq range 0x%x: offset: 0x%lx  limit: %ld\n",
79 			 i, irqs->offset[i],
80 			 irqs->range[i]);
81 		hwirq = irqs->offset[i] - phb->msi_base;
82 		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
83 				       irqs->range[i]);
84 	}
85 }
86 EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);
87 
/*
 * Allocate a total of @num MSI hwirqs for @dev, recording them in @irqs.
 * The request may be satisfied by several smaller contiguous ranges:
 * each range is filled with the largest allocation that still succeeds,
 * halving the attempted size on each failure.
 *
 * Returns 0 on success or -ENOSPC on failure, in which case any partial
 * allocations have already been released again.
 */
int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
			       struct pci_dev *dev, int num)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	int i, hwirq, try;

	/* Start from a clean slate so a partial failure can be unwound */
	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	/* 0 is reserved for the multiplexed PSL DSI interrupt */
	for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
		/* Binary back-off: try progressively smaller contiguous runs */
		try = num;
		while (try) {
			hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
			if (hwirq >= 0)
				break;
			try /= 2;
		}
		if (!try)
			goto fail;

		/* Record the range using absolute hwirq numbers */
		irqs->offset[i] = phb->msi_base + hwirq;
		irqs->range[i] = try;
		pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx  limit: %li\n",
			 i, irqs->offset[i], irqs->range[i]);
		num -= try;
	}
	/* Ran out of ranges before satisfying the full request */
	if (num)
		goto fail;

	return 0;
fail:
	pnv_cxl_release_hwirq_ranges(irqs, dev);
	return -ENOSPC;
}
EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
124 
125 int pnv_cxl_get_irq_count(struct pci_dev *dev)
126 {
127 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
128 	struct pnv_phb *phb = hose->private_data;
129 
130 	return phb->msi_bmp.irq_count;
131 }
132 EXPORT_SYMBOL(pnv_cxl_get_irq_count);
133 
134 int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
135 			   unsigned int virq)
136 {
137 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
138 	struct pnv_phb *phb = hose->private_data;
139 	unsigned int xive_num = hwirq - phb->msi_base;
140 	struct pnv_ioda_pe *pe;
141 	int rc;
142 
143 	if (!(pe = pnv_ioda_get_pe(dev)))
144 		return -ENODEV;
145 
146 	/* Assign XIVE to PE */
147 	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
148 	if (rc) {
149 		pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
150 			"hwirq 0x%x XIVE 0x%x PE\n",
151 			pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
152 		return -EIO;
153 	}
154 	pnv_set_msi_irq_chip(phb, virq);
155 
156 	return 0;
157 }
158 EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
159 
#if IS_MODULE(CONFIG_CXL)
/*
 * Take a reference on the cxl module, if loaded, so that it cannot be
 * unloaded while this PHB depends on it.  Returns 0 on success or
 * -ENODEV if the module is not present.
 */
static inline int get_cxl_module(void)
{
	struct module *cxl_module;

	/* module_mutex protects the module list against concurrent unload */
	mutex_lock(&module_mutex);

	cxl_module = find_module("cxl");
	if (cxl_module)
		__module_get(cxl_module);

	mutex_unlock(&module_mutex);

	if (!cxl_module)
		return -ENODEV;

	return 0;
}
#else
/* cxl is built in (or not a module): no reference counting needed */
static inline int get_cxl_module(void) { return 0; }
#endif
181 
182 /*
183  * Sets flags and switches the controller ops to enable the cxl kernel api.
184  * Originally the cxl kernel API operated on a virtual PHB, but certain cards
185  * such as the Mellanox CX4 use a peer model instead and for these cards the
186  * cxl kernel api will operate on the real PHB.
187  */
188 int pnv_cxl_enable_phb_kernel_api(struct pci_controller *hose, bool enable)
189 {
190 	struct pnv_phb *phb = hose->private_data;
191 	int rc;
192 
193 	if (!enable) {
194 		/*
195 		 * Once cxl mode is enabled on the PHB, there is currently no
196 		 * known safe method to disable it again, and trying risks a
197 		 * checkstop. If we can find a way to safely disable cxl mode
198 		 * in the future we can revisit this, but for now the only sane
199 		 * thing to do is to refuse to disable cxl mode:
200 		 */
201 		return -EPERM;
202 	}
203 
204 	/*
205 	 * Hold a reference to the cxl module since several PHB operations now
206 	 * depend on it, and it would be insane to allow it to be removed so
207 	 * long as we are in this mode (and since we can't safely disable this
208 	 * mode once enabled...).
209 	 */
210 	rc = get_cxl_module();
211 	if (rc)
212 		return rc;
213 
214 	phb->flags |= PNV_PHB_FLAG_CXL;
215 	hose->controller_ops = pnv_cxl_cx4_ioda_controller_ops;
216 
217 	return 0;
218 }
219 EXPORT_SYMBOL_GPL(pnv_cxl_enable_phb_kernel_api);
220 
221 bool pnv_pci_on_cxl_phb(struct pci_dev *dev)
222 {
223 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
224 	struct pnv_phb *phb = hose->private_data;
225 
226 	return !!(phb->flags & PNV_PHB_FLAG_CXL);
227 }
228 EXPORT_SYMBOL_GPL(pnv_pci_on_cxl_phb);
229 
230 struct cxl_afu *pnv_cxl_phb_to_afu(struct pci_controller *hose)
231 {
232 	struct pnv_phb *phb = hose->private_data;
233 
234 	return (struct cxl_afu *)phb->cxl_afu;
235 }
236 EXPORT_SYMBOL_GPL(pnv_cxl_phb_to_afu);
237 
238 void pnv_cxl_phb_set_peer_afu(struct pci_dev *dev, struct cxl_afu *afu)
239 {
240 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
241 	struct pnv_phb *phb = hose->private_data;
242 
243 	phb->cxl_afu = afu;
244 }
245 EXPORT_SYMBOL_GPL(pnv_cxl_phb_set_peer_afu);
246 
247 /*
248  * In the peer cxl model, the XSL/PSL is physical function 0, and will be used
249  * by other functions on the device for memory access and interrupts. When the
250  * other functions are enabled we explicitly take a reference on the cxl
251  * function since they will use it, and allocate a default context associated
252  * with that function just like the vPHB model of the cxl kernel API.
253  */
254 bool pnv_cxl_enable_device_hook(struct pci_dev *dev)
255 {
256 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
257 	struct pnv_phb *phb = hose->private_data;
258 	struct cxl_afu *afu = phb->cxl_afu;
259 
260 	if (!pnv_pci_enable_device_hook(dev))
261 		return false;
262 
263 
264 	/* No special handling for the cxl function, which is always PF 0 */
265 	if (PCI_FUNC(dev->devfn) == 0)
266 		return true;
267 
268 	if (!afu) {
269 		dev_WARN(&dev->dev, "Attempted to enable function > 0 on CXL PHB without a peer AFU\n");
270 		return false;
271 	}
272 
273 	dev_info(&dev->dev, "Enabling function on CXL enabled PHB with peer AFU\n");
274 
275 	/* Make sure the peer AFU can't go away while this device is active */
276 	cxl_afu_get(afu);
277 
278 	return cxl_pci_associate_default_context(dev, afu);
279 }
280 
281 void pnv_cxl_disable_device(struct pci_dev *dev)
282 {
283 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
284 	struct pnv_phb *phb = hose->private_data;
285 	struct cxl_afu *afu = phb->cxl_afu;
286 
287 	/* No special handling for cxl function: */
288 	if (PCI_FUNC(dev->devfn) == 0)
289 		return;
290 
291 	cxl_pci_disable_device(dev);
292 	cxl_afu_put(afu);
293 }
294