xref: /openbmc/linux/lib/devres.c (revision 645f08975f49441b3e753d8dc5b740cbcb226594)
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/of_address.h>

enum devm_ioremap_type {
	DEVM_IOREMAP = 0,
	DEVM_IOREMAP_UC,
	DEVM_IOREMAP_WC,
};
14  
15  void devm_ioremap_release(struct device *dev, void *res)
16  {
17  	iounmap(*(void __iomem **)res);
18  }
19  
20  static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
21  {
22  	return *(void **)res == match_data;
23  }
24  
25  static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
26  				    resource_size_t size,
27  				    enum devm_ioremap_type type)
28  {
29  	void __iomem **ptr, *addr = NULL;
30  
31  	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
32  	if (!ptr)
33  		return NULL;
34  
35  	switch (type) {
36  	case DEVM_IOREMAP:
37  		addr = ioremap(offset, size);
38  		break;
39  	case DEVM_IOREMAP_UC:
40  		addr = ioremap_uc(offset, size);
41  		break;
42  	case DEVM_IOREMAP_WC:
43  		addr = ioremap_wc(offset, size);
44  		break;
45  	}
46  
47  	if (addr) {
48  		*ptr = addr;
49  		devres_add(dev, ptr);
50  	} else
51  		devres_free(ptr);
52  
53  	return addr;
54  }
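
/*
 * The helper above follows the standard devres pattern (a minimal sketch;
 * "release_fn" and "cookie" are placeholder names):
 *
 *	ptr = devres_alloc(release_fn, sizeof(*ptr), GFP_KERNEL);
 *	if (!ptr)
 *		return NULL;
 *	*ptr = cookie;
 *	devres_add(dev, ptr);
 *
 * release_fn(dev, ptr) then runs automatically on driver detach;
 * devres_free() is only used on the failure path, before the record has
 * been added to the device.
 */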

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);
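
/*
 * Usage sketch for devm_ioremap() (illustrative; "pdev" and the resource
 * are assumed to come from the caller's probe routine):
 *
 *	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	void __iomem *base;
 *
 *	base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
 *	if (!base)
 *		return -ENOMEM;
 */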

/**
 * devm_ioremap_uc - Managed ioremap_uc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_uc().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
}
EXPORT_SYMBOL_GPL(devm_ioremap_uc);

/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_wc().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
}
EXPORT_SYMBOL(devm_ioremap_wc);
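
/*
 * Usage sketch for devm_ioremap_wc(): write-combining suits streaming
 * writes to buffers such as framebuffers (the "fb" resource here is
 * hypothetical):
 *
 *	fb_base = devm_ioremap_wc(&pdev->dev, fb->start, resource_size(fb));
 *	if (!fb_base)
 *		return -ENOMEM;
 */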

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	/* devres_destroy() drops the record without calling the release */
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (__force void *)addr));
	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
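
/*
 * devm_iounmap() is only needed when a mapping must be torn down before
 * driver detach; a minimal sketch (assuming "base" was returned by one of
 * the devm_ioremap*() calls above):
 *
 *	devm_iounmap(&pdev->dev, base);
 */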

static void __iomem *
__devm_ioremap_resource(struct device *dev, const struct resource *res,
			enum devm_ioremap_type type)
{
	resource_size_t size;
	void __iomem *dest_ptr;
	char *pretty_name;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	if (res->name)
		pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
					     dev_name(dev), res->name);
	else
		pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!pretty_name)
		return IOMEM_ERR_PTR(-ENOMEM);

	if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = __devm_ioremap(dev, res->start, size, type);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}

/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_ioremap_resource(struct device *dev,
				    const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap_resource);

/**
 * devm_ioremap_resource_wc() - write-combined variant of
 *				devm_ioremap_resource()
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource_wc(struct device *dev,
				       const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
}
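
/*
 * Usage sketch for devm_ioremap_resource_wc(), mirroring the
 * devm_ioremap_resource() example above ("pdev" is assumed):
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource_wc(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */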

/**
 * devm_of_iomap - Requests a resource and maps the memory mapped IO
 *		   for a given device_node managed by a given device
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach of the device.
 *
 * This is to be used when a device requests/maps resources described
 * by other device tree nodes (children or otherwise).
 *
 * @dev:	The device "managing" the resource
 * @node:	The device-tree node where the resource resides
 * @index:	index of the MMIO range in the "reg" property
 * @size:	Returns the size of the resource (pass NULL if not needed)
 *
 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure. Usage example:
 *
 *	base = devm_of_iomap(&pdev->dev, node, 0, NULL);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Please note: This is not a one-to-one replacement for of_iomap() because the
 * of_iomap() function does not track whether the region is already mapped.  If
 * two drivers try to map the same memory, the of_iomap() function will succeed
 * but the devm_of_iomap() function will return -EBUSY.
 */
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
			    resource_size_t *size)
{
	struct resource res;

	if (of_address_to_resource(node, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);
	if (size)
		*size = resource_size(&res);
	return devm_ioremap_resource(dev, &res);
}
EXPORT_SYMBOL(devm_of_iomap);

#ifdef CONFIG_HAS_IOPORT_MAP
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			       unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
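
/*
 * Usage sketch for devm_ioport_map() (legacy port I/O; the 0x3f8/8 UART
 * range is a hypothetical example):
 *
 *	void __iomem *regs = devm_ioport_map(&pdev->dev, 0x3f8, 8);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	iowrite8(val, regs + offset);
 */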

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap().  @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @pdev.  If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated.  All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated, but
 * once the table exists it can be called safely from any context and
 * is guaranteed to succeed.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
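
/*
 * Usage sketch: after pcim_iomap() or pcim_iomap_regions(), drivers look
 * mappings up by BAR index (BAR 0 and the "hw" struct are assumed here):
 *
 *	void __iomem * const *tbl = pcim_iomap_table(pdev);
 *
 *	if (!tbl || !tbl[0])
 *		return -ENOMEM;
 *	hw->mmio = tbl[0];
 */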

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
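
/*
 * Usage sketch for pcim_iomap() (BAR 0; maxlen 0 maps the whole BAR;
 * "pdev" is assumed to be managed, e.g. via pcim_enable_device()):
 *
 *	void __iomem *mmio = pcim_iomap(pdev, 0, 0);
 *
 *	if (!mmio)
 *		return -ENOMEM;
 */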

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap().  @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
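
/*
 * Usage sketch for pcim_iomap_regions(): request and map BARs 0 and 2
 * during probe (the mask selects BARs; "mydrv" is a placeholder name):
 *
 *	rc = pcim_iomap_regions(pdev, (1 << 0) | (1 << 2), "mydrv");
 *	if (rc)
 *		return rc;
 *	mmio = pcim_iomap_table(pdev)[0];
 */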

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	/* request every standard BAR that @mask doesn't already cover */
	int request_mask = ((1 << PCI_STD_NUM_BARS) - 1) & ~mask;
	int rc;

	rc = pci_request_selected_regions(pdev, request_mask, name);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, mask, name);
	if (rc)
		pci_release_selected_regions(pdev, request_mask);
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);
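
/*
 * Usage sketch for pcim_iomap_regions_request_all(): claim every standard
 * BAR (so nothing else can) but only iomap BAR 0 ("mydrv" is a
 * placeholder name):
 *
 *	rc = pcim_iomap_regions_request_all(pdev, 1 << 0, "mydrv");
 *	if (rc)
 *		return rc;
 */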

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */