xref: /openbmc/linux/lib/devres.c (revision 875e0c31)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2f4a18312SThierry Reding #include <linux/err.h>
35ea81769SAl Viro #include <linux/pci.h>
45ea81769SAl Viro #include <linux/io.h>
55a0e3ad6STejun Heo #include <linux/gfp.h>
68bc3bcc9SPaul Gortmaker #include <linux/export.h>
7d5e83827SBenjamin Herrenschmidt #include <linux/of_address.h>
85ea81769SAl Viro 
/* Mapping flavor requested from __devm_ioremap()/__devm_ioremap_resource(). */
enum devm_ioremap_type {
	DEVM_IOREMAP = 0,	/* plain ioremap() */
	DEVM_IOREMAP_UC,	/* uncached: ioremap_uc() */
	DEVM_IOREMAP_WC,	/* write-combining: ioremap_wc() */
	DEVM_IOREMAP_NP,	/* non-posted writes: ioremap_np() */
};
151b723413SYisheng Xie 
devm_ioremap_release(struct device * dev,void * res)16b41e5fffSEmil Medve void devm_ioremap_release(struct device *dev, void *res)
175ea81769SAl Viro {
185ea81769SAl Viro 	iounmap(*(void __iomem **)res);
195ea81769SAl Viro }
205ea81769SAl Viro 
/* devres match callback: true when the stored mapping equals @match_data. */
static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	void **slot = res;

	return *slot == match_data;
}
255ea81769SAl Viro 
__devm_ioremap(struct device * dev,resource_size_t offset,resource_size_t size,enum devm_ioremap_type type)261b723413SYisheng Xie static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
271b723413SYisheng Xie 				    resource_size_t size,
281b723413SYisheng Xie 				    enum devm_ioremap_type type)
291b723413SYisheng Xie {
301b723413SYisheng Xie 	void __iomem **ptr, *addr = NULL;
311b723413SYisheng Xie 
3255656016SMark-PK Tsai 	ptr = devres_alloc_node(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL,
3355656016SMark-PK Tsai 				dev_to_node(dev));
341b723413SYisheng Xie 	if (!ptr)
351b723413SYisheng Xie 		return NULL;
361b723413SYisheng Xie 
371b723413SYisheng Xie 	switch (type) {
381b723413SYisheng Xie 	case DEVM_IOREMAP:
391b723413SYisheng Xie 		addr = ioremap(offset, size);
401b723413SYisheng Xie 		break;
41e537654bSTuowen Zhao 	case DEVM_IOREMAP_UC:
42e537654bSTuowen Zhao 		addr = ioremap_uc(offset, size);
43e537654bSTuowen Zhao 		break;
441b723413SYisheng Xie 	case DEVM_IOREMAP_WC:
451b723413SYisheng Xie 		addr = ioremap_wc(offset, size);
461b723413SYisheng Xie 		break;
477c566bb5SHector Martin 	case DEVM_IOREMAP_NP:
487c566bb5SHector Martin 		addr = ioremap_np(offset, size);
497c566bb5SHector Martin 		break;
501b723413SYisheng Xie 	}
511b723413SYisheng Xie 
521b723413SYisheng Xie 	if (addr) {
531b723413SYisheng Xie 		*ptr = addr;
541b723413SYisheng Xie 		devres_add(dev, ptr);
551b723413SYisheng Xie 	} else
561b723413SYisheng Xie 		devres_free(ptr);
571b723413SYisheng Xie 
581b723413SYisheng Xie 	return addr;
591b723413SYisheng Xie }
601b723413SYisheng Xie 
615ea81769SAl Viro /**
625ea81769SAl Viro  * devm_ioremap - Managed ioremap()
635ea81769SAl Viro  * @dev: Generic device to remap IO address for
646524754eSLorenzo Pieralisi  * @offset: Resource address to map
655ea81769SAl Viro  * @size: Size of map
665ea81769SAl Viro  *
675ea81769SAl Viro  * Managed ioremap().  Map is automatically unmapped on driver detach.
685ea81769SAl Viro  */
devm_ioremap(struct device * dev,resource_size_t offset,resource_size_t size)694f452e8aSKumar Gala void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
705559b7bcSCristian Stoica 			   resource_size_t size)
715ea81769SAl Viro {
721b723413SYisheng Xie 	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
735ea81769SAl Viro }
745ea81769SAl Viro EXPORT_SYMBOL(devm_ioremap);
755ea81769SAl Viro 
765ea81769SAl Viro /**
77e537654bSTuowen Zhao  * devm_ioremap_uc - Managed ioremap_uc()
78e537654bSTuowen Zhao  * @dev: Generic device to remap IO address for
79e537654bSTuowen Zhao  * @offset: Resource address to map
80e537654bSTuowen Zhao  * @size: Size of map
81e537654bSTuowen Zhao  *
82e537654bSTuowen Zhao  * Managed ioremap_uc().  Map is automatically unmapped on driver detach.
83e537654bSTuowen Zhao  */
devm_ioremap_uc(struct device * dev,resource_size_t offset,resource_size_t size)84e537654bSTuowen Zhao void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
85e537654bSTuowen Zhao 			      resource_size_t size)
86e537654bSTuowen Zhao {
87e537654bSTuowen Zhao 	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
88e537654bSTuowen Zhao }
89e537654bSTuowen Zhao EXPORT_SYMBOL_GPL(devm_ioremap_uc);
90e537654bSTuowen Zhao 
91e537654bSTuowen Zhao /**
9234644524SAbhilash Kesavan  * devm_ioremap_wc - Managed ioremap_wc()
9334644524SAbhilash Kesavan  * @dev: Generic device to remap IO address for
946524754eSLorenzo Pieralisi  * @offset: Resource address to map
9534644524SAbhilash Kesavan  * @size: Size of map
9634644524SAbhilash Kesavan  *
9734644524SAbhilash Kesavan  * Managed ioremap_wc().  Map is automatically unmapped on driver detach.
9834644524SAbhilash Kesavan  */
devm_ioremap_wc(struct device * dev,resource_size_t offset,resource_size_t size)9934644524SAbhilash Kesavan void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
10034644524SAbhilash Kesavan 			      resource_size_t size)
10134644524SAbhilash Kesavan {
1021b723413SYisheng Xie 	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
10334644524SAbhilash Kesavan }
10434644524SAbhilash Kesavan EXPORT_SYMBOL(devm_ioremap_wc);
10534644524SAbhilash Kesavan 
10634644524SAbhilash Kesavan /**
1075ea81769SAl Viro  * devm_iounmap - Managed iounmap()
1085ea81769SAl Viro  * @dev: Generic device to unmap for
1095ea81769SAl Viro  * @addr: Address to unmap
1105ea81769SAl Viro  *
1115ea81769SAl Viro  * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
1125ea81769SAl Viro  */
devm_iounmap(struct device * dev,void __iomem * addr)1135ea81769SAl Viro void devm_iounmap(struct device *dev, void __iomem *addr)
1145ea81769SAl Viro {
1155ea81769SAl Viro 	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
116b104d6a5SSteven Rostedt 			       (__force void *)addr));
117ae891a1bSMaxin B John 	iounmap(addr);
1185ea81769SAl Viro }
1195ea81769SAl Viro EXPORT_SYMBOL(devm_iounmap);
1205ea81769SAl Viro 
/*
 * Common worker for devm_ioremap_resource*(): validate @res, request the
 * memory region under a generated name, and map it with the requested
 * mapping @type.  Returns an IOMEM_ERR_PTR() on failure.
 */
static void __iomem *
__devm_ioremap_resource(struct device *dev, const struct resource *res,
			enum devm_ioremap_type type)
{
	resource_size_t size;
	void __iomem *dest_ptr;
	char *pretty_name;

	BUG_ON(!dev);

	/* Only memory resources can be ioremapped. */
	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource %pR\n", res);
		return IOMEM_ERR_PTR(-EINVAL);
	}

	/* Honor a resource marked as requiring non-posted writes. */
	if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
		type = DEVM_IOREMAP_NP;

	size = resource_size(res);

	/*
	 * Build "<device> <resource>" (or just the device name) to label the
	 * requested region.  The string is devm-allocated, so on the error
	 * returns below it is reclaimed at driver detach rather than here.
	 */
	if (res->name)
		pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
					     dev_name(dev), res->name);
	else
		pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!pretty_name) {
		dev_err(dev, "can't generate pretty name for resource %pR\n", res);
		return IOMEM_ERR_PTR(-ENOMEM);
	}

	if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = __devm_ioremap(dev, res->start, size, type);
	if (!dest_ptr) {
		/* Undo the region request; the map itself never happened. */
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}
1656e924822SBartosz Golaszewski 
16672f8c0bfSWolfram Sang /**
16775096579SThierry Reding  * devm_ioremap_resource() - check, request region, and ioremap resource
16875096579SThierry Reding  * @dev: generic device to handle the resource for
16975096579SThierry Reding  * @res: resource to be handled
17075096579SThierry Reding  *
17192b19ff5SDan Williams  * Checks that a resource is a valid memory region, requests the memory
17292b19ff5SDan Williams  * region and ioremaps it. All operations are managed and will be undone
17392b19ff5SDan Williams  * on driver detach.
17475096579SThierry Reding  *
1750c7a6b91SStephen Boyd  * Usage example:
17675096579SThierry Reding  *
17775096579SThierry Reding  *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
17875096579SThierry Reding  *	base = devm_ioremap_resource(&pdev->dev, res);
17975096579SThierry Reding  *	if (IS_ERR(base))
18075096579SThierry Reding  *		return PTR_ERR(base);
1810c7a6b91SStephen Boyd  *
1820c7a6b91SStephen Boyd  * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
1830c7a6b91SStephen Boyd  * on failure.
18475096579SThierry Reding  */
devm_ioremap_resource(struct device * dev,const struct resource * res)185eef778c9SArnd Bergmann void __iomem *devm_ioremap_resource(struct device *dev,
186eef778c9SArnd Bergmann 				    const struct resource *res)
18775096579SThierry Reding {
1886e924822SBartosz Golaszewski 	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
18975096579SThierry Reding }
19075096579SThierry Reding EXPORT_SYMBOL(devm_ioremap_resource);
19175096579SThierry Reding 
192b873af62SBartosz Golaszewski /**
193b873af62SBartosz Golaszewski  * devm_ioremap_resource_wc() - write-combined variant of
194b873af62SBartosz Golaszewski  *				devm_ioremap_resource()
195b873af62SBartosz Golaszewski  * @dev: generic device to handle the resource for
196b873af62SBartosz Golaszewski  * @res: resource to be handled
197b873af62SBartosz Golaszewski  *
1980c7a6b91SStephen Boyd  * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
1990c7a6b91SStephen Boyd  * on failure.
200b873af62SBartosz Golaszewski  */
devm_ioremap_resource_wc(struct device * dev,const struct resource * res)201b873af62SBartosz Golaszewski void __iomem *devm_ioremap_resource_wc(struct device *dev,
202b873af62SBartosz Golaszewski 				       const struct resource *res)
203b873af62SBartosz Golaszewski {
204b873af62SBartosz Golaszewski 	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
205b873af62SBartosz Golaszewski }
206b873af62SBartosz Golaszewski 
207d5e83827SBenjamin Herrenschmidt /*
208d5e83827SBenjamin Herrenschmidt  * devm_of_iomap - Requests a resource and maps the memory mapped IO
209d5e83827SBenjamin Herrenschmidt  *		   for a given device_node managed by a given device
210d5e83827SBenjamin Herrenschmidt  *
211d5e83827SBenjamin Herrenschmidt  * Checks that a resource is a valid memory region, requests the memory
212d5e83827SBenjamin Herrenschmidt  * region and ioremaps it. All operations are managed and will be undone
213d5e83827SBenjamin Herrenschmidt  * on driver detach of the device.
214d5e83827SBenjamin Herrenschmidt  *
215d5e83827SBenjamin Herrenschmidt  * This is to be used when a device requests/maps resources described
216d5e83827SBenjamin Herrenschmidt  * by other device tree nodes (children or otherwise).
217d5e83827SBenjamin Herrenschmidt  *
218d5e83827SBenjamin Herrenschmidt  * @dev:	The device "managing" the resource
219d5e83827SBenjamin Herrenschmidt  * @node:       The device-tree node where the resource resides
220d5e83827SBenjamin Herrenschmidt  * @index:	index of the MMIO range in the "reg" property
221d5e83827SBenjamin Herrenschmidt  * @size:	Returns the size of the resource (pass NULL if not needed)
2220c7a6b91SStephen Boyd  *
2230c7a6b91SStephen Boyd  * Usage example:
224d5e83827SBenjamin Herrenschmidt  *
225d5e83827SBenjamin Herrenschmidt  *	base = devm_of_iomap(&pdev->dev, node, 0, NULL);
226d5e83827SBenjamin Herrenschmidt  *	if (IS_ERR(base))
227d5e83827SBenjamin Herrenschmidt  *		return PTR_ERR(base);
2287ae731a8SDan Carpenter  *
2297ae731a8SDan Carpenter  * Please Note: This is not a one-to-one replacement for of_iomap() because the
2307ae731a8SDan Carpenter  * of_iomap() function does not track whether the region is already mapped.  If
2317ae731a8SDan Carpenter  * two drivers try to map the same memory, the of_iomap() function will succeed
23228d9fdf0SRandy Dunlap  * but the devm_of_iomap() function will return -EBUSY.
2337ae731a8SDan Carpenter  *
2340c7a6b91SStephen Boyd  * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
2350c7a6b91SStephen Boyd  * error code on failure.
236d5e83827SBenjamin Herrenschmidt  */
devm_of_iomap(struct device * dev,struct device_node * node,int index,resource_size_t * size)237d5e83827SBenjamin Herrenschmidt void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
238d5e83827SBenjamin Herrenschmidt 			    resource_size_t *size)
239d5e83827SBenjamin Herrenschmidt {
240d5e83827SBenjamin Herrenschmidt 	struct resource res;
241d5e83827SBenjamin Herrenschmidt 
242d5e83827SBenjamin Herrenschmidt 	if (of_address_to_resource(node, index, &res))
243d5e83827SBenjamin Herrenschmidt 		return IOMEM_ERR_PTR(-EINVAL);
244d5e83827SBenjamin Herrenschmidt 	if (size)
245d5e83827SBenjamin Herrenschmidt 		*size = resource_size(&res);
246d5e83827SBenjamin Herrenschmidt 	return devm_ioremap_resource(dev, &res);
247d5e83827SBenjamin Herrenschmidt }
248d5e83827SBenjamin Herrenschmidt EXPORT_SYMBOL(devm_of_iomap);
249d5e83827SBenjamin Herrenschmidt 
250ce816fa8SUwe Kleine-König #ifdef CONFIG_HAS_IOPORT_MAP
2515ea81769SAl Viro /*
2525ea81769SAl Viro  * Generic iomap devres
2535ea81769SAl Viro  */
devm_ioport_map_release(struct device * dev,void * res)2545ea81769SAl Viro static void devm_ioport_map_release(struct device *dev, void *res)
2555ea81769SAl Viro {
2565ea81769SAl Viro 	ioport_unmap(*(void __iomem **)res);
2575ea81769SAl Viro }
2585ea81769SAl Viro 
/* devres match callback: true when the stored mapping equals @match_data. */
static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	void **slot = res;

	return *slot == match_data;
}
2645ea81769SAl Viro 
2655ea81769SAl Viro /**
2665ea81769SAl Viro  * devm_ioport_map - Managed ioport_map()
2675ea81769SAl Viro  * @dev: Generic device to map ioport for
2685ea81769SAl Viro  * @port: Port to map
2695ea81769SAl Viro  * @nr: Number of ports to map
2705ea81769SAl Viro  *
2715ea81769SAl Viro  * Managed ioport_map().  Map is automatically unmapped on driver
2725ea81769SAl Viro  * detach.
2730c7a6b91SStephen Boyd  *
2740c7a6b91SStephen Boyd  * Return: a pointer to the remapped memory or NULL on failure.
2755ea81769SAl Viro  */
devm_ioport_map(struct device * dev,unsigned long port,unsigned int nr)2765ea81769SAl Viro void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
2775ea81769SAl Viro 			       unsigned int nr)
2785ea81769SAl Viro {
2795ea81769SAl Viro 	void __iomem **ptr, *addr;
2805ea81769SAl Viro 
28155656016SMark-PK Tsai 	ptr = devres_alloc_node(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL,
28255656016SMark-PK Tsai 				dev_to_node(dev));
2835ea81769SAl Viro 	if (!ptr)
2845ea81769SAl Viro 		return NULL;
2855ea81769SAl Viro 
2865ea81769SAl Viro 	addr = ioport_map(port, nr);
2875ea81769SAl Viro 	if (addr) {
2885ea81769SAl Viro 		*ptr = addr;
2895ea81769SAl Viro 		devres_add(dev, ptr);
2905ea81769SAl Viro 	} else
2915ea81769SAl Viro 		devres_free(ptr);
2925ea81769SAl Viro 
2935ea81769SAl Viro 	return addr;
2945ea81769SAl Viro }
2955ea81769SAl Viro EXPORT_SYMBOL(devm_ioport_map);
2965ea81769SAl Viro 
2975ea81769SAl Viro /**
2985ea81769SAl Viro  * devm_ioport_unmap - Managed ioport_unmap()
2995ea81769SAl Viro  * @dev: Generic device to unmap for
3005ea81769SAl Viro  * @addr: Address to unmap
3015ea81769SAl Viro  *
3025ea81769SAl Viro  * Managed ioport_unmap().  @addr must have been mapped using
3035ea81769SAl Viro  * devm_ioport_map().
3045ea81769SAl Viro  */
devm_ioport_unmap(struct device * dev,void __iomem * addr)3055ea81769SAl Viro void devm_ioport_unmap(struct device *dev, void __iomem *addr)
3065ea81769SAl Viro {
3075ea81769SAl Viro 	ioport_unmap(addr);
3085ea81769SAl Viro 	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
309b104d6a5SSteven Rostedt 			       devm_ioport_map_match, (__force void *)addr));
3105ea81769SAl Viro }
3115ea81769SAl Viro EXPORT_SYMBOL(devm_ioport_unmap);
312ce816fa8SUwe Kleine-König #endif /* CONFIG_HAS_IOPORT_MAP */
3135ea81769SAl Viro 
3145ea81769SAl Viro #ifdef CONFIG_PCI
3155ea81769SAl Viro /*
3165ea81769SAl Viro  * PCI iomap devres
3175ea81769SAl Viro  */
/* One mapping slot per standard PCI BAR. */
#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS

/* devres payload: per-BAR mapping table maintained by pcim_iomap*(). */
struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};
3235ea81769SAl Viro 
pcim_iomap_release(struct device * gendev,void * res)3245ea81769SAl Viro static void pcim_iomap_release(struct device *gendev, void *res)
3255ea81769SAl Viro {
32620af74efSGeliang Tang 	struct pci_dev *dev = to_pci_dev(gendev);
3275ea81769SAl Viro 	struct pcim_iomap_devres *this = res;
3285ea81769SAl Viro 	int i;
3295ea81769SAl Viro 
3305ea81769SAl Viro 	for (i = 0; i < PCIM_IOMAP_MAX; i++)
3315ea81769SAl Viro 		if (this->table[i])
3325ea81769SAl Viro 			pci_iounmap(dev, this->table[i]);
3335ea81769SAl Viro }
3345ea81769SAl Viro 
/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @dev.  If iomap table doesn't
 * exist and @pdev is managed, it will be allocated.  All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and guaranteed to succeed once
 * allocated.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	/* Fast path: the table was already allocated for this device. */
	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	/* Zero-initialized by devres_alloc_node(), so all slots start empty. */
	new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
				   dev_to_node(&pdev->dev));
	if (!new_dr)
		return NULL;
	/*
	 * devres_get() either installs new_dr or, if another caller raced us,
	 * frees it and returns the already-registered entry — presumably why
	 * its return value is used instead of new_dr here.
	 */
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
3645ea81769SAl Viro 
3655ea81769SAl Viro /**
3665ea81769SAl Viro  * pcim_iomap - Managed pcim_iomap()
3675ea81769SAl Viro  * @pdev: PCI device to iomap for
3685ea81769SAl Viro  * @bar: BAR to iomap
3695ea81769SAl Viro  * @maxlen: Maximum length of iomap
3705ea81769SAl Viro  *
3715ea81769SAl Viro  * Managed pci_iomap().  Map is automatically unmapped on driver
3725ea81769SAl Viro  * detach.
3735ea81769SAl Viro  */
pcim_iomap(struct pci_dev * pdev,int bar,unsigned long maxlen)3745ea81769SAl Viro void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
3755ea81769SAl Viro {
3765ea81769SAl Viro 	void __iomem **tbl;
3775ea81769SAl Viro 
3785ea81769SAl Viro 	BUG_ON(bar >= PCIM_IOMAP_MAX);
3795ea81769SAl Viro 
3805ea81769SAl Viro 	tbl = (void __iomem **)pcim_iomap_table(pdev);
3815ea81769SAl Viro 	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
3825ea81769SAl Viro 		return NULL;
3835ea81769SAl Viro 
3845ea81769SAl Viro 	tbl[bar] = pci_iomap(pdev, bar, maxlen);
3855ea81769SAl Viro 	return tbl[bar];
3865ea81769SAl Viro }
3875ea81769SAl Viro EXPORT_SYMBOL(pcim_iomap);
3885ea81769SAl Viro 
3895ea81769SAl Viro /**
3905ea81769SAl Viro  * pcim_iounmap - Managed pci_iounmap()
3915ea81769SAl Viro  * @pdev: PCI device to iounmap for
3925ea81769SAl Viro  * @addr: Address to unmap
3935ea81769SAl Viro  *
3945ea81769SAl Viro  * Managed pci_iounmap().  @addr must have been mapped using pcim_iomap().
3955ea81769SAl Viro  */
pcim_iounmap(struct pci_dev * pdev,void __iomem * addr)3965ea81769SAl Viro void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
3975ea81769SAl Viro {
3985ea81769SAl Viro 	void __iomem **tbl;
3995ea81769SAl Viro 	int i;
4005ea81769SAl Viro 
4015ea81769SAl Viro 	pci_iounmap(pdev, addr);
4025ea81769SAl Viro 
4035ea81769SAl Viro 	tbl = (void __iomem **)pcim_iomap_table(pdev);
4045ea81769SAl Viro 	BUG_ON(!tbl);
4055ea81769SAl Viro 
4065ea81769SAl Viro 	for (i = 0; i < PCIM_IOMAP_MAX; i++)
4075ea81769SAl Viro 		if (tbl[i] == addr) {
4085ea81769SAl Viro 			tbl[i] = NULL;
4095ea81769SAl Viro 			return;
4105ea81769SAl Viro 		}
4115ea81769SAl Viro 	WARN_ON(1);
4125ea81769SAl Viro }
4135ea81769SAl Viro EXPORT_SYMBOL(pcim_iounmap);
4145ea81769SAl Viro 
/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	/*
	 * NOTE(review): the loop scans DEVICE_COUNT_RESOURCE bits, but
	 * pcim_iomap() BUG_ONs on bar >= PCIM_IOMAP_MAX, so callers must not
	 * set mask bits above the standard BARs — confirm against callers.
	 */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		/* An empty BAR cannot be mapped. */
		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

	/* Unwind: release the region of the BAR whose iomap just failed ... */
 err_region:
	pci_release_region(pdev, i);
	/* ... then unmap and release every earlier BAR selected by @mask. */
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
467ec04b075STejun Heo 
468ec04b075STejun Heo /**
469916fbfb7STejun Heo  * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
470916fbfb7STejun Heo  * @pdev: PCI device to map IO resources for
471916fbfb7STejun Heo  * @mask: Mask of BARs to iomap
472916fbfb7STejun Heo  * @name: Name used when requesting regions
473916fbfb7STejun Heo  *
474916fbfb7STejun Heo  * Request all PCI BARs and iomap regions specified by @mask.
475916fbfb7STejun Heo  */
pcim_iomap_regions_request_all(struct pci_dev * pdev,int mask,const char * name)476fb7ebfe4SYinghai Lu int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
477916fbfb7STejun Heo 				   const char *name)
478916fbfb7STejun Heo {
479916fbfb7STejun Heo 	int request_mask = ((1 << 6) - 1) & ~mask;
480916fbfb7STejun Heo 	int rc;
481916fbfb7STejun Heo 
482916fbfb7STejun Heo 	rc = pci_request_selected_regions(pdev, request_mask, name);
483916fbfb7STejun Heo 	if (rc)
484916fbfb7STejun Heo 		return rc;
485916fbfb7STejun Heo 
486916fbfb7STejun Heo 	rc = pcim_iomap_regions(pdev, mask, name);
487916fbfb7STejun Heo 	if (rc)
488916fbfb7STejun Heo 		pci_release_selected_regions(pdev, request_mask);
489916fbfb7STejun Heo 	return rc;
490916fbfb7STejun Heo }
491916fbfb7STejun Heo EXPORT_SYMBOL(pcim_iomap_regions_request_all);
492916fbfb7STejun Heo 
493916fbfb7STejun Heo /**
494ec04b075STejun Heo  * pcim_iounmap_regions - Unmap and release PCI BARs
495ec04b075STejun Heo  * @pdev: PCI device to map IO resources for
496ec04b075STejun Heo  * @mask: Mask of BARs to unmap and release
497ec04b075STejun Heo  *
4984d45ada3SKulikov Vasiliy  * Unmap and release regions specified by @mask.
499ec04b075STejun Heo  */
pcim_iounmap_regions(struct pci_dev * pdev,int mask)500fb7ebfe4SYinghai Lu void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
501ec04b075STejun Heo {
502ec04b075STejun Heo 	void __iomem * const *iomap;
503ec04b075STejun Heo 	int i;
504ec04b075STejun Heo 
505ec04b075STejun Heo 	iomap = pcim_iomap_table(pdev);
506ec04b075STejun Heo 	if (!iomap)
507ec04b075STejun Heo 		return;
508ec04b075STejun Heo 
5091f35d04aSDan Carpenter 	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
510ec04b075STejun Heo 		if (!(mask & (1 << i)))
511ec04b075STejun Heo 			continue;
512ec04b075STejun Heo 
513ec04b075STejun Heo 		pcim_iounmap(pdev, iomap[i]);
514ec04b075STejun Heo 		pci_release_region(pdev, i);
515ec04b075STejun Heo 	}
516ec04b075STejun Heo }
517ec04b075STejun Heo EXPORT_SYMBOL(pcim_iounmap_regions);
518571806a9SWolfram Sang #endif /* CONFIG_PCI */
5193229b906SThomas Zimmermann 
/* devres release callback: remove the MTRR added by devm_arch_phys_wc_add(). */
static void devm_arch_phys_ac_add_release(struct device *dev, void *res)
{
	int *mtrr = res;

	arch_phys_wc_del(*mtrr);
}
5243229b906SThomas Zimmermann 
5253229b906SThomas Zimmermann /**
5263229b906SThomas Zimmermann  * devm_arch_phys_wc_add - Managed arch_phys_wc_add()
5273229b906SThomas Zimmermann  * @dev: Managed device
5283229b906SThomas Zimmermann  * @base: Memory base address
5293229b906SThomas Zimmermann  * @size: Size of memory range
5303229b906SThomas Zimmermann  *
5313229b906SThomas Zimmermann  * Adds a WC MTRR using arch_phys_wc_add() and sets up a release callback.
5323229b906SThomas Zimmermann  * See arch_phys_wc_add() for more information.
5333229b906SThomas Zimmermann  */
devm_arch_phys_wc_add(struct device * dev,unsigned long base,unsigned long size)5343229b906SThomas Zimmermann int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size)
5353229b906SThomas Zimmermann {
5363229b906SThomas Zimmermann 	int *mtrr;
5373229b906SThomas Zimmermann 	int ret;
5383229b906SThomas Zimmermann 
53955656016SMark-PK Tsai 	mtrr = devres_alloc_node(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL,
54055656016SMark-PK Tsai 				 dev_to_node(dev));
5413229b906SThomas Zimmermann 	if (!mtrr)
5423229b906SThomas Zimmermann 		return -ENOMEM;
5433229b906SThomas Zimmermann 
5443229b906SThomas Zimmermann 	ret = arch_phys_wc_add(base, size);
5453229b906SThomas Zimmermann 	if (ret < 0) {
5463229b906SThomas Zimmermann 		devres_free(mtrr);
5473229b906SThomas Zimmermann 		return ret;
5483229b906SThomas Zimmermann 	}
5493229b906SThomas Zimmermann 
5503229b906SThomas Zimmermann 	*mtrr = ret;
5513229b906SThomas Zimmermann 	devres_add(dev, mtrr);
5523229b906SThomas Zimmermann 
5533229b906SThomas Zimmermann 	return ret;
5543229b906SThomas Zimmermann }
5553229b906SThomas Zimmermann EXPORT_SYMBOL(devm_arch_phys_wc_add);
556c8223107SThomas Zimmermann 
/* devres payload: the range handed to arch_io_reserve_memtype_wc(). */
struct arch_io_reserve_memtype_wc_devres {
	resource_size_t start;	/* base address of the reserved range */
	resource_size_t size;	/* length of the reserved range */
};
561c8223107SThomas Zimmermann 
devm_arch_io_free_memtype_wc_release(struct device * dev,void * res)562c8223107SThomas Zimmermann static void devm_arch_io_free_memtype_wc_release(struct device *dev, void *res)
563c8223107SThomas Zimmermann {
564c8223107SThomas Zimmermann 	const struct arch_io_reserve_memtype_wc_devres *this = res;
565c8223107SThomas Zimmermann 
566c8223107SThomas Zimmermann 	arch_io_free_memtype_wc(this->start, this->size);
567c8223107SThomas Zimmermann }
568c8223107SThomas Zimmermann 
569c8223107SThomas Zimmermann /**
570c8223107SThomas Zimmermann  * devm_arch_io_reserve_memtype_wc - Managed arch_io_reserve_memtype_wc()
571c8223107SThomas Zimmermann  * @dev: Managed device
572c8223107SThomas Zimmermann  * @start: Memory base address
573c8223107SThomas Zimmermann  * @size: Size of memory range
574c8223107SThomas Zimmermann  *
575c8223107SThomas Zimmermann  * Reserves a memory range with WC caching using arch_io_reserve_memtype_wc()
576c8223107SThomas Zimmermann  * and sets up a release callback See arch_io_reserve_memtype_wc() for more
577c8223107SThomas Zimmermann  * information.
578c8223107SThomas Zimmermann  */
devm_arch_io_reserve_memtype_wc(struct device * dev,resource_size_t start,resource_size_t size)579c8223107SThomas Zimmermann int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
580c8223107SThomas Zimmermann 				    resource_size_t size)
581c8223107SThomas Zimmermann {
582c8223107SThomas Zimmermann 	struct arch_io_reserve_memtype_wc_devres *dr;
583c8223107SThomas Zimmermann 	int ret;
584c8223107SThomas Zimmermann 
58555656016SMark-PK Tsai 	dr = devres_alloc_node(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL,
58655656016SMark-PK Tsai 			       dev_to_node(dev));
587c8223107SThomas Zimmermann 	if (!dr)
588c8223107SThomas Zimmermann 		return -ENOMEM;
589c8223107SThomas Zimmermann 
590c8223107SThomas Zimmermann 	ret = arch_io_reserve_memtype_wc(start, size);
591c8223107SThomas Zimmermann 	if (ret < 0) {
592c8223107SThomas Zimmermann 		devres_free(dr);
593c8223107SThomas Zimmermann 		return ret;
594c8223107SThomas Zimmermann 	}
595c8223107SThomas Zimmermann 
596c8223107SThomas Zimmermann 	dr->start = start;
597c8223107SThomas Zimmermann 	dr->size = size;
598c8223107SThomas Zimmermann 	devres_add(dev, dr);
599c8223107SThomas Zimmermann 
600c8223107SThomas Zimmermann 	return ret;
601c8223107SThomas Zimmermann }
602c8223107SThomas Zimmermann EXPORT_SYMBOL(devm_arch_io_reserve_memtype_wc);
603