// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>

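/**
 * struct vmwgfx_gmrid_man - id-based manager for the GMR and MOB placements
 * @manager: the wrapped TTM resource manager.
 * @lock: protects the page accounting (@used_gmr_pages, @max_gmr_pages).
 * @gmr_ida: allocator for the GMR / MOB ids handed out as resource offsets.
 * @max_gmr_ids: highest number of ids that may be allocated.
 * @max_gmr_pages: soft limit on the total number of backing pages.
 * @used_gmr_pages: number of backing pages currently accounted for.
 * @type: placement this manager serves, VMW_PL_GMR or VMW_PL_MOB.
 */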
struct vmwgfx_gmrid_man {
	struct ttm_resource_manager manager;
	spinlock_t lock;
	struct ida gmr_ida;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t used_gmr_pages;
	uint8_t type;
};

static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct vmwgfx_gmrid_man, manager);
}

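/*
 * Allocate an id and account the backing pages of a buffer object.
 * The id becomes the resource's start offset. When the soft page limit
 * is exceeded, the limit is grown (up to half of system RAM) before the
 * allocation is finally failed with -ENOSPC.
 */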
static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
				  struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_resource **res)
{
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
	int id;

	*res = kmalloc(sizeof(**res), GFP_KERNEL);
	if (!*res)
		return -ENOMEM;

	ttm_resource_init(bo, place, *res);

	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
	if (id < 0) {
		ttm_resource_fini(man, *res);
		kfree(*res);
		return id;
	}

	spin_lock(&gman->lock);

	if (gman->max_gmr_pages > 0) {
		gman->used_gmr_pages += PFN_UP((*res)->size);
		/*
		 * Because the graphics memory is a soft limit we can try to
		 * expand it instead of letting the userspace apps crash.
		 * We're just going to have a sane limit (half of RAM)
		 * on the number of MOB's that we create and will try to keep
		 * the system running until we reach that.
		 */
		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) {
			const unsigned long max_graphics_pages = totalram_pages() / 2;
			uint32_t new_max_pages = 0;

			DRM_WARN("vmwgfx: mob memory overflow. Consider increasing guest RAM and graphicsMemory.\n");
			vmw_host_printf("vmwgfx, warning: mob memory overflow. Consider increasing guest RAM and graphicsMemory.\n");

			if (gman->max_gmr_pages > (max_graphics_pages / 2)) {
				DRM_WARN("vmwgfx: guest requires more than half of RAM for graphics.\n");
				new_max_pages = max_graphics_pages;
			} else
				new_max_pages = gman->max_gmr_pages * 2;
			if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) {
				DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n",
					 ((new_max_pages) << (PAGE_SHIFT - 10)));

				gman->max_gmr_pages = new_max_pages;
			} else {
				char buf[256];
				snprintf(buf, sizeof(buf),
					 "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n",
					 ((gman->max_gmr_pages) << (PAGE_SHIFT - 10)));
				vmw_host_printf(buf);
				DRM_WARN("%s", buf);
				goto nospace;
			}
		}
	}

	(*res)->start = id;

	spin_unlock(&gman->lock);
	return 0;

nospace:
	gman->used_gmr_pages -= PFN_UP((*res)->size);
	spin_unlock(&gman->lock);
	ida_free(&gman->gmr_ida, id);
	ttm_resource_fini(man, *res);
	kfree(*res);
	return -ENOSPC;
}

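/*
 * Return the id to the ida, drop the page accounting and free the
 * resource allocated by vmw_gmrid_man_get_node().
 */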
static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
				   struct ttm_resource *res)
{
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

	ida_free(&gman->gmr_ida, res->start);
	spin_lock(&gman->lock);
	gman->used_gmr_pages -= PFN_UP(res->size);
	spin_unlock(&gman->lock);
	ttm_resource_fini(man, res);
	kfree(res);
}

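/*
 * Print the current page usage and limits of a GMR or MOB id manager.
 */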
static void vmw_gmrid_man_debug(struct ttm_resource_manager *man,
				struct drm_printer *printer)
{
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

	BUG_ON(gman->type != VMW_PL_GMR && gman->type != VMW_PL_MOB);

	drm_printf(printer, "%s's used: %u pages, max: %u pages, %u id's\n",
		   (gman->type == VMW_PL_MOB) ? "Mob" : "GMR",
		   gman->used_gmr_pages, gman->max_gmr_pages, gman->max_gmr_ids);
}

static const struct ttm_resource_manager_func vmw_gmrid_manager_func;

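/*
 * Create the id manager for @type (VMW_PL_GMR or VMW_PL_MOB), set its
 * id and page limits from @dev_priv and register it with the TTM device.
 */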
int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
{
	struct ttm_resource_manager *man;
	struct vmwgfx_gmrid_man *gman =
		kzalloc(sizeof(*gman), GFP_KERNEL);

	if (unlikely(!gman))
		return -ENOMEM;

	man = &gman->manager;

	man->func = &vmw_gmrid_manager_func;
	man->use_tt = true;
	ttm_resource_manager_init(man, &dev_priv->bdev, 0);
	spin_lock_init(&gman->lock);
	gman->used_gmr_pages = 0;
	ida_init(&gman->gmr_ida);
	gman->type = type;

	switch (type) {
	case VMW_PL_GMR:
		gman->max_gmr_ids = dev_priv->max_gmr_ids;
		gman->max_gmr_pages = dev_priv->max_gmr_pages;
		break;
	case VMW_PL_MOB:
		gman->max_gmr_ids = VMWGFX_NUM_MOB;
		gman->max_gmr_pages = dev_priv->max_mob_pages;
		break;
	default:
		BUG();
	}
	ttm_set_driver_manager(&dev_priv->bdev, type, &gman->manager);
	ttm_resource_manager_set_used(man, true);
	return 0;
}

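/*
 * Take the manager out of use, evict all buffers backed by it,
 * unregister it from the TTM device and free it.
 */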
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, type);
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

	ttm_resource_manager_set_used(man, false);

	ttm_resource_manager_evict_all(&dev_priv->bdev, man);

	ttm_resource_manager_cleanup(man);

	ttm_set_driver_manager(&dev_priv->bdev, type, NULL);
	ida_destroy(&gman->gmr_ida);
	kfree(gman);
}

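/* The GMR/MOB id managers only need alloc, free and debug callbacks. */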
static const struct ttm_resource_manager_func vmw_gmrid_manager_func = {
	.alloc = vmw_gmrid_man_get_node,
	.free = vmw_gmrid_man_put_node,
	.debug = vmw_gmrid_man_debug
};