// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel
 *
 * Based on drivers/base/devres.c
 */

#include <drm/drm_managed.h>

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <drm/drm_device.h>
#include <drm/drm_print.h>

/**
 * DOC: managed resources
 *
 * Inspired by struct &device managed resources, but tied to the lifetime of
 * struct &drm_device, which can outlive the underlying physical device, usually
 * when userspace has some open files and other handles to resources still open.
 */

/*
 * Bookkeeping for one managed resource: list linkage on
 * &drm_device.managed.resources, an optional release action, a debug name
 * (allocated with kstrdup_const(), freed with kfree_const()), and the size
 * of the trailing payload (0 for pure actions with no payload).
 */
struct drmres_node {
	struct list_head entry;
	drmres_release_t release;
	const char *name;
	size_t size;
};

/*
 * A managed resource: the node header followed by the caller-visible
 * payload. Callers get a pointer to @data, never to the struct itself.
 */
struct drmres {
	struct drmres_node node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

/*
 * Free one resource entry. The debug name was allocated with
 * kstrdup_const(), hence kfree_const(); kfree_const(NULL) is a no-op, so a
 * failed name allocation is harmless here.
 */
static void free_dr(struct drmres *dr)
{
	kfree_const(dr->node.name);
	kfree(dr);
}

/*
 * Run all pending release actions and free every managed resource of @dev.
 *
 * The list is walked without taking managed.lock — NOTE(review): presumably
 * only safe because this runs during final drm_device release when no other
 * users remain; confirm against the caller in drm_drv.c.
 *
 * For action entries (node.size != 0) the stored user pointer is fetched back
 * out of the payload via the *(void **)&dr->data round-trip that
 * __drmm_add_action() used to store it; size 0 means the action was
 * registered with NULL data.
 */
void drm_managed_release(struct drm_device *dev)
{
	struct drmres *dr, *tmp;

	drm_dbg_drmres(dev, "drmres release begin\n");
	list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
		drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
			       dr, dr->node.name, dr->node.size);

		if (dr->node.release)
			dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);

		list_del(&dr->node.entry);
		free_dr(dr);
	}
	drm_dbg_drmres(dev, "drmres release end\n");
}

/*
 * Always inline so that kmalloc_track_caller tracks the actual interesting
 * caller outside of drm_managed.c.
 *
 * Allocates header + @size payload bytes on node @nid, zeroes only the
 * header (the payload is zeroed solely when the caller passes __GFP_ZERO),
 * and initializes the node fields. Returns NULL on overflow or OOM. The
 * entry is NOT yet on any list; callers follow up with add_dr().
 */
static __always_inline struct drmres * alloc_dr(drmres_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct drmres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct drmres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	dr->node.size = size;

	return dr;
}

/*
 * Unlink @dr from the managed-resources list. Caller holds managed.lock;
 * list_del_init() leaves the entry self-linked so a later free is safe.
 */
static void del_dr(struct drm_device *dev, struct drmres *dr)
{
	list_del_init(&dr->node.entry);

	drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

/*
 * Link @dr onto @dev's managed-resources list under managed.lock.
 * list_add() prepends, so drm_managed_release() runs releases in LIFO
 * order — newest registration released first, mirroring devres.
 */
static void add_dr(struct drm_device *dev, struct drmres *dr)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_add(&dr->node.entry, &dev->managed.resources);
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
}

/*
 * Record the allocation that must be kfree()'d last, after all managed
 * resources are released. The WARN_ONs sanity-check that the drm_device is
 * embedded inside @container: it must start at or after @container and end
 * before @container + ksize(container). Calling this twice is a bug
 * (first WARN_ON).
 */
void drmm_add_final_kfree(struct drm_device *dev, void *container)
{
	WARN_ON(dev->managed.final_kfree);
	WARN_ON(dev < (struct drm_device *) container);
	WARN_ON(dev + 1 >=
		(struct drm_device *) (container + ksize(container)));
	dev->managed.final_kfree = container;
}
EXPORT_SYMBOL(drmm_add_final_kfree);

/*
 * Register @action to be called with @data when @dev is released.
 *
 * @data is stored by value inside the payload (a single void * when
 * non-NULL, nothing when NULL) and handed back to @action by
 * drm_managed_release(). Returns 0 on success, -ENOMEM if the node
 * allocation fails.
 *
 * NOTE(review): kstrdup_const(@name) is not checked for failure — on OOM
 * node.name is NULL. That only affects the debug prints (and
 * kfree_const(NULL) is fine), but confirm %s with a NULL argument is
 * acceptable to the drm_dbg_drmres() printk path.
 */
int __drmm_add_action(struct drm_device *dev,
		      drmres_release_t action,
		      void *data, const char *name)
{
	struct drmres *dr;
	void **void_ptr;

	dr = alloc_dr(action, data ? sizeof(void*) : 0,
		      GFP_KERNEL | __GFP_ZERO,
		      dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to add action %s for %p\n",
			       name, data);
		return -ENOMEM;
	}

	dr->node.name = kstrdup_const(name, GFP_KERNEL);
	if (data) {
		/* Stash the pointer itself in the payload. */
		void_ptr = (void **)&dr->data;
		*void_ptr = data;
	}

	add_dr(dev, dr);

	return 0;
}
EXPORT_SYMBOL(__drmm_add_action);

/*
 * kmalloc() tied to the lifetime of @dev: the buffer is freed automatically
 * in drm_managed_release(). Returns a pointer to the payload area (aligned
 * like plain kmalloc), or NULL on failure. node.release stays NULL — the
 * payload itself is the resource, freed together with the node by free_dr().
 */
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
{
	struct drmres *dr;

	dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
			       size, gfp);
		return NULL;
	}
	dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);

	add_dr(dev, dr);

	return dr->data;
}
EXPORT_SYMBOL(drmm_kmalloc);

/*
 * strdup() backed by drmm_kmalloc(): the copy lives until @dev is released.
 * Returns NULL if @s is NULL or the allocation fails.
 */
char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = drmm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(drmm_kstrdup);

/*
 * Immediately free a buffer previously obtained from drmm_kmalloc() (or
 * drmm_kstrdup()), unlinking it from the managed list under managed.lock.
 *
 * The dr->data == data comparison matches the flexible-array payload
 * address, i.e. exactly the pointer drmm_kmalloc() returned — action
 * entries from __drmm_add_action() can never match, which is intended.
 * Passing a pointer not managed by @dev is a caller bug and triggers the
 * WARN_ON; NULL is a no-op, matching kfree() semantics.
 */
void drmm_kfree(struct drm_device *dev, void *data)
{
	struct drmres *dr_match = NULL, *dr;
	unsigned long flags;

	if (!data)
		return;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_for_each_entry(dr, &dev->managed.resources, node.entry) {
		if (dr->data == data) {
			dr_match = dr;
			del_dr(dev, dr_match);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	if (WARN_ON(!dr_match))
		return;

	free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_kfree);