// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, NVIDIA Corporation.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/pid.h>
#include <linux/slab.h>

#include "context.h"
#include "dev.h"

int host1x_memory_context_list_init(struct host1x *host1x)
{
	struct host1x_memory_context_list *cdl = &host1x->context_list;
	struct device_node *node = host1x->dev->of_node;
	struct host1x_memory_context *ctx;
	unsigned int i;
	int err;

	cdl->devs = NULL;
	cdl->len = 0;
	mutex_init(&cdl->lock);

	/*
	 * A missing iommu-map property means this host1x instance does not
	 * support memory contexts. That is not an error; the list simply
	 * stays empty.
	 */
	err = of_property_count_u32_elems(node, "iommu-map");
	if (err < 0)
		return 0;

	cdl->devs = kcalloc(err, sizeof(*cdl->devs), GFP_KERNEL);
	if (!cdl->devs)
		return -ENOMEM;

	/* Each iommu-map entry is four u32 cells and describes one context. */
	cdl->len = err / 4;

	for (i = 0; i < cdl->len; i++) {
		struct iommu_fwspec *fwspec;

		ctx = &cdl->devs[i];

		ctx->host = host1x;

		device_initialize(&ctx->dev);

		/*
		 * Due to an issue with T194 NVENC, only 38 bits can be used.
		 * Anyway, 256GiB of IOVA ought to be enough for anyone.
		 */
		ctx->dma_mask = DMA_BIT_MASK(38);
		ctx->dev.dma_mask = &ctx->dma_mask;
		ctx->dev.coherent_dma_mask = ctx->dma_mask;
		dev_set_name(&ctx->dev, "host1x-ctx.%d", i);
		ctx->dev.bus = &host1x_context_device_bus_type;
		ctx->dev.parent = host1x->dev;

		dma_set_max_seg_size(&ctx->dev, UINT_MAX);

		err = device_add(&ctx->dev);
		if (err) {
			dev_err(host1x->dev, "could not add context device %d: %d\n", i, err);
			goto del_devices;
		}

		/* Attach the context device to the IOMMU via iommu-map entry i. */
		err = of_dma_configure_id(&ctx->dev, node, true, &i);
		if (err) {
			dev_err(host1x->dev, "IOMMU configuration failed for context device %d: %d\n",
				i, err);
			device_del(&ctx->dev);
			goto del_devices;
		}

		fwspec = dev_iommu_fwspec_get(&ctx->dev);
		if (!fwspec || !device_iommu_mapped(&ctx->dev)) {
			dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
			device_del(&ctx->dev);
			goto del_devices;
		}

		/* The stream ID is the low 16 bits of the first IOMMU specifier. */
		ctx->stream_id = fwspec->ids[0] & 0xffff;
	}

	return 0;

del_devices:
	/* Unwind: remove the context devices that were added so far. */
	while (i--)
		device_del(&cdl->devs[i].dev);

	kfree(cdl->devs);
	cdl->len = 0;

	return err;
}

void host1x_memory_context_list_free(struct host1x_memory_context_list *cdl)
{
	unsigned int i;

	for (i = 0; i < cdl->len; i++)
		device_del(&cdl->devs[i].dev);

	kfree(cdl->devs);
	cdl->len = 0;
}

struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
							   struct device *dev,
							   struct pid *pid)
{
	struct host1x_memory_context_list *cdl = &host1x->context_list;
	struct host1x_memory_context *free = NULL;
	int i;

	/* An empty list means this host1x instance has no iommu-map. */
	if (!cdl->len)
		return ERR_PTR(-EOPNOTSUPP);

	mutex_lock(&cdl->lock);

	for (i = 0; i < cdl->len; i++) {
		struct host1x_memory_context *cd = &cdl->devs[i];

		/* Skip context devices that sit behind a different IOMMU. */
		if (cd->dev.iommu->iommu_dev != dev->iommu->iommu_dev)
			continue;

		if (cd->owner == pid) {
			refcount_inc(&cd->ref);
			mutex_unlock(&cdl->lock);
			return cd;
		} else if (!cd->owner && !free) {
			free = cd;
		}
	}

	if (!free) {
		mutex_unlock(&cdl->lock);
		return ERR_PTR(-EBUSY);
	}

	refcount_set(&free->ref, 1);
	free->owner = get_pid(pid);

	mutex_unlock(&cdl->lock);

	return free;
}
EXPORT_SYMBOL_GPL(host1x_memory_context_alloc);

void host1x_memory_context_get(struct host1x_memory_context *cd)
{
	refcount_inc(&cd->ref);
}
EXPORT_SYMBOL_GPL(host1x_memory_context_get);

void host1x_memory_context_put(struct host1x_memory_context *cd)
{
	struct host1x_memory_context_list *cdl = &cd->host->context_list;

	/*
	 * When the last reference is dropped, release ownership so that the
	 * context can be handed out again; the context device itself stays
	 * registered until host1x_memory_context_list_free().
	 */
	if (refcount_dec_and_mutex_lock(&cd->ref, &cdl->lock)) {
		put_pid(cd->owner);
		cd->owner = NULL;
		mutex_unlock(&cdl->lock);
	}
}
EXPORT_SYMBOL_GPL(host1x_memory_context_put);
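
/*
 * Example usage (an illustrative sketch, not part of this file): a host1x
 * client driver would typically allocate one memory context per owning
 * process when a channel is opened, program ctx->stream_id into the engine,
 * and drop its reference again on release. The "client" variable below is
 * hypothetical. -EOPNOTSUPP only signals that this host1x instance has no
 * iommu-map, in which case running without a memory context is a valid
 * fallback.
 *
 *	struct host1x_memory_context *ctx;
 *
 *	ctx = host1x_memory_context_alloc(host1x, client->dev,
 *					  get_task_pid(current, PIDTYPE_TGID));
 *	if (IS_ERR(ctx)) {
 *		if (PTR_ERR(ctx) != -EOPNOTSUPP)
 *			return PTR_ERR(ctx);
 *		ctx = NULL;
 *	}
 *
 *	...
 *
 *	if (ctx)
 *		host1x_memory_context_put(ctx);
 */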