/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releaseables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
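
/*
 * Suballocation arithmetic, for illustration only (assuming 4096-byte
 * release bos, per the defines above): a drawable release takes a
 * RELEASE_SIZE = 256 byte slot, so one bo holds 4096 / 256 = 16 releases,
 * and the Nth slot carved out of a bo sits at release_offset = N * 256
 * (slot 5 -> byte 1280).  Surface commands use 128 byte slots instead,
 * giving 4096 / 128 = 32 releases per bo.
 */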

uint64_t
qxl_release_alloc(struct qxl_device *qdev, int type,
                  struct qxl_release **ret)
{
        struct qxl_release *release;
        int handle;
        size_t size = sizeof(*release);
        int idr_ret;

        release = kmalloc(size, GFP_KERNEL);
        if (!release) {
                DRM_ERROR("Out of memory\n");
                return 0;
        }
        release->type = type;
        release->bo_count = 0;
        release->release_offset = 0;
        release->surface_release_id = 0;

        idr_preload(GFP_KERNEL);
        spin_lock(&qdev->release_idr_lock);
        idr_ret = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
        spin_unlock(&qdev->release_idr_lock);
        idr_preload_end();
        handle = idr_ret;
        if (idr_ret < 0) {
                /* don't leak the release on idr failure */
                kfree(release);
                goto release_fail;
        }
        *ret = release;
        QXL_INFO(qdev, "allocated release %d\n", handle);
        release->id = handle;
release_fail:

        return handle;
}

void
qxl_release_free(struct qxl_device *qdev,
                 struct qxl_release *release)
{
        int i;

        QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id,
                 release->type, release->bo_count);

        if (release->surface_release_id)
                qxl_surface_id_dealloc(qdev, release->surface_release_id);

        for (i = 0; i < release->bo_count; ++i) {
                QXL_INFO(qdev, "release %llx\n",
                         release->bos[i]->tbo.addr_space_offset
                         - DRM_FILE_OFFSET);
                qxl_fence_remove_release(&release->bos[i]->fence, release->id);
                qxl_bo_unref(&release->bos[i]);
        }
        spin_lock(&qdev->release_idr_lock);
        idr_remove(&qdev->release_idr, release->id);
        spin_unlock(&qdev->release_idr_lock);
        kfree(release);
}

void
qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
                    struct qxl_bo *bo)
{
        int i;

        for (i = 0; i < release->bo_count; i++)
                if (release->bos[i] == bo)
                        return;

        if (release->bo_count >= QXL_MAX_RES) {
                DRM_ERROR("exceeded max resource on a qxl_release item\n");
                return;
        }
        release->bos[release->bo_count++] = qxl_bo_ref(bo);
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
                                struct qxl_bo **bo)
{
        int ret;

        ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL,
                            bo);
        return ret;
}

int qxl_release_reserve(struct qxl_device *qdev,
                        struct qxl_release *release, bool no_wait)
{
        int ret;

        if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) {
                ret = qxl_bo_reserve(release->bos[0], no_wait);
                if (ret)
                        return ret;
        }
        return 0;
}

void qxl_release_unreserve(struct qxl_device *qdev,
                           struct qxl_release *release)
{
        if (atomic_dec_and_test(&release->bos[0]->reserve_count))
                qxl_bo_unreserve(release->bos[0]);
}
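
/*
 * Sketch of the caller pattern these helpers are built for, judging from
 * the functions in this file (illustration only, not a definitive recipe;
 * fill_cmd() is a hypothetical stand-in for whatever command the caller
 * writes into the release slot):
 *
 *      qxl_alloc_release_reserved(qdev, size, type, &release, &bo);
 *      info = qxl_release_map(qdev, release);
 *      fill_cmd(info);
 *      qxl_release_unmap(qdev, release, info);
 *      qxl_fence_releaseable(qdev, release);
 *      ... push the command to the device ...
 *      qxl_release_unreserve(qdev, release);
 */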

int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                                       enum qxl_surface_cmd_type surface_cmd_type,
                                       struct qxl_release *create_rel,
                                       struct qxl_release **release)
{
        int ret;

        if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
                int idr_ret;
                struct qxl_bo *bo;
                union qxl_release_info *info;

                /* stash the release after the create command */
                idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
                bo = qxl_bo_ref(create_rel->bos[0]);

                (*release)->release_offset = create_rel->release_offset + 64;

                qxl_release_add_res(qdev, *release, bo);

                ret = qxl_release_reserve(qdev, *release, false);
                if (ret) {
                        DRM_ERROR("release reserve failed\n");
                        goto out_unref;
                }
                info = qxl_release_map(qdev, *release);
                info->id = idr_ret;
                qxl_release_unmap(qdev, *release, info);

out_unref:
                qxl_bo_unref(&bo);
                return ret;
        }

        return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
                                          QXL_RELEASE_SURFACE_CMD, release, NULL);
}

int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
                               int type, struct qxl_release **release,
                               struct qxl_bo **rbo)
{
        struct qxl_bo *bo;
        int idr_ret;
        int ret;
        union qxl_release_info *info;
        int cur_idx;

        if (type == QXL_RELEASE_DRAWABLE)
                cur_idx = 0;
        else if (type == QXL_RELEASE_SURFACE_CMD)
                cur_idx = 1;
        else if (type == QXL_RELEASE_CURSOR_CMD)
                cur_idx = 2;
        else {
                DRM_ERROR("got illegal type: %d\n", type);
                return -EINVAL;
        }

        idr_ret = qxl_release_alloc(qdev, type, release);

        mutex_lock(&qdev->release_mutex);
        if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
                qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
                qdev->current_release_bo_offset[cur_idx] = 0;
                qdev->current_release_bo[cur_idx] = NULL;
        }
        if (!qdev->current_release_bo[cur_idx]) {
                ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
                if (ret) {
                        mutex_unlock(&qdev->release_mutex);
                        return ret;
                }

                /* pin release bos - they are too messy to evict */
                ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
                qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
                qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
        }

        bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

        (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
        qdev->current_release_bo_offset[cur_idx]++;

        if (rbo)
                *rbo = bo;

        qxl_release_add_res(qdev, *release, bo);

        ret = qxl_release_reserve(qdev, *release, false);
        mutex_unlock(&qdev->release_mutex);
        if (ret)
                goto out_unref;

        info = qxl_release_map(qdev, *release);
        info->id = idr_ret;
        qxl_release_unmap(qdev, *release, info);

out_unref:
        qxl_bo_unref(&bo);
        return ret;
}

int qxl_fence_releaseable(struct qxl_device *qdev,
                          struct qxl_release *release)
{
        int i, ret;

        for (i = 0; i < release->bo_count; i++) {
                if (!release->bos[i]->tbo.sync_obj)
                        release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
                ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
                if (ret)
                        return ret;
        }
        return 0;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
                                               uint64_t id)
{
        struct qxl_release *release;

        spin_lock(&qdev->release_idr_lock);
        release = idr_find(&qdev->release_idr, id);
        spin_unlock(&qdev->release_idr_lock);
        if (!release) {
                DRM_ERROR("failed to find id in release_idr\n");
                return NULL;
        }
        if (release->bo_count < 1) {
                DRM_ERROR("read a released resource with 0 bos\n");
                return NULL;
        }
        return release;
}

union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
                                        struct qxl_release *release)
{
        void *ptr;
        union qxl_release_info *info;
        struct qxl_bo *bo = release->bos[0];

        ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
        info = ptr + (release->release_offset & ~PAGE_SIZE);
        return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
                       struct qxl_release *release,
                       union qxl_release_info *info)
{
        struct qxl_bo *bo = release->bos[0];
        void *ptr;

        ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
        qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}