/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * Drawable cmd cache - allocate a bunch of VRAM pages and suballocate
 * them into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - the drawable object is 191 bytes */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

/* indexed by release type: drawable, surface cmd, cursor cmd */
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);
	int idr_ret;

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	idr_ret = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	handle = idr_ret;
	if (idr_ret < 0) {
		/* don't leak the release if we failed to get an id for it */
		kfree(release);
		return handle;
	}
	*ret = release;
	QXL_INFO(qdev, "allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	struct qxl_bo_list *entry, *tmp;
	QXL_INFO(qdev, "release %d, type %d\n", release->id,
		 release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
		QXL_INFO(qdev, "release %llx\n",
			 drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
			 - DRM_FILE_OFFSET);
		qxl_fence_remove_release(&bo->fence, release->id);
		qxl_bo_unref(&bo);
		/* the list entries were kmalloc'ed in qxl_release_list_add */
		kfree(entry);
	}
	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);
	kfree(release);
}
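
/*
 * Illustration only, not driver code: a hedged sketch of how the ids
 * handed out by qxl_release_alloc() round-trip through the hardware.
 * The device posts finished ids on the release ring; a consumer along
 * the lines of the driver's garbage collector can then look each id up
 * and free the release.  Assumes qxl_ring_pop() and qdev->release_ring
 * behave as they do elsewhere in the driver:
 *
 *	uint64_t id;
 *
 *	while (qxl_ring_pop(qdev->release_ring, &id)) {
 *		struct qxl_release *release =
 *			qxl_release_from_id_locked(qdev, id);
 *		if (release)
 *			qxl_release_free(qdev, release);
 *	}
 */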

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo)
{
	int ret;
	/* pin release bo's - they are too messy to evict */
	ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
			    QXL_GEM_DOMAIN_VRAM, NULL,
			    bo);
	return ret;
}

int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	/* don't track the same bo twice */
	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
				      true, false);
		if (ret)
			return ret;
	}

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}

int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if only one object is on the release it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if only one object is on the release it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));

		/* the destroy cmd lives 64 bytes past the create cmd
		 * inside the same 128 byte surface release slot */
		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);

		qxl_bo_unref(&bo);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}
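
/*
 * Sketch, for illustration: the intended create/destroy pairing.  The
 * DESTROY release reuses the CREATE release's bo at a +64 byte offset,
 * so tearing a surface down never needs a fresh allocation.  Keeping
 * "create_rel" around between the two calls is the caller's job (the
 * real callers stash it on the surface object):
 *
 *	struct qxl_release *create_rel, *destroy_rel;
 *
 *	qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
 *					   NULL, &create_rel);
 *	... surface is in use ...
 *	qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
 *					   create_rel, &destroy_rel);
 */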

int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;

	if (type == QXL_RELEASE_DRAWABLE)
		cur_idx = 0;
	else if (type == QXL_RELEASE_SURFACE_CMD)
		cur_idx = 1;
	else if (type == QXL_RELEASE_CURSOR_CMD)
		cur_idx = 2;
	else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0)
		return idr_ret;

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	qxl_release_list_add(*release, bo);

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	qxl_bo_unref(&bo);
	return ret;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

	/* split the offset into its page-aligned and in-page parts;
	 * PAGE_MASK, not PAGE_SIZE, is the correct mask for this */
	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
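
/*
 * Sketch, for illustration: filling in a command through the map/unmap
 * pair.  qxl_release_map() hands back an atomically-mapped pointer, so
 * nothing between map and unmap may sleep.  The drawable layout, with
 * release_info as its first member, comes from the spice protocol
 * headers:
 *
 *	struct qxl_drawable *drawable =
 *		(struct qxl_drawable *)qxl_release_map(qdev, release);
 *
 *	drawable->type = QXL_DRAW_COPY;
 *	... fill in the remaining command fields, no sleeping ...
 *	qxl_release_unmap(qdev, release, &drawable->release_info);
 */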

void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;
	struct qxl_bo *qbo;

	/* if only one object is on the release it's the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;
		qbo = to_qxl_bo(bo);

		if (!entry->bo->sync_obj)
			entry->bo->sync_obj = &qbo->fence;

		qxl_fence_add_release_locked(&qbo->fence, release->id);

		ttm_bo_add_to_lru(bo);
		ww_mutex_unlock(&bo->resv->lock);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);
	ww_acquire_fini(&release->ticket);
}
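
/*
 * Overall flow, as a hedged sketch for orientation (error handling
 * omitted; the ring push helper lives in qxl_cmd.c, and "some_bo"
 * stands in for whatever extra bo the command references):
 *
 *	struct qxl_release *release;
 *	struct qxl_bo *cmd_bo;
 *
 *	qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
 *				   QXL_RELEASE_DRAWABLE, &release, &cmd_bo);
 *	qxl_release_list_add(release, some_bo);
 *	qxl_release_reserve_list(release, false);
 *	... build the command via qxl_release_map()/qxl_release_unmap() ...
 *	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, true);
 *	qxl_release_fence_buffer_objects(release);
 */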