/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>

static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;

	qxl_surface_evict(qdev, bo, false);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &qxl_ttm_bo_destroy)
		return true;
	return false;
}

void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
	u32 c = 0;
	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
	unsigned int i;

	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
	if (domain == QXL_GEM_DOMAIN_SURFACE)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
	if (domain == QXL_GEM_DOMAIN_CPU)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
	if (!c)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, bool pinned, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->type = domain;
	bo->pin_count = pinned ? 1 : 0;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain, pinned);

	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, size,
			NULL, NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}
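/*
 * Illustrative usage sketch (editor's note, not part of the driver): a
 * typical caller allocates a BO, maps it for CPU access, and drops its
 * reference when done.  Error handling is elided for brevity.
 *
 *	struct qxl_bo *bo;
 *	void *ptr;
 *	int r;
 *
 *	r = qxl_bo_create(qdev, PAGE_SIZE, true, false,
 *			  QXL_GEM_DOMAIN_VRAM, NULL, &bo);
 *	r = qxl_bo_kmap(bo, &ptr);
 *	// ... fill the buffer through *ptr ...
 *	qxl_bo_kunmap(bo);
 *	qxl_bo_unref(&bo);
 */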
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	return 0;
}

void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);

	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	io_mapping_unmap_atomic(pmap);

	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);
	return;
fallback:
	qxl_bo_kunmap(bo);
}
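/*
 * Illustrative sketch (editor's note, not part of the driver): the atomic
 * page helpers must be strictly paired, and the caller may not sleep
 * between them when the BO sits in VRAM or surface memory, since the
 * mapping is made with io_mapping_map_atomic_wc().
 *
 *	ptr = qxl_bo_kmap_atomic_page(qdev, bo, 0);
 *	if (ptr) {
 *		// ... short, non-sleeping access to the first page ...
 *		qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
 *	}
 */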
void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put_unlocked(&(*bo)->gem_base);
	*bo = NULL;
}

struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->gem_base);
	return bo;
}

static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->gem_base.dev;
	int r;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = qxl_bo_gpu_offset(bo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, domain, true);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = qxl_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

static int __qxl_bo_unpin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->gem_base.dev;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

/*
 * Reserve the BO before pinning the object.  If the BO is already
 * reserved, call __qxl_bo_pin() directly instead.
 */
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
	int r;

	r = qxl_bo_reserve(bo, false);
	if (r)
		return r;

	r = __qxl_bo_pin(bo, domain, gpu_addr);
	qxl_bo_unreserve(bo);
	return r;
}

/*
 * Reserve the BO before unpinning the object.  If the BO is already
 * reserved, call __qxl_bo_unpin() directly instead.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo, false);
	if (r)
		return r;

	r = __qxl_bo_unpin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put_unlocked(&bo->gem_base);
	}
}

int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo, NULL);
		if (ret)
			return ret;
	}
	return 0;
}

int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}
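/*
 * Illustrative pin/unpin sketch (editor's note, not part of the driver):
 * qxl_bo_pin() takes the reservation lock internally, bumps pin_count,
 * and reports the GPU offset; every successful pin must be balanced by a
 * qxl_bo_unpin() so the BO becomes evictable again.
 *
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = qxl_bo_pin(bo, bo->type, &gpu_addr);
 *	if (r)
 *		return r;
 *	// ... the device may access the BO at gpu_addr ...
 *	qxl_bo_unpin(bo);
 */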