1f64122c1SDave Airlie /* 2f64122c1SDave Airlie * Copyright 2013 Red Hat Inc. 3f64122c1SDave Airlie * 4f64122c1SDave Airlie * Permission is hereby granted, free of charge, to any person obtaining a 5f64122c1SDave Airlie * copy of this software and associated documentation files (the "Software"), 6f64122c1SDave Airlie * to deal in the Software without restriction, including without limitation 7f64122c1SDave Airlie * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8f64122c1SDave Airlie * and/or sell copies of the Software, and to permit persons to whom the 9f64122c1SDave Airlie * Software is furnished to do so, subject to the following conditions: 10f64122c1SDave Airlie * 11f64122c1SDave Airlie * The above copyright notice and this permission notice shall be included in 12f64122c1SDave Airlie * all copies or substantial portions of the Software. 13f64122c1SDave Airlie * 14f64122c1SDave Airlie * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15f64122c1SDave Airlie * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16f64122c1SDave Airlie * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17f64122c1SDave Airlie * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18f64122c1SDave Airlie * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19f64122c1SDave Airlie * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20f64122c1SDave Airlie * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

/*
 * QXL buffer-object (qxl_bo) management: creation, kernel mapping,
 * pinning, and reloc-list bookkeeping, built on top of TTM and GEM.
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>

/*
 * Final TTM destroy callback for a qxl_bo. Runs when the TTM refcount
 * drops to zero: evicts any hardware surface backing the bo, tears down
 * its fence, unlinks it from the device's gem object list under
 * gem.mutex, releases the embedded GEM object, and frees the wrapper.
 * Ownership note: this callback is also invoked by ttm_bo_init() on
 * failure, so qxl_bo_create() must not free the bo itself after handing
 * it to ttm_bo_init().
 */
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = container_of(tbo, struct qxl_bo, tbo);
	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;

	qxl_surface_evict(qdev, bo, false);
	qxl_fence_fini(&bo->fence);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

/*
 * Return true if @bo belongs to this driver, identified by its destroy
 * callback being qxl_ttm_bo_destroy (the only safe discriminator TTM
 * offers for a raw ttm_buffer_object).
 */
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &qxl_ttm_bo_destroy)
		return true;
	return false;
}

/*
 * Fill @qbo's TTM placement from a QXL GEM domain. VRAM maps to the
 * TTM_PL_VRAM pool, SURFACE to the driver-private TTM_PL_PRIV0 pool,
 * CPU to system memory; an unrecognized domain falls back to system
 * memory (the !c case). busy_placement aliases the same single-entry
 * array, so there is no separate fallback pool under memory pressure.
 */
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
	u32 c = 0;

	qbo->placement.fpfn = 0;
	qbo->placement.lpfn = 0;
	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM)
		qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
	if (domain == QXL_GEM_DOMAIN_SURFACE)
		qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0;
	if (domain == QXL_GEM_DOMAIN_CPU)
		qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		/* fallback for unknown domains */
		qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
}


/*
 * Allocate and initialize a new qxl_bo of at least @size bytes
 * (rounded up to a page) in @domain.
 *
 * @kernel selects ttm_bo_type_kernel vs ttm_bo_type_device (the latter
 * is mappable to userspace, hence the !kernel interruptible flag passed
 * to ttm_bo_init). @surf, if non-NULL, is copied into the bo for later
 * surface creation. On success *@bo_ptr holds the new bo; on failure
 * *@bo_ptr is NULL and a negative errno is returned.
 *
 * Error handling: if drm_gem_object_init() fails we free the bo here;
 * once ttm_bo_init() has been called, failure cleanup (including the
 * kfree) happens via the qxl_ttm_bo_destroy callback, so we only
 * propagate r.
 */
int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	/* lazily wire up the address_space used for CPU mmap of bos */
	if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->gem_base.driver_private = NULL;
	bo->type = domain;
	bo->pin_count = 0;
	bo->surface_id = 0;
	qxl_fence_init(qdev, &bo->fence);
	INIT_LIST_HEAD(&bo->list);
	atomic_set(&bo->reserve_count, 0);
	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain);

	/* on failure ttm_bo_init calls qxl_ttm_bo_destroy, freeing bo */
	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, NULL, size,
			NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}

/*
 * Map the whole bo into the kernel address space, caching the pointer
 * in bo->kptr so repeated calls are cheap. If @ptr is non-NULL it
 * receives the mapping. Returns 0 or a negative errno from ttm_bo_kmap.
 * Callers presumably hold the bo reserved — TODO confirm against
 * ttm_bo_kmap's locking requirements.
 */
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		/* already mapped; hand back the cached pointer */
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	return 0;
}

/*
 * Map a single page of @bo for short-lived access. For VRAM/PRIV0
 * placements this uses an atomic io_mapping (caller must not sleep
 * until qxl_bo_kunmap_atomic_page); for other placements it falls back
 * to the persistent qxl_bo_kmap mapping and returns a pointer into it.
 * Returns NULL only if the fallback kmap fails.
 *
 * NOTE(review): the return value of ttm_mem_io_reserve() is stored in
 * ret but never checked before io_mapping_map_atomic_wc — looks like a
 * dropped error path; confirm whether reserve can fail here.
 */
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
		map = qdev->surface_mapping;
	else
		goto fallback;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);

	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
	if (bo->kptr) {
		/* bo already has a persistent kernel mapping; index into it */
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

/*
 * Drop the persistent kernel mapping created by qxl_bo_kmap.
 * Safe to call when the bo was never mapped.
 */
void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

/*
 * Undo qxl_bo_kmap_atomic_page. Mirrors its placement logic: for
 * VRAM/PRIV0 the atomic mapping @pmap is unmapped and the io region
 * released; for anything else the persistent mapping is dropped via
 * qxl_bo_kunmap (pmap is ignored in that case, matching how the
 * fallback path of the map function handed out a kptr-relative pointer).
 */
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
		map = qdev->surface_mapping;
	else
		goto fallback;

	io_mapping_unmap_atomic(pmap);

	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);
	return ;
 fallback:
	qxl_bo_kunmap(bo);
}

/*
 * Drop a reference to *@bo and NULL the caller's pointer once the
 * underlying TTM object is gone. ttm_bo_unref NULLs @tbo when it
 * released the last reference, which is what the post-check keys on.
 */
void qxl_bo_unref(struct qxl_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

/* Take an additional TTM reference on @bo and return it for chaining. */
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	ttm_bo_reference(&bo->tbo);
	return bo;
}

/*
 * Pin @bo in @domain so it cannot be evicted, optionally returning its
 * GPU offset through @gpu_addr. Pinning is counted: an already-pinned
 * bo just gets its count bumped (the @domain argument is ignored in
 * that case — the bo stays wherever it was first pinned). Returns 0 or
 * the error from ttm_bo_validate. Caller is expected to hold the bo
 * reserved — TODO confirm; ttm_bo_validate requires it.
 */
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = qxl_bo_gpu_offset(bo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = qxl_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(qdev->dev, "%p pin failed\n", bo);
	return r;
}

/*
 * Decrement @bo's pin count; when it reaches zero, clear NO_EVICT from
 * its placements and revalidate so TTM may evict it again. Unbalanced
 * unpins are warned about and treated as a no-op success.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

/*
 * Teardown-time safety net: forcibly release every bo still on the
 * device's gem object list (userspace leaked them). Each iteration
 * takes struct_mutex around the final unreference, and gem.mutex only
 * around the list manipulation. Logging the refcount via the
 * unsigned-long cast is a debugging peek at the kref internals.
 */
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		mutex_lock(&qdev->ddev->struct_mutex);
		dev_err(qdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&qdev->ddev->struct_mutex);
	}
}

/* Initialize the bo subsystem: just brings up TTM for this device. */
int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

/* Tear down the bo subsystem (TTM) for this device. */
void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

/*
 * Ensure a SURFACE-domain bo has a hardware surface: lazily allocate a
 * surface id and then the hardware surface itself on first use.
 * Non-surface bos, and surfaces that already have an id, return 0
 * immediately.
 */
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;
	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo, NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Unreserve and free every entry on @reloc_list. The @failed flag is
 * currently unused — every bo on the list is unreserved regardless.
 * NOTE(review): qxl_bo_list_add can leave a bo on this list when its
 * qxl_bo_reserve failed, in which case the unconditional unreserve here
 * would be unbalanced — verify the caller's error handling.
 */
void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
{
	struct qxl_bo_list *entry, *sf;

	list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
		qxl_bo_unreserve(entry->bo);
		list_del(&entry->lhead);
		kfree(entry);
	}
}

/*
 * Add @bo to @reloc_list (deduplicated), reserve it, validate it into
 * its preferred placement if not pinned, and make sure it has a
 * hardware surface id. Returns 0 on success or a negative errno; on
 * error the entry remains on the list for qxl_bo_list_unreserve to
 * clean up.
 */
int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;
	int ret;

	/* already tracked? nothing to do */
	list_for_each_entry(entry, &reloc_list->bos, lhead) {
		if (entry->bo == bo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->bo = bo;
	list_add(&entry->lhead, &reloc_list->bos);

	ret = qxl_bo_reserve(bo, false);
	if (ret)
		return ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
				      true, false);
		if (ret)
			return ret;
	}

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}