/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
21f64122c1SDave Airlie * 22f64122c1SDave Airlie * Authors: Dave Airlie 23f64122c1SDave Airlie * Alon Levy 24f64122c1SDave Airlie */ 25f64122c1SDave Airlie 26f64122c1SDave Airlie #include "qxl_drv.h" 27f64122c1SDave Airlie #include "qxl_object.h" 28f64122c1SDave Airlie 29f64122c1SDave Airlie #include <linux/io-mapping.h> 30f64122c1SDave Airlie static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo) 31f64122c1SDave Airlie { 32f64122c1SDave Airlie struct qxl_bo *bo; 33f64122c1SDave Airlie struct qxl_device *qdev; 34f64122c1SDave Airlie 35f64122c1SDave Airlie bo = container_of(tbo, struct qxl_bo, tbo); 36f64122c1SDave Airlie qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; 37f64122c1SDave Airlie 38f64122c1SDave Airlie qxl_surface_evict(qdev, bo, false); 39f64122c1SDave Airlie mutex_lock(&qdev->gem.mutex); 40f64122c1SDave Airlie list_del_init(&bo->list); 41f64122c1SDave Airlie mutex_unlock(&qdev->gem.mutex); 42f64122c1SDave Airlie drm_gem_object_release(&bo->gem_base); 43f64122c1SDave Airlie kfree(bo); 44f64122c1SDave Airlie } 45f64122c1SDave Airlie 46f64122c1SDave Airlie bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo) 47f64122c1SDave Airlie { 48f64122c1SDave Airlie if (bo->destroy == &qxl_ttm_bo_destroy) 49f64122c1SDave Airlie return true; 50f64122c1SDave Airlie return false; 51f64122c1SDave Airlie } 52f64122c1SDave Airlie 534f49ec92SDave Airlie void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) 54f64122c1SDave Airlie { 55f64122c1SDave Airlie u32 c = 0; 564f49ec92SDave Airlie u32 pflag = pinned ? 
TTM_PL_FLAG_NO_EVICT : 0; 57f1217ed0SChristian König unsigned i; 58f64122c1SDave Airlie 59f64122c1SDave Airlie qbo->placement.placement = qbo->placements; 60f64122c1SDave Airlie qbo->placement.busy_placement = qbo->placements; 6162c8ba7cSDave Airlie if (domain == QXL_GEM_DOMAIN_VRAM) 62f1217ed0SChristian König qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; 6362c8ba7cSDave Airlie if (domain == QXL_GEM_DOMAIN_SURFACE) 64f1217ed0SChristian König qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag; 6562c8ba7cSDave Airlie if (domain == QXL_GEM_DOMAIN_CPU) 66f1217ed0SChristian König qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; 67f64122c1SDave Airlie if (!c) 68f1217ed0SChristian König qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 69f64122c1SDave Airlie qbo->placement.num_placement = c; 70f64122c1SDave Airlie qbo->placement.num_busy_placement = c; 71f1217ed0SChristian König for (i = 0; i < c; ++i) { 72f1217ed0SChristian König qbo->placements[i].fpfn = 0; 73f1217ed0SChristian König qbo->placements[i].lpfn = 0; 74f1217ed0SChristian König } 75f64122c1SDave Airlie } 76f64122c1SDave Airlie 77f64122c1SDave Airlie 78f64122c1SDave Airlie int qxl_bo_create(struct qxl_device *qdev, 794f49ec92SDave Airlie unsigned long size, bool kernel, bool pinned, u32 domain, 80f64122c1SDave Airlie struct qxl_surface *surf, 81f64122c1SDave Airlie struct qxl_bo **bo_ptr) 82f64122c1SDave Airlie { 83f64122c1SDave Airlie struct qxl_bo *bo; 84f64122c1SDave Airlie enum ttm_bo_type type; 85f64122c1SDave Airlie int r; 86f64122c1SDave Airlie 87f64122c1SDave Airlie if (kernel) 88f64122c1SDave Airlie type = ttm_bo_type_kernel; 89f64122c1SDave Airlie else 90f64122c1SDave Airlie type = ttm_bo_type_device; 91f64122c1SDave Airlie *bo_ptr = NULL; 92f64122c1SDave Airlie bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL); 93f64122c1SDave Airlie if (bo == NULL) 94f64122c1SDave Airlie return -ENOMEM; 
95f64122c1SDave Airlie size = roundup(size, PAGE_SIZE); 96f64122c1SDave Airlie r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size); 97f64122c1SDave Airlie if (unlikely(r)) { 98f64122c1SDave Airlie kfree(bo); 99f64122c1SDave Airlie return r; 100f64122c1SDave Airlie } 101f64122c1SDave Airlie bo->type = domain; 1024f49ec92SDave Airlie bo->pin_count = pinned ? 1 : 0; 103f64122c1SDave Airlie bo->surface_id = 0; 104f64122c1SDave Airlie INIT_LIST_HEAD(&bo->list); 1058002db63SDave Airlie 106f64122c1SDave Airlie if (surf) 107f64122c1SDave Airlie bo->surf = *surf; 108f64122c1SDave Airlie 1094f49ec92SDave Airlie qxl_ttm_placement_from_domain(bo, domain, pinned); 110f64122c1SDave Airlie 111f64122c1SDave Airlie r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, 112f64122c1SDave Airlie &bo->placement, 0, !kernel, NULL, size, 113f4f4e3e3SMaarten Lankhorst NULL, NULL, &qxl_ttm_bo_destroy); 114f64122c1SDave Airlie if (unlikely(r != 0)) { 115f64122c1SDave Airlie if (r != -ERESTARTSYS) 116f64122c1SDave Airlie dev_err(qdev->dev, 117f64122c1SDave Airlie "object_init failed for (%lu, 0x%08X)\n", 118f64122c1SDave Airlie size, domain); 119f64122c1SDave Airlie return r; 120f64122c1SDave Airlie } 121f64122c1SDave Airlie *bo_ptr = bo; 122f64122c1SDave Airlie return 0; 123f64122c1SDave Airlie } 124f64122c1SDave Airlie 125f64122c1SDave Airlie int qxl_bo_kmap(struct qxl_bo *bo, void **ptr) 126f64122c1SDave Airlie { 127f64122c1SDave Airlie bool is_iomem; 128f64122c1SDave Airlie int r; 129f64122c1SDave Airlie 130f64122c1SDave Airlie if (bo->kptr) { 131f64122c1SDave Airlie if (ptr) 132f64122c1SDave Airlie *ptr = bo->kptr; 133f64122c1SDave Airlie return 0; 134f64122c1SDave Airlie } 135f64122c1SDave Airlie r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); 136f64122c1SDave Airlie if (r) 137f64122c1SDave Airlie return r; 138f64122c1SDave Airlie bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); 139f64122c1SDave Airlie if (ptr) 140f64122c1SDave Airlie *ptr = bo->kptr; 
141f64122c1SDave Airlie return 0; 142f64122c1SDave Airlie } 143f64122c1SDave Airlie 144f64122c1SDave Airlie void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, 145f64122c1SDave Airlie struct qxl_bo *bo, int page_offset) 146f64122c1SDave Airlie { 147f64122c1SDave Airlie struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; 148f64122c1SDave Airlie void *rptr; 149f64122c1SDave Airlie int ret; 150f64122c1SDave Airlie struct io_mapping *map; 151f64122c1SDave Airlie 152f64122c1SDave Airlie if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 153f64122c1SDave Airlie map = qdev->vram_mapping; 154f64122c1SDave Airlie else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0) 155f64122c1SDave Airlie map = qdev->surface_mapping; 156f64122c1SDave Airlie else 157f64122c1SDave Airlie goto fallback; 158f64122c1SDave Airlie 159f64122c1SDave Airlie (void) ttm_mem_io_lock(man, false); 160f64122c1SDave Airlie ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem); 161f64122c1SDave Airlie ttm_mem_io_unlock(man); 162f64122c1SDave Airlie 163f64122c1SDave Airlie return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset); 164f64122c1SDave Airlie fallback: 165f64122c1SDave Airlie if (bo->kptr) { 166f64122c1SDave Airlie rptr = bo->kptr + (page_offset * PAGE_SIZE); 167f64122c1SDave Airlie return rptr; 168f64122c1SDave Airlie } 169f64122c1SDave Airlie 170f64122c1SDave Airlie ret = qxl_bo_kmap(bo, &rptr); 171f64122c1SDave Airlie if (ret) 172f64122c1SDave Airlie return NULL; 173f64122c1SDave Airlie 174f64122c1SDave Airlie rptr += page_offset * PAGE_SIZE; 175f64122c1SDave Airlie return rptr; 176f64122c1SDave Airlie } 177f64122c1SDave Airlie 178f64122c1SDave Airlie void qxl_bo_kunmap(struct qxl_bo *bo) 179f64122c1SDave Airlie { 180f64122c1SDave Airlie if (bo->kptr == NULL) 181f64122c1SDave Airlie return; 182f64122c1SDave Airlie bo->kptr = NULL; 183f64122c1SDave Airlie ttm_bo_kunmap(&bo->kmap); 184f64122c1SDave Airlie } 185f64122c1SDave Airlie 186f64122c1SDave Airlie void 
qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, 187f64122c1SDave Airlie struct qxl_bo *bo, void *pmap) 188f64122c1SDave Airlie { 189f64122c1SDave Airlie struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; 190f64122c1SDave Airlie struct io_mapping *map; 191f64122c1SDave Airlie 192f64122c1SDave Airlie if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 193f64122c1SDave Airlie map = qdev->vram_mapping; 194f64122c1SDave Airlie else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0) 195f64122c1SDave Airlie map = qdev->surface_mapping; 196f64122c1SDave Airlie else 197f64122c1SDave Airlie goto fallback; 198f64122c1SDave Airlie 199f64122c1SDave Airlie io_mapping_unmap_atomic(pmap); 200f64122c1SDave Airlie 201f64122c1SDave Airlie (void) ttm_mem_io_lock(man, false); 202f64122c1SDave Airlie ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem); 203f64122c1SDave Airlie ttm_mem_io_unlock(man); 204f64122c1SDave Airlie return ; 205f64122c1SDave Airlie fallback: 206f64122c1SDave Airlie qxl_bo_kunmap(bo); 207f64122c1SDave Airlie } 208f64122c1SDave Airlie 209f64122c1SDave Airlie void qxl_bo_unref(struct qxl_bo **bo) 210f64122c1SDave Airlie { 211f64122c1SDave Airlie if ((*bo) == NULL) 212f64122c1SDave Airlie return; 213e07154e2SFrediano Ziglio 214e07154e2SFrediano Ziglio drm_gem_object_unreference_unlocked(&(*bo)->gem_base); 215f64122c1SDave Airlie *bo = NULL; 216f64122c1SDave Airlie } 217f64122c1SDave Airlie 218f64122c1SDave Airlie struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo) 219f64122c1SDave Airlie { 220e07154e2SFrediano Ziglio drm_gem_object_reference(&bo->gem_base); 221f64122c1SDave Airlie return bo; 222f64122c1SDave Airlie } 223f64122c1SDave Airlie 224f64122c1SDave Airlie int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) 225f64122c1SDave Airlie { 226f64122c1SDave Airlie struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; 2274f49ec92SDave Airlie int r; 228f64122c1SDave Airlie 229f64122c1SDave Airlie if (bo->pin_count) { 230f64122c1SDave Airlie 
bo->pin_count++; 231f64122c1SDave Airlie if (gpu_addr) 232f64122c1SDave Airlie *gpu_addr = qxl_bo_gpu_offset(bo); 233f64122c1SDave Airlie return 0; 234f64122c1SDave Airlie } 2354f49ec92SDave Airlie qxl_ttm_placement_from_domain(bo, domain, true); 236f64122c1SDave Airlie r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 237f64122c1SDave Airlie if (likely(r == 0)) { 238f64122c1SDave Airlie bo->pin_count = 1; 239f64122c1SDave Airlie if (gpu_addr != NULL) 240f64122c1SDave Airlie *gpu_addr = qxl_bo_gpu_offset(bo); 241f64122c1SDave Airlie } 242f64122c1SDave Airlie if (unlikely(r != 0)) 243f64122c1SDave Airlie dev_err(qdev->dev, "%p pin failed\n", bo); 244f64122c1SDave Airlie return r; 245f64122c1SDave Airlie } 246f64122c1SDave Airlie 247f64122c1SDave Airlie int qxl_bo_unpin(struct qxl_bo *bo) 248f64122c1SDave Airlie { 249f64122c1SDave Airlie struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; 250f64122c1SDave Airlie int r, i; 251f64122c1SDave Airlie 252f64122c1SDave Airlie if (!bo->pin_count) { 253f64122c1SDave Airlie dev_warn(qdev->dev, "%p unpin not necessary\n", bo); 254f64122c1SDave Airlie return 0; 255f64122c1SDave Airlie } 256f64122c1SDave Airlie bo->pin_count--; 257f64122c1SDave Airlie if (bo->pin_count) 258f64122c1SDave Airlie return 0; 259f64122c1SDave Airlie for (i = 0; i < bo->placement.num_placement; i++) 260f1217ed0SChristian König bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; 261f64122c1SDave Airlie r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 262f64122c1SDave Airlie if (unlikely(r != 0)) 263f64122c1SDave Airlie dev_err(qdev->dev, "%p validate failed for unpin\n", bo); 264f64122c1SDave Airlie return r; 265f64122c1SDave Airlie } 266f64122c1SDave Airlie 267f64122c1SDave Airlie void qxl_bo_force_delete(struct qxl_device *qdev) 268f64122c1SDave Airlie { 269f64122c1SDave Airlie struct qxl_bo *bo, *n; 270f64122c1SDave Airlie 271f64122c1SDave Airlie if (list_empty(&qdev->gem.objects)) 272f64122c1SDave Airlie 
return; 273f64122c1SDave Airlie dev_err(qdev->dev, "Userspace still has active objects !\n"); 274f64122c1SDave Airlie list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) { 275f64122c1SDave Airlie mutex_lock(&qdev->ddev->struct_mutex); 276f64122c1SDave Airlie dev_err(qdev->dev, "%p %p %lu %lu force free\n", 277f64122c1SDave Airlie &bo->gem_base, bo, (unsigned long)bo->gem_base.size, 278f64122c1SDave Airlie *((unsigned long *)&bo->gem_base.refcount)); 279f64122c1SDave Airlie mutex_lock(&qdev->gem.mutex); 280f64122c1SDave Airlie list_del_init(&bo->list); 281f64122c1SDave Airlie mutex_unlock(&qdev->gem.mutex); 282f64122c1SDave Airlie /* this should unref the ttm bo */ 283f64122c1SDave Airlie drm_gem_object_unreference(&bo->gem_base); 284f64122c1SDave Airlie mutex_unlock(&qdev->ddev->struct_mutex); 285f64122c1SDave Airlie } 286f64122c1SDave Airlie } 287f64122c1SDave Airlie 288f64122c1SDave Airlie int qxl_bo_init(struct qxl_device *qdev) 289f64122c1SDave Airlie { 290f64122c1SDave Airlie return qxl_ttm_init(qdev); 291f64122c1SDave Airlie } 292f64122c1SDave Airlie 293f64122c1SDave Airlie void qxl_bo_fini(struct qxl_device *qdev) 294f64122c1SDave Airlie { 295f64122c1SDave Airlie qxl_ttm_fini(qdev); 296f64122c1SDave Airlie } 297f64122c1SDave Airlie 298f64122c1SDave Airlie int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo) 299f64122c1SDave Airlie { 300f64122c1SDave Airlie int ret; 301f64122c1SDave Airlie if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) { 302f64122c1SDave Airlie /* allocate a surface id for this surface now */ 303f64122c1SDave Airlie ret = qxl_surface_id_alloc(qdev, bo); 304f64122c1SDave Airlie if (ret) 305f64122c1SDave Airlie return ret; 306f64122c1SDave Airlie 307f64122c1SDave Airlie ret = qxl_hw_surface_alloc(qdev, bo, NULL); 308f64122c1SDave Airlie if (ret) 309f64122c1SDave Airlie return ret; 310f64122c1SDave Airlie } 311f64122c1SDave Airlie return 0; 312f64122c1SDave Airlie } 313f64122c1SDave Airlie 314b86487a6SDave Airlie 
int qxl_surf_evict(struct qxl_device *qdev) 315b86487a6SDave Airlie { 316b86487a6SDave Airlie return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); 317b86487a6SDave Airlie } 318d84300bfSDave Airlie 319d84300bfSDave Airlie int qxl_vram_evict(struct qxl_device *qdev) 320d84300bfSDave Airlie { 321d84300bfSDave Airlie return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM); 322d84300bfSDave Airlie } 323