/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

int radeon_gem_object_init(struct drm_gem_object *obj)
{
	/* we do nothing here */
	return 0;
}

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gobj->driver_private;

	gobj->driver_private = NULL;
	if (robj) {
		radeon_bo_unref(&robj);
	}

	drm_gem_object_release(gobj);
	kfree(gobj);
}

int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     struct drm_gem_object **obj)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	*obj = NULL;
	gobj = drm_gem_object_alloc(rdev->ddev, size);
	if (!gobj) {
		return -ENOMEM;
	}
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}
	r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	gobj->driver_private = robj;
	*obj = gobj;
	return 0;
}

int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
			  uint64_t *gpu_addr)
{
	struct radeon_bo *robj = obj->driver_private;
	int r;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(robj, pin_domain, gpu_addr);
	radeon_bo_unreserve(robj);
	return r;
}

void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
	struct radeon_bo *robj = obj->driver_private;
	int r;

	r = radeon_bo_reserve(robj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(robj);
		radeon_bo_unreserve(robj);
	}
}

int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gobj->driver_private;
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to be idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object !\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}


/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = rdev->mc.real_vram_size;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
		RADEON_IB_POOL_SIZE*64*1024;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, false,
				     false, &gobj);
	if (r) {
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gobj->driver_private;

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gobj->driver_private;
	args->addr_ptr = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gobj->driver_private;
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gobj->driver_private;
	r = radeon_bo_wait(robj, NULL, false);
	/* callback hw specific functions if any */
	if (robj->rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gobj->driver_private;
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gobj->driver_private;
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}