/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

int radeon_gem_object_init(struct drm_gem_object *obj)
{
	/* we do nothing here */
	return 0;
}

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_object *robj = gobj->driver_private;

	gobj->driver_private = NULL;
	if (robj) {
		radeon_object_unref(&robj);
	}
}

int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     bool interruptible,
			     struct drm_gem_object **obj)
{
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	int r;

	*obj = NULL;
	gobj = drm_gem_object_alloc(rdev->ddev, size);
	if (!gobj) {
		return -ENOMEM;
	}
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}
	r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
				 interruptible, &robj);
	if (r) {
		DRM_ERROR("Failed to allocate GEM object (%d, %d, %d)\n",
			  size, initial_domain, alignment);
		mutex_lock(&rdev->ddev->struct_mutex);
		drm_gem_object_unreference(gobj);
		mutex_unlock(&rdev->ddev->struct_mutex);
		return r;
	}
	gobj->driver_private = robj;
	*obj = gobj;
	return 0;
}

int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
			  uint64_t *gpu_addr)
{
	struct radeon_object *robj = obj->driver_private;
	uint32_t flags;

	switch (pin_domain) {
	case RADEON_GEM_DOMAIN_VRAM:
		flags = TTM_PL_FLAG_VRAM;
		break;
	case RADEON_GEM_DOMAIN_GTT:
		flags = TTM_PL_FLAG_TT;
		break;
	default:
		flags = TTM_PL_FLAG_SYSTEM;
		break;
	}
	return radeon_object_pin(robj, flags, gpu_addr);
}

void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
	struct radeon_object *robj = obj->driver_private;

	radeon_object_unpin(robj);
}
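/*
 * Illustrative sketch only (not part of the driver): a kernel-side caller
 * could combine the helpers above to allocate and pin a buffer in VRAM.
 * The size, alignment and error handling below are made up for the example;
 * the caller still owns the GEM reference and the pin.
 *
 *	struct drm_gem_object *gobj;
 *	uint64_t gpu_addr;
 *	int r;
 *
 *	r = radeon_gem_object_create(rdev, 1024 * 1024, 0,
 *				     RADEON_GEM_DOMAIN_VRAM,
 *				     false, false, true, &gobj);
 *	if (r)
 *		return r;
 *	r = radeon_gem_object_pin(gobj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	if (r) {
 *		mutex_lock(&rdev->ddev->struct_mutex);
 *		drm_gem_object_unreference(gobj);
 *		mutex_unlock(&rdev->ddev->struct_mutex);
 *		return r;
 *	}
 */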
int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_object *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gobj->driver_private;
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		r = radeon_object_wait(robj);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_object_force_delete(rdev);
}


/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;

	args->vram_size = rdev->mc.real_vram_size;
	/* FIXME: report something that makes sense */
	args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024);
	args->gart_size = rdev->mc.gtt_size;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, false,
				     false, true, &gobj);
	if (r) {
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r) {
		mutex_lock(&dev->struct_mutex);
		drm_gem_object_unreference(gobj);
		mutex_unlock(&dev->struct_mutex);
		return r;
	}
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(gobj);
	mutex_unlock(&dev->struct_mutex);
	args->handle = handle;
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -EINVAL;
	}
	robj = gobj->driver_private;

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gobj);
	mutex_unlock(&dev->struct_mutex);
	return r;
}
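/*
 * Illustrative sketch only: from user space these ioctls are normally
 * reached through libdrm. Assuming a libdrm build that provides
 * drmCommandWriteRead() and the radeon_drm.h definitions used above, a
 * client might create a GTT buffer and request CPU access roughly like
 * this (error handling trimmed, fd is an open DRM file descriptor):
 *
 *	struct drm_radeon_gem_create create = {
 *		.size = 64 * 1024,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *	};
 *	struct drm_radeon_gem_set_domain sd;
 *
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *			    &create, sizeof(create));
 *	sd.handle = create.handle;
 *	sd.read_domains = RADEON_GEM_DOMAIN_CPU;
 *	sd.write_domain = 0;
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_SET_DOMAIN, &sd, sizeof(sd));
 */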
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -EINVAL;
	}
	robj = gobj->driver_private;
	r = radeon_object_mmap(robj, &args->addr_ptr);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gobj);
	mutex_unlock(&dev->struct_mutex);
	return r;
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	int r;
	uint32_t cur_placement;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -EINVAL;
	}
	robj = gobj->driver_private;
	r = radeon_object_busy_domain(robj, &cur_placement);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
		break;
	default:
		break;
	}
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gobj);
	mutex_unlock(&dev->struct_mutex);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -EINVAL;
	}
	robj = gobj->driver_private;
	r = radeon_object_wait(robj);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gobj);
	mutex_unlock(&dev->struct_mutex);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -EINVAL;
	robj = gobj->driver_private;
	radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gobj);
	mutex_unlock(&dev->struct_mutex);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -EINVAL;
	robj = gobj->driver_private;
	radeon_object_get_tiling_flags(robj, &args->tiling_flags,
				       &args->pitch);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gobj);
	mutex_unlock(&dev->struct_mutex);
	return r;
}
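/*
 * Illustrative sketch only: radeon_gem_mmap_ioctl() hands back a map offset
 * in addr_ptr which user space then passes to mmap() on the DRM fd.
 * Assuming libdrm's drmCommandWriteRead(), an open fd and an existing GEM
 * handle, mapping a buffer and polling its placement could look roughly
 * like this (headers and error handling omitted):
 *
 *	struct drm_radeon_gem_mmap mm = { .handle = handle, .size = size };
 *	struct drm_radeon_gem_busy busy = { .handle = handle };
 *	void *ptr;
 *
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_MMAP, &mm, sizeof(mm));
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mm.addr_ptr);
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_BUSY, &busy, sizeof(busy));
 *	(busy.domain now reports the current placement as a GEM domain)
 */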