/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"

struct amdgpu_vram_mgr {
	struct drm_mm mm;
	spinlock_t lock;
	atomic64_t usage;
	atomic64_t vis_usage;
};

/**
 * DOC: mem_info_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total VRAM
 * available on the device.
 * The file mem_info_vram_total is used for this and returns the total
 * amount of VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
}

/**
 * DOC: mem_info_vis_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total
 * visible VRAM available on the device.
 * The file mem_info_vis_vram_total is used for this and returns the total
 * amount of visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
}

/**
 * DOC: mem_info_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount
 * of VRAM currently in use on the device.
 * The file mem_info_vram_used is used for this and returns the total
 * amount of currently used VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n",
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
}

/**
 * DOC: mem_info_vis_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount
 * of visible VRAM currently in use.
 * The file mem_info_vis_vram_used is used for this and returns the total
 * amount of currently used visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n",
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
}

static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	switch (adev->gmc.vram_vendor) {
	case SAMSUNG:
		return snprintf(buf, PAGE_SIZE, "samsung\n");
	case INFINEON:
		return snprintf(buf, PAGE_SIZE, "infineon\n");
	case ELPIDA:
		return snprintf(buf, PAGE_SIZE, "elpida\n");
	case ETRON:
		return snprintf(buf, PAGE_SIZE, "etron\n");
	case NANYA:
		return snprintf(buf, PAGE_SIZE, "nanya\n");
	case HYNIX:
		return snprintf(buf, PAGE_SIZE, "hynix\n");
	case MOSEL:
		return snprintf(buf, PAGE_SIZE, "mosel\n");
	case WINBOND:
		return snprintf(buf, PAGE_SIZE, "winbond\n");
	case ESMT:
		return snprintf(buf, PAGE_SIZE, "esmt\n");
	case MICRON:
		return snprintf(buf, PAGE_SIZE, "micron\n");
	default:
		return snprintf(buf, PAGE_SIZE, "unknown\n");
	}
}

static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
		   amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
		   amdgpu_mem_info_vis_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
		   amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
		   amdgpu_mem_info_vis_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
		   amdgpu_mem_info_vram_vendor, NULL);
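
/*
 * Illustrative example (not part of the driver): once these attributes are
 * registered, userspace can read the counters straight from sysfs. The exact
 * card number depends on how the device enumerates, e.g.:
 *
 *	$ cat /sys/class/drm/card0/device/mem_info_vram_total
 *	$ cat /sys/class/drm/card0/device/mem_info_vis_vram_used
 *
 * Each file returns a single value in bytes, except mem_info_vram_vendor
 * which returns a vendor string.
 */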
/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
 * @man: TTM memory type manager
 * @p_size: maximum size of VRAM
 *
 * Allocate and initialize the VRAM manager.
 */
static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
				unsigned long p_size)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_vram_mgr *mgr;
	int ret;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return -ENOMEM;

	drm_mm_init(&mgr->mm, 0, p_size);
	spin_lock_init(&mgr->lock);
	man->priv = mgr;

	/* Add the VRAM-related sysfs files */
	ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_total);
	if (ret) {
		DRM_ERROR("Failed to create device file mem_info_vram_total\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
	if (ret) {
		DRM_ERROR("Failed to create device file mem_info_vis_vram_total\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_used);
	if (ret) {
		DRM_ERROR("Failed to create device file mem_info_vram_used\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
	if (ret) {
		DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor);
	if (ret) {
		DRM_ERROR("Failed to create device file mem_info_vram_vendor\n");
		return ret;
	}

	return 0;
}

/**
 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
 *
 * @man: TTM memory type manager
 *
 * Destroy and free the VRAM manager; drm_mm_takedown() will warn if ranges
 * are still allocated inside it.
 */
static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_vram_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);
	kfree(mgr);
	man->priv = NULL;
	device_remove_file(adev->dev, &dev_attr_mem_info_vram_total);
	device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
	device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
	device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
	device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor);
	return 0;
}

/**
 * amdgpu_vram_mgr_vis_size - Calculate visible node size
 *
 * @adev: amdgpu device structure
 * @node: MM node structure
 *
 * Calculate how many bytes of the MM node are inside visible VRAM
 */
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
				    struct drm_mm_node *node)
{
	uint64_t start = node->start << PAGE_SHIFT;
	uint64_t end = (node->size + node->start) << PAGE_SHIFT;

	if (start >= adev->gmc.visible_vram_size)
		return 0;

	return (end > adev->gmc.visible_vram_size ?
		adev->gmc.visible_vram_size : end) - start;
}
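
/*
 * Worked example (illustrative numbers): with PAGE_SHIFT == 12 and a
 * 256 MiB CPU-visible window (visible_vram_size == 0x10000000), a node with
 * start == 0xf000 pages and size == 0x2000 pages spans bytes
 * [0xf000000, 0x11000000). The end is clamped to 0x10000000, so
 * 0x1000000 bytes (16 MiB) of the node count as CPU visible.
 */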
/**
 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
 *
 * @bo: &amdgpu_bo buffer object (must be in VRAM)
 *
 * Returns:
 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
 */
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_mem_reg *mem = &bo->tbo.mem;
	struct drm_mm_node *nodes = mem->mm_node;
	unsigned pages = mem->num_pages;
	u64 usage;

	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
		return amdgpu_bo_size(bo);

	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
		return 0;

	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
		usage += amdgpu_vram_mgr_vis_size(adev, nodes);

	return usage;
}

/**
 * amdgpu_vram_mgr_virt_start - update virtual start address
 *
 * @mem: ttm_mem_reg to update
 * @node: just allocated node
 *
 * Calculate a virtual BO start address to easily check if everything is CPU
 * accessible.
 */
static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem,
				       struct drm_mm_node *node)
{
	unsigned long start;

	start = node->start + node->size;
	if (start > mem->num_pages)
		start -= mem->num_pages;
	else
		start = 0;
	mem->start = max(mem->start, start);
}
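
/*
 * Worked example (illustrative numbers): for a 0x100-page BO with a node
 * ending at page 0x8050 (node->start + node->size), the candidate start is
 * 0x8050 - 0x100 = 0x7f50, so mem->start is raised to at least 0x7f50.
 * Taking the max() across all of the BO's nodes makes
 * mem->start + mem->num_pages an upper bound on the highest node end;
 * if that sum fits below the CPU-visible page count, every node of the BO
 * is fully CPU accessible, which is what callers rely on.
 */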
/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */
static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_mm *mm = &mgr->mm;
	struct drm_mm_node *nodes;
	enum drm_mm_insert_mode mode;
	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
	uint64_t vis_usage = 0, mem_bytes, max_bytes;
	unsigned i;
	int r;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	max_bytes = adev->gmc.mc_vram_size;
	if (tbo->type != ttm_bo_type_kernel)
		max_bytes -= AMDGPU_VM_RESERVED_VRAM;

	/* bail out quickly if there's likely not enough VRAM for this BO */
	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
		atomic64_sub(mem_bytes, &mgr->usage);
		mem->mm_node = NULL;
		return 0;
	}

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		pages_per_node = ~0ul;
		num_nodes = 1;
	} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pages_per_node = HPAGE_PMD_NR;
#else
		/* default to 2MB */
		pages_per_node = (2UL << (20UL - PAGE_SHIFT));
#endif
		pages_per_node = max((uint32_t)pages_per_node, mem->page_alignment);
		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
	}

	nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
			       GFP_KERNEL | __GFP_ZERO);
	if (!nodes) {
		atomic64_sub(mem_bytes, &mgr->usage);
		return -ENOMEM;
	}

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	mem->start = 0;
	pages_left = mem->num_pages;

	spin_lock(&mgr->lock);
	for (i = 0; pages_left >= pages_per_node; ++i) {
		unsigned long pages = rounddown_pow_of_two(pages_left);

		r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
						pages_per_node, 0,
						place->fpfn, lpfn,
						mode);
		if (unlikely(r))
			break;

		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
		pages_left -= pages;
	}

	for (; pages_left; ++i) {
		unsigned long pages = min(pages_left, pages_per_node);
		uint32_t alignment = mem->page_alignment;

		if (pages == pages_per_node)
			alignment = pages_per_node;

		r = drm_mm_insert_node_in_range(mm, &nodes[i],
						pages, alignment, 0,
						place->fpfn, lpfn,
						mode);
		if (unlikely(r))
			goto error;

		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
		pages_left -= pages;
	}
	spin_unlock(&mgr->lock);

	atomic64_add(vis_usage, &mgr->vis_usage);

	mem->mm_node = nodes;

	return 0;

error:
	while (i--)
		drm_mm_remove_node(&nodes[i]);
	spin_unlock(&mgr->lock);
	atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);

	kvfree(nodes);
	return r == -ENOSPC ? 0 : r;
}
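
/*
 * Illustrative walk-through (assumed numbers): a non-contiguous 11 MiB BO
 * with 4 KiB pages has mem->num_pages == 2816. With pages_per_node == 512
 * (2 MiB), num_nodes == DIV_ROUND_UP(2816, 512) == 6. The first loop grabs
 * power-of-two chunks while at least 512 pages remain: 2048 pages, then
 * 512, leaving 256. The second loop places the 256-page remainder with the
 * BO's own page_alignment. All nodes from the first loop are aligned to
 * pages_per_node, so huge-page mappings stay possible, while the
 * power-of-two sizing usually needs far fewer nodes than the worst-case
 * num_nodes the array was sized for.
 */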
/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_mm_node *nodes = mem->mm_node;
	uint64_t usage = 0, vis_usage = 0;
	unsigned pages = mem->num_pages;

	if (!mem->mm_node)
		return;

	spin_lock(&mgr->lock);
	while (pages) {
		pages -= nodes->size;
		drm_mm_remove_node(nodes);
		usage += nodes->size << PAGE_SHIFT;
		vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
		++nodes;
	}
	spin_unlock(&mgr->lock);

	atomic64_sub(usage, &mgr->usage);
	atomic64_sub(vis_usage, &mgr->vis_usage);

	kvfree(mem->mm_node);
	mem->mm_node = NULL;
}

/**
 * amdgpu_vram_mgr_usage - how many bytes are used in this domain
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in this domain.
 */
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man)
{
	struct amdgpu_vram_mgr *mgr = man->priv;

	return atomic64_read(&mgr->usage);
}

/**
 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
 *
 * @man: TTM memory type manager
 *
 * Returns how many bytes are used in the visible part of VRAM.
 */
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man)
{
	struct amdgpu_vram_mgr *mgr = man->priv;

	return atomic64_read(&mgr->vis_usage);
}

/**
 * amdgpu_vram_mgr_debug - dump VRAM table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content using the given DRM printer.
 */
static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
				  struct drm_printer *printer)
{
	struct amdgpu_vram_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, printer);
	spin_unlock(&mgr->lock);

	drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
		   man->size, amdgpu_vram_mgr_usage(man) >> 20,
		   amdgpu_vram_mgr_vis_usage(man) >> 20);
}

const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
	.init		= amdgpu_vram_mgr_init,
	.takedown	= amdgpu_vram_mgr_fini,
	.get_node	= amdgpu_vram_mgr_new,
	.put_node	= amdgpu_vram_mgr_del,
	.debug		= amdgpu_vram_mgr_debug
};
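
/*
 * Sketch of how this table is consumed (an assumption based on the pre-5.9
 * TTM interface this file targets; the actual call sites live in
 * amdgpu_ttm.c and are not part of this file): amdgpu_init_mem_type()
 * points the TTM_PL_VRAM manager at this func table and amdgpu_ttm_init()
 * sizes the domain, roughly:
 *
 *	man->func = &amdgpu_vram_mgr_func;
 *	...
 *	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
 *			   adev->gmc.real_vram_size >> PAGE_SHIFT);
 *
 * ttm_bo_init_mm() then calls back into amdgpu_vram_mgr_init() via .init,
 * and every VRAM placement/eviction goes through .get_node/.put_node.
 */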