/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/pci.h>
#include <linux/vmalloc.h>

#include <drm/amdgpu_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"
#include "amdgpu_reset.h"
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>

/*
 * GART
 * The GART (Graphics Address Remapping Table) is an aperture
 * in the GPU's address space.  System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective.  A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP.  AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
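
/*
 * Illustrative sketch (not built, guarded out): how an offset into the
 * GART aperture resolves to a backing DMA address.  The helper name
 * gart_offset_to_dma() is hypothetical; the arithmetic mirrors what
 * amdgpu_gart_map() and amdgpu_gart_unbind() do below.
 */
#if 0
static dma_addr_t gart_offset_to_dma(dma_addr_t *dma_addr, uint64_t offset)
{
	/* one PTE per AMDGPU_GPU_PAGE_SIZE (4K) slice of the aperture */
	unsigned int t = offset / AMDGPU_GPU_PAGE_SIZE;
	/* several GPU pages share one CPU page when PAGE_SIZE > 4K */
	unsigned int p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;

	/* each PTE points AMDGPU_GPU_PAGE_SIZE further into the CPU page */
	return dma_addr[p] +
	       (t % AMDGPU_GPU_PAGES_IN_CPU_PAGE) * AMDGPU_GPU_PAGE_SIZE;
}
#endif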

/*
 * Common GART table functions.
 */

/**
 * amdgpu_gart_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
	struct page *dummy_page = ttm_glob.dummy_read_page;

	if (adev->dummy_page_addr)
		return 0;
	adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0,
					     PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		adev->dummy_page_addr = 0;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_gart_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
	if (!adev->dummy_page_addr)
		return;
	dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
		       DMA_BIDIRECTIONAL);
	adev->dummy_page_addr = 0;
}

/**
 * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate system memory for GART page table for ASICs that don't have
 * dedicated VRAM.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
{
	unsigned int order = get_order(adev->gart.table_size);
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
	struct amdgpu_bo *bo = NULL;
	struct sg_table *sg = NULL;
	struct amdgpu_bo_param bp;
	dma_addr_t dma_addr;
	struct page *p;
	int ret;

	if (adev->gart.bo != NULL)
		return 0;

	p = alloc_pages(gfp_flags, order);
	if (!p)
		return -ENOMEM;

	/* If the hardware does not support UTCL2 snooping of the CPU caches
	 * then set_memory_wc() could be used as a workaround to mark the pages
	 * as write combine memory.
	 */
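#if 0
	/*
	 * Hypothetical sketch of that workaround (not built, guarded out):
	 * mark the freshly allocated table pages write-combining on x86.
	 * set_memory_wc() operates on whole pages and would have to be
	 * undone with set_memory_wb() before __free_pages().
	 */
#ifdef CONFIG_X86
	ret = set_memory_wc((unsigned long)page_address(p), 1 << order);
	if (ret) {
		__free_pages(p, order);
		return ret;
	}
#endif
#endif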
	dma_addr = dma_map_page(&adev->pdev->dev, p, 0, adev->gart.table_size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&adev->pdev->dev, dma_addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the GART BO page\n");
		__free_pages(p, order);
		p = NULL;
		return -EFAULT;
	}

	dev_info(adev->dev, "%s dma_addr:%pad\n", __func__, &dma_addr);
	/* Create SG table */
	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto error;
	}
	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto error;

	sg_dma_address(sg->sgl) = dma_addr;
	sg->sgl->length = adev->gart.table_size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = adev->gart.table_size;
#endif
	/* Create SG BO */
	memset(&bp, 0, sizeof(bp));
	bp.size = adev->gart.table_size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
	bp.type = ttm_bo_type_sg;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
	bp.flags = 0;
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret)
		goto error;

	bo->tbo.sg = sg;
	bo->tbo.ttm->sg = sg;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to reserve bo for GART system bo\n", ret);
		goto error;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	WARN(ret, "Pinning the GART table failed");
	if (ret)
		goto error_resv;

	adev->gart.bo = bo;
	adev->gart.ptr = page_to_virt(p);
	/* Make GART table accessible in VMID0 */
	ret = amdgpu_ttm_alloc_gart(&adev->gart.bo->tbo);
	if (ret)
		amdgpu_gart_table_ram_free(adev);
	amdgpu_bo_unreserve(bo);

	return 0;

error_resv:
	amdgpu_bo_unreserve(bo);
error:
	amdgpu_bo_unref(&bo);
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	__free_pages(p, order);
	return ret;
}

/**
 * amdgpu_gart_table_ram_free - free gart page table system ram
 *
 * @adev: amdgpu_device pointer
 *
 * Free the system memory used for the GART page table on ASICs that don't
 * have dedicated VRAM.
 */
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
{
	unsigned int order = get_order(adev->gart.table_size);
	struct sg_table *sg = adev->gart.bo->tbo.sg;
	struct page *p;
	int ret;

	ret = amdgpu_bo_reserve(adev->gart.bo, false);
	if (!ret) {
		amdgpu_bo_unpin(adev->gart.bo);
		amdgpu_bo_unreserve(adev->gart.bo);
	}
	amdgpu_bo_unref(&adev->gart.bo);
	sg_free_table(sg);
	kfree(sg);
	p = virt_to_page(adev->gart.ptr);
	__free_pages(p, order);

	adev->gart.ptr = NULL;
}

/**
 * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
	if (adev->gart.bo != NULL)
		return 0;

	return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
				       NULL, (void *)&adev->gart.ptr);
}

/**
 * amdgpu_gart_table_vram_free - free gart page table vram
 *
 * @adev: amdgpu_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+).  These asics require the gart table to
 * be in video memory.
 */
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gart.bo, NULL, (void *)&adev->gart.ptr);
}

/*
 * Common gart functions.
 */
/**
 * amdgpu_gart_unbind - unbind pages from the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 */
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;
	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
	uint64_t flags = 0;
	int idx;

	if (!adev->gart.ptr)
		return;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
	for (i = 0; i < pages; i++, p++) {
		page_base = adev->dummy_page_addr;
		if (!adev->gart.ptr)
			continue;

		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
					       t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	mb();
	amdgpu_device_flush_hdp(adev, NULL);
	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);

	drm_dev_exit(idx);
}

/**
 * amdgpu_gart_map - map dma_addresses into GART entries
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 * @dst: CPU address of the gart table
 *
 * Map the dma_addresses into GART entries (all asics).
 */
void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
		    int pages, dma_addr_t *dma_addr, uint64_t flags,
		    void *dst)
{
	uint64_t page_base;
	unsigned i, j, t;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	t = offset / AMDGPU_GPU_PAGE_SIZE;

	for (i = 0; i < pages; i++) {
		page_base = dma_addr[i];
		for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	drm_dev_exit(idx);
}
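
/*
 * Worked example (illustrative): on a kernel built with 64K CPU pages
 * (some arm64/ppc64 configs), AMDGPU_GPU_PAGE_SIZE is still 4K, so
 * AMDGPU_GPU_PAGES_IN_CPU_PAGE is 64K / 4K = 16.  Mapping one CPU page
 * therefore writes 16 consecutive GART PTEs, each pointing 4K further
 * into the same DMA mapping.  With 4K CPU pages the ratio is 1:1.
 */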

/**
 * amdgpu_gart_bind - bind pages into the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 */
void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
		     int pages, dma_addr_t *dma_addr,
		     uint64_t flags)
{
	if (!adev->gart.ptr)
		return;

	amdgpu_gart_map(adev, offset, pages, dma_addr, flags, adev->gart.ptr);
}
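
/*
 * Hypothetical usage sketch (not built, guarded out): bind a single
 * dma_map_page()-mapped CPU page at the start of the GART aperture and
 * flush the TLB so the GPU sees it.  The flag choice is an assumption;
 * real callers live in amdgpu_ttm.c and pick flags per ASIC.
 */
#if 0
static void example_gart_bind_one(struct amdgpu_device *adev, dma_addr_t dma)
{
	uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
			 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;

	/* pages counts CPU pages; dma must cover a full PAGE_SIZE */
	amdgpu_gart_bind(adev, 0, 1, &dma, flags);
	amdgpu_gart_invalidate_tlb(adev);
}
#endif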

/**
 * amdgpu_gart_invalidate_tlb - invalidate gart TLB
 *
 * @adev: amdgpu_device pointer
 *
 * Invalidate the GART TLB, which can be used as a way to flush GART changes.
 */
void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
{
	int i;

	if (!adev->gart.ptr)
		return;

	mb();
	if (down_read_trylock(&adev->reset_domain->sem)) {
		amdgpu_device_flush_hdp(adev, NULL);
		up_read(&adev->reset_domain->sem);
	}
	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
}

/**
 * amdgpu_gart_init - init the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->dummy_page_addr)
		return 0;

	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = amdgpu_gart_dummy_page_init(adev);
	if (r)
		return r;
	/* Compute table size */
	adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
	adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);

	return 0;
}
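
/*
 * Sizing example (illustrative): a 512MB GART with 4K CPU pages gives
 * num_cpu_pages = num_gpu_pages = 131072; with 64K CPU pages,
 * num_cpu_pages drops to 8192 while num_gpu_pages stays at 131072,
 * since the table always holds one entry per 4K GPU page.
 */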