#include <linux/pagemap.h>
#include <linux/slab.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"

struct nouveau_sgdma_be {
	/* this has to be the first field so populate/unpopulate in
	 * nouveau_bo.c works properly, otherwise have to move them here
	 */
	struct ttm_dma_tt ttm;
	struct drm_device *dev;
	struct nouveau_mem *node;
};

static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}

/* Pre-Tesla chips: the GART mapping is set up here, at bind time. */
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	if (ttm->sg) {
		node->sg    = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg    = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12; /* in 4KiB units */

	nouveau_vm_map(&node->vma[0], node);
	nvbe->node = node;
	return 0;
}

static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	nouveau_vm_unmap(&nvbe->node->vma[0]);
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.bind		= nv04_sgdma_bind,
	.unbind		= nv04_sgdma_unbind,
	.destroy	= nouveau_sgdma_destroy
};

/* Tesla and later: the VM mapping is handled in move_notify(), so bind
 * only records where the backing pages live.
 */
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	/* noop: bound in move_notify() */
	if (ttm->sg) {
		node->sg    = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg    = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	/* noop: unbound in move_notify() */
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.bind		= nv50_sgdma_bind,
	.unbind		= nv50_sgdma_unbind,
	.destroy	= nouveau_sgdma_destroy
};

struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = drm->dev;

	/* pick the bind/unbind implementation for this chipset generation */
	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
	else
		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(nvbe);	/* don't leak the backend on init failure */
		return NULL;
	}
	return &nvbe->ttm.ttm;
}
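
/*
 * Usage sketch (an assumption about the caller, not part of this file):
 * nouveau_bo.c is expected to install nouveau_sgdma_create_ttm() as the
 * ttm_tt_create hook of its ttm_bo_driver, roughly:
 *
 *	struct ttm_bo_driver nouveau_bo_driver = {
 *		.ttm_tt_create		= &nouveau_sgdma_create_ttm,
 *		.ttm_tt_populate	= &nouveau_ttm_tt_populate,
 *		.ttm_tt_unpopulate	= &nouveau_ttm_tt_unpopulate,
 *		...
 *	};
 *
 * TTM then invokes the selected backend's bind()/unbind() callbacks when a
 * buffer object is moved into or out of the GART aperture.
 */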