#include <linux/pagemap.h>
#include <linux/slab.h>

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"

struct nouveau_sgdma_be {
	/* this has to be the first field so populate/unpopulate in
	 * nouveau_bo.c works properly, otherwise we'd have to move them here
	 */
	struct ttm_dma_tt ttm;
	struct drm_device *dev;
	struct nouveau_mem *node;
};

static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}

static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	/* record the backing pages (or the sg table for imported buffers) */
	if (ttm->sg) {
		node->sg = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	/* size in 4KiB units (GPU small pages) */
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;

	/* pre-NV50: map the pages through the VM layer at bind time */
	nouveau_vm_map(&node->vma[0], node);
	nvbe->node = node;
	return 0;
}

static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	nouveau_vm_unmap(&nvbe->node->vma[0]);
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.bind			= nv04_sgdma_bind,
	.unbind			= nv04_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	/* the actual VM mapping is done in move_notify(); just record
	 * the backing pages here
	 */
	if (ttm->sg) {
		node->sg = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	/* size in 4KiB units (GPU small pages) */
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	/* noop: unbound in move_notify() */
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.bind			= nv50_sgdma_bind,
	.unbind			= nv50_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = drm->dev;
	if (nv_device(drm->device)->card_type < NV_50)
		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
	else
		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(nvbe);
		return NULL;
	}
	return &nvbe->ttm.ttm;
}
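
/*
 * Usage sketch (not part of this file, shown for context): TTM reaches
 * nouveau_sgdma_create_ttm() through the driver's ttm_tt_create hook, which
 * nouveau_bo.c is assumed to wire up in its ttm_bo_driver table roughly like:
 *
 *	struct ttm_bo_driver nouveau_bo_driver = {
 *		.ttm_tt_create		= &nouveau_sgdma_create_ttm,
 *		.ttm_tt_populate	= &nouveau_ttm_tt_populate,
 *		.ttm_tt_unpopulate	= &nouveau_ttm_tt_unpopulate,
 *		... remaining callbacks elided ...
 *	};
 *
 * Because struct ttm_dma_tt is the first member of struct nouveau_sgdma_be,
 * the ttm_tt pointer TTM passes back into bind/unbind/destroy can be cast
 * straight to a nouveau_sgdma_be, which is what the helpers above rely on.
 */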