/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/ramht.h>
#include <engine/gr/nv40.h>
/******************************************************************************
 * instmem subdev implementation
 *****************************************************************************/
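
/* Unlike nv04, which goes through the 0x700000 PRAMIN window in BAR0,
 * nv40 maps the RAMIN aperture's own BAR (imem->iomem, set up in the
 * constructor below) and accesses it with ioread32_native()/
 * iowrite32_native(), nouveau's endian-matching MMIO helpers.
 */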

static u32
nv40_instmem_rd32(struct nvkm_instmem *obj, u32 addr)
{
	struct nv04_instmem *imem = container_of(obj, typeof(*imem), base);
	return ioread32_native(imem->iomem + addr);
}

static void
nv40_instmem_wr32(struct nvkm_instmem *obj, u32 addr, u32 data)
{
	struct nv04_instmem *imem = container_of(obj, typeof(*imem), base);
	iowrite32_native(data, imem->iomem + addr);
}

static const struct nvkm_instmem_func
nv40_instmem_func = {
	.rd32 = nv40_instmem_rd32,
	.wr32 = nv40_instmem_wr32,
};
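
/* Illustrative sketch only: consumers are expected to go through this func
 * table rather than touching imem->iomem directly, e.g.
 *
 *	u32 val = imem->base.func->rd32(&imem->base, 0x10000);
 *
 * which on nv40 resolves to an ioread32_native() of the mapped BAR.  The
 * wrappers callers actually use live in the instmem core, not in this file.
 */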

static int
nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	struct nvkm_device *device = nv_device(parent);
	struct nv04_instmem *imem;
	int ret, bar, vs;

	ret = nvkm_instmem_create(parent, engine, oclass, &imem);
	*pobject = nv_object(imem);
	if (ret)
		return ret;

	imem->base.func = &nv40_instmem_func;

	/* map bar */
	if (nv_device_resource_len(device, 2))
		bar = 2;
	else
		bar = 3;

	imem->iomem = ioremap(nv_device_resource_start(device, bar),
			      nv_device_resource_len(device, bar));
	if (!imem->iomem) {
		nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
		return -EFAULT;
	}
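
	/* Why resource 2 or 3 (assumption from the PCI layout of these
	 * boards): when BAR1 (the VRAM aperture) is a 64-bit BAR it also
	 * consumes the config slot that would otherwise be resource 2,
	 * leaving the RAMIN aperture at resource 3; the nonzero-length
	 * probe above picks whichever is present.
	 */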

	/* The PRAMIN aperture maps over the end of VRAM; reserve enough
	 * space to fit graphics contexts for every channel.  The magic
	 * numbers come from engine/gr/nv40.c.
	 */
	vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
	if      (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
	else if (device->chipset  < 0x43) imem->base.reserved = 0x4f00 * vs;
	else if (nv44_gr_class(device))   imem->base.reserved = 0x4980 * vs;
	else				  imem->base.reserved = 0x4a40 * vs;
	imem->base.reserved += 16 * 1024;
	imem->base.reserved *= 32;		/* per-channel */
	imem->base.reserved += 512 * 1024;	/* pci(e)gart table */
	imem->base.reserved += 512 * 1024;	/* object storage */

	imem->base.reserved = round_up(imem->base.reserved, 4096);
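
	/* Worked example (illustrative): with, say, two shader units
	 * reported in 0x001540 (vs = 2) on an original NV40:
	 *   0x6aa0 * 2          = 0x0000d540
	 *   + 16 KiB (0x4000)   = 0x00011540  per-channel context size
	 *   * 32 channels       = 0x0022a800
	 *   + 512 KiB (0x80000) = 0x002aa800  pci(e)gart table
	 *   + 512 KiB (0x80000) = 0x0032a800  object storage
	 *   round_up(, 4096)    = 0x0032b000  (~3.2 MiB reserved)
	 */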

	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
	if (ret)
		return ret;

	/* 0x00000-0x10000: reserve for probable vbios image */
	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x10000, 0, 0,
			      &imem->vbios);
	if (ret)
		return ret;

	/* 0x10000-0x18000: reserve for RAMHT */
	ret = nvkm_ramht_new(nv_object(imem), NULL, 0x08000, 0, &imem->ramht);
	if (ret)
		return ret;

	/* 0x18000-0x18200: reserve for RAMRO
	 * 0x18200-0x20000: padding
	 */
	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x08000, 0, 0,
			      &imem->ramro);
	if (ret)
		return ret;

	/* 0x20000-0x21000: reserve for RAMFC
	 * 0x21000-0x40000: padding and some unknown crap
	 */
	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x20000, 0,
			      NVOBJ_FLAG_ZERO_ALLOC, &imem->ramfc);
	if (ret)
		return ret;
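
	/* Resulting fixed PRAMIN layout (summarising the comments above):
	 *   0x00000-0x10000  vbios image
	 *   0x10000-0x18000  RAMHT
	 *   0x18000-0x20000  RAMRO + padding
	 *   0x20000-0x40000  RAMFC + padding
	 * The rest of the reserved area (per-channel contexts, pci(e)gart
	 * table, object storage) is allocated from the same heap on demand.
	 */
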
	return 0;
}
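
/* Illustrative only: this oclass is hooked up from the per-chipset device
 * tables (e.g. nvkm/engine/device/nv40.c in kernels of this vintage),
 * roughly as
 *
 *	device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
 *
 * The exact hookup lives outside this file.
 */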
struct nvkm_oclass *
nv40_instmem_oclass = &(struct nvkm_instmem_impl) {
	.base.handle = NV_SUBDEV(INSTMEM, 0x40),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv40_instmem_ctor,
		.dtor = nv04_instmem_dtor,
		.init = _nvkm_instmem_init,
		.fini = _nvkm_instmem_fini,
	},
	.instobj = &nv04_instobj_oclass.base,
}.base;