/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/enum.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

void
gf100_ltc_cbc_clear(struct nvkm_ltc_priv *ltc, u32 start, u32 limit)
{
	struct nvkm_device *device = ltc->base.subdev.device;
	nvkm_wr32(device, 0x17e8cc, start);
	nvkm_wr32(device, 0x17e8d0, limit);
	nvkm_wr32(device, 0x17e8c8, 0x00000004);
}

void
gf100_ltc_cbc_wait(struct nvkm_ltc_priv *ltc)
{
	int c, s;
	for (c = 0; c < ltc->ltc_nr; c++) {
		for (s = 0; s < ltc->lts_nr; s++)
			nv_wait(ltc, 0x1410c8 + c * 0x2000 + s * 0x400, ~0, 0);
	}
}

void
gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *ltc, int i, const u32 color[4])
{
	struct nvkm_device *device = ltc->base.subdev.device;
	nvkm_mask(device, 0x17ea44, 0x0000000f, i);
	nvkm_wr32(device, 0x17ea48, color[0]);
	nvkm_wr32(device, 0x17ea4c, color[1]);
	nvkm_wr32(device, 0x17ea50, color[2]);
	nvkm_wr32(device, 0x17ea54, color[3]);
}

void
gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *ltc, int i, const u32 depth)
{
	struct nvkm_device *device = ltc->base.subdev.device;
	nvkm_mask(device, 0x17ea44, 0x0000000f, i);
	nvkm_wr32(device, 0x17ea58, depth);
}

static const struct nvkm_bitfield
gf100_ltc_lts_intr_name[] = {
	{ 0x00000001, "IDLE_ERROR_IQ" },
	{ 0x00000002, "IDLE_ERROR_CBC" },
	{ 0x00000004, "IDLE_ERROR_TSTG" },
	{ 0x00000008, "IDLE_ERROR_DSTG" },
	{ 0x00000010, "EVICTED_CB" },
	{ 0x00000020, "ILLEGAL_COMPSTAT" },
	{ 0x00000040, "BLOCKLINEAR_CB" },
	{ 0x00000100, "ECC_SEC_ERROR" },
	{ 0x00000200, "ECC_DED_ERROR" },
	{ 0x00000400, "DEBUG" },
	{ 0x00000800, "ATOMIC_TO_Z" },
	{ 0x00001000, "ILLEGAL_ATOMIC" },
	{ 0x00002000, "BLKACTIVITY_ERR" },
	{}
};

static void
gf100_ltc_lts_intr(struct nvkm_ltc_priv *ltc, int c, int s)
{
	struct nvkm_device *device = ltc->base.subdev.device;
	u32 base = 0x141000 + (c * 0x2000) + (s * 0x400);
	u32 intr = nvkm_rd32(device, base + 0x020);
	u32 stat = intr & 0x0000ffff;

	if (stat) {
		nv_info(ltc, "LTC%d_LTS%d:", c, s);
		nvkm_bitfield_print(gf100_ltc_lts_intr_name, stat);
		pr_cont("\n");
	}

	nvkm_wr32(device, base + 0x020, intr);
}

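/*
 * Top-level LTC interrupt dispatch. From the code below, register 0x00017c
 * appears to hold one pending bit per LTC; for each bit set, every LTS slice
 * of that LTC is serviced by gf100_ltc_lts_intr(), which decodes the
 * per-slice status against gf100_ltc_lts_intr_name[] and then writes the raw
 * interrupt value back to the same register, presumably to acknowledge it.
 */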
void
gf100_ltc_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_ltc_priv *ltc = (void *)subdev;
	struct nvkm_device *device = ltc->base.subdev.device;
	u32 mask;

	mask = nvkm_rd32(device, 0x00017c);
	while (mask) {
		u32 s, c = __ffs(mask);
		for (s = 0; s < ltc->lts_nr; s++)
			gf100_ltc_lts_intr(ltc, c, s);
		mask &= ~(1 << c);
	}
}

static int
gf100_ltc_init(struct nvkm_object *object)
{
	struct nvkm_ltc_priv *ltc = (void *)object;
	struct nvkm_device *device = ltc->base.subdev.device;
	u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
	int ret;

	ret = nvkm_ltc_init(ltc);
	if (ret)
		return ret;

	nvkm_mask(device, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
	nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
	nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
	nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
	return 0;
}

void
gf100_ltc_dtor(struct nvkm_object *object)
{
	struct nvkm_fb *fb = nvkm_fb(object);
	struct nvkm_ltc_priv *ltc = (void *)object;

	nvkm_mm_fini(&ltc->tags);
	if (fb->ram)
		nvkm_mm_free(&fb->vram, &ltc->tag_ram);

	nvkm_ltc_destroy(ltc);
}

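/*
 * Illustrative sizing example for the tag RAM allocation below (assumed
 * values, not taken from the original source: 1 GiB of VRAM, ltc_nr == 4):
 *   num_tags   = (1 GiB >> 17) / 4 = 2048 (already a multiple of 64)
 *   tag_align  = 4 * 0x800 = 0x2000
 *   tag_margin = 0x6000 (since tag_align < 0x6000)
 *   tag_size   = (2048 / 64) * 0x6000 + 0x6000 = 0xc6000
 *              + tag_align                     = 0xc8000
 *             -> (0xc8000 + 0xfff) >> 12       = 0xc8 pages (~800 KiB)
 */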
/* TODO: Figure out tag memory details and drop the over-cautious allocation.
 */
int
gf100_ltc_init_tag_ram(struct nvkm_fb *fb, struct nvkm_ltc_priv *ltc)
{
	u32 tag_size, tag_margin, tag_align;
	int ret;

	/* No VRAM, no tags for now. */
	if (!fb->ram) {
		ltc->num_tags = 0;
		goto mm_init;
	}

	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
	ltc->num_tags = (fb->ram->size >> 17) / 4;
	if (ltc->num_tags > (1 << 17))
		ltc->num_tags = 1 << 17; /* we have 17 bits in PTE */
	ltc->num_tags = (ltc->num_tags + 63) & ~63; /* round up to 64 */

	tag_align = ltc->ltc_nr * 0x800;
	tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;

	/* 4 part 4 sub: 0x2000 bytes for 56 tags */
	/* 3 part 4 sub: 0x6000 bytes for 168 tags */
	/*
	 * About 147 bytes per tag. Let's be safe and allocate x2, which makes
	 * 0x4980 bytes for 64 tags, and round up to 0x6000 bytes for 64 tags.
	 *
	 * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %.
	 */
	tag_size = (ltc->num_tags / 64) * 0x6000 + tag_margin;
	tag_size += tag_align;
	tag_size = (tag_size + 0xfff) >> 12; /* round up */

	ret = nvkm_mm_tail(&fb->vram, 1, 1, tag_size, tag_size, 1,
			   &ltc->tag_ram);
	if (ret) {
		ltc->num_tags = 0;
	} else {
		u64 tag_base = ((u64)ltc->tag_ram->offset << 12) + tag_margin;

		tag_base += tag_align - 1;
		do_div(tag_base, tag_align);

		ltc->tag_base = tag_base;
	}

mm_init:
	ret = nvkm_mm_init(&ltc->tags, 0, ltc->num_tags, 1);
	return ret;
}

int
gf100_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nvkm_device *device = (void *)parent;
	struct nvkm_fb *fb = device->fb;
	struct nvkm_ltc_priv *ltc;
	u32 parts, mask;
	int ret, i;

	ret = nvkm_ltc_create(parent, engine, oclass, &ltc);
	*pobject = nv_object(ltc);
	if (ret)
		return ret;

	parts = nvkm_rd32(device, 0x022438);
	mask = nvkm_rd32(device, 0x022554);
	for (i = 0; i < parts; i++) {
		if (!(mask & (1 << i)))
			ltc->ltc_nr++;
	}
	ltc->lts_nr = nvkm_rd32(device, 0x17e8dc) >> 28;

	ret = gf100_ltc_init_tag_ram(fb, ltc);
	if (ret)
		return ret;

	nv_subdev(ltc)->intr = gf100_ltc_intr;
	return 0;
}

struct nvkm_oclass *
gf100_ltc_oclass = &(struct nvkm_ltc_impl) {
	.base.handle = NV_SUBDEV(LTC, 0xc0),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_ltc_ctor,
		.dtor = gf100_ltc_dtor,
		.init = gf100_ltc_init,
		.fini = _nvkm_ltc_fini,
	},
	.intr = gf100_ltc_intr,
	.cbc_clear = gf100_ltc_cbc_clear,
	.cbc_wait = gf100_ltc_cbc_wait,
	.zbc = 16,
	.zbc_clear_color = gf100_ltc_zbc_clear_color,
	.zbc_clear_depth = gf100_ltc_zbc_clear_depth,
}.base;
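
/*
 * Note: the non-static helpers above (gf100_ltc_cbc_*, gf100_ltc_zbc_*, the
 * ctor/dtor and gf100_ltc_init_tag_ram) are presumably declared in priv.h so
 * that later LTC implementations can reuse them instead of duplicating the
 * register programming and tag RAM setup.
 */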