/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/memory.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

void
gf100_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
{
	struct nvkm_device *device = ltc->subdev.device;
	nvkm_wr32(device, 0x17e8cc, start);
	nvkm_wr32(device, 0x17e8d0, limit);
	nvkm_wr32(device, 0x17e8c8, 0x00000004);
}

void
gf100_ltc_cbc_wait(struct nvkm_ltc *ltc)
{
	struct nvkm_device *device = ltc->subdev.device;
	int c, s;
	for (c = 0; c < ltc->ltc_nr; c++) {
		for (s = 0; s < ltc->lts_nr; s++) {
			const u32 addr = 0x1410c8 + (c * 0x2000) + (s * 0x400);
			nvkm_msec(device, 2000,
				if (!nvkm_rd32(device, addr))
					break;
			);
		}
	}
}

void
gf100_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
{
	struct nvkm_device *device = ltc->subdev.device;
	nvkm_mask(device, 0x17ea44, 0x0000000f, i);
	nvkm_wr32(device, 0x17ea48, color[0]);
	nvkm_wr32(device, 0x17ea4c, color[1]);
	nvkm_wr32(device, 0x17ea50, color[2]);
	nvkm_wr32(device, 0x17ea54, color[3]);
}

void
gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
{
	struct nvkm_device *device = ltc->subdev.device;
	nvkm_mask(device, 0x17ea44, 0x0000000f, i);
	nvkm_wr32(device, 0x17ea58, depth);
}

const struct nvkm_bitfield
gf100_ltc_lts_intr_name[] = {
	{ 0x00000001, "IDLE_ERROR_IQ" },
	{ 0x00000002, "IDLE_ERROR_CBC" },
	{ 0x00000004, "IDLE_ERROR_TSTG" },
	{ 0x00000008, "IDLE_ERROR_DSTG" },
	{ 0x00000010, "EVICTED_CB" },
	{ 0x00000020, "ILLEGAL_COMPSTAT" },
	{ 0x00000040, "BLOCKLINEAR_CB" },
	{ 0x00000100, "ECC_SEC_ERROR" },
	{ 0x00000200, "ECC_DED_ERROR" },
	{ 0x00000400, "DEBUG" },
	{ 0x00000800, "ATOMIC_TO_Z" },
	{ 0x00001000, "ILLEGAL_ATOMIC" },
	{ 0x00002000, "BLKACTIVITY_ERR" },
	{}
};

static void
gf100_ltc_lts_intr(struct nvkm_ltc *ltc, int c, int s)
{
	struct nvkm_subdev *subdev = &ltc->subdev;
	struct nvkm_device *device = subdev->device;
	u32 base = 0x141000 + (c * 0x2000) + (s * 0x400);
	u32 intr = nvkm_rd32(device, base + 0x020);
	u32 stat = intr & 0x0000ffff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
		nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, stat, msg);
	}

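	/* writing the latched status bits back acks (clears) the interrupt */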
	nvkm_wr32(device, base + 0x020, intr);
}

void
gf100_ltc_intr(struct nvkm_ltc *ltc)
{
	struct nvkm_device *device = ltc->subdev.device;
	u32 mask;

	mask = nvkm_rd32(device, 0x00017c);
	while (mask) {
		u32 s, c = __ffs(mask);
		for (s = 0; s < ltc->lts_nr; s++)
			gf100_ltc_lts_intr(ltc, c, s);
		mask &= ~(1 << c);
	}
}

void
gf100_ltc_invalidate(struct nvkm_ltc *ltc)
{
	struct nvkm_device *device = ltc->subdev.device;
	s64 taken;

	nvkm_wr32(device, 0x70004, 0x00000001);
	taken = nvkm_wait_msec(device, 2000, 0x70004, 0x00000003, 0x00000000);

	if (taken > 0)
		nvkm_debug(&ltc->subdev, "LTC invalidate took %lld ns\n", taken);
}

void
gf100_ltc_flush(struct nvkm_ltc *ltc)
{
	struct nvkm_device *device = ltc->subdev.device;
	s64 taken;

	nvkm_wr32(device, 0x70010, 0x00000001);
	taken = nvkm_wait_msec(device, 2000, 0x70010, 0x00000003, 0x00000000);

	if (taken > 0)
		nvkm_debug(&ltc->subdev, "LTC flush took %lld ns\n", taken);
}

/* TODO: Figure out tag memory details and drop the over-cautious allocation.
 */
int
gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
{
	struct nvkm_device *device = ltc->subdev.device;
	struct nvkm_fb *fb = device->fb;
	struct nvkm_ram *ram = fb->ram;
	u32 bits = (nvkm_rd32(device, 0x100c80) & 0x00001000) ? 16 : 17;
	u32 tag_size, tag_margin, tag_align;
	int ret;

	/* No VRAM, no tags for now. */
	if (!ram) {
		ltc->num_tags = 0;
		goto mm_init;
	}

	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
	ltc->num_tags = (ram->size >> 17) / 4;
	if (ltc->num_tags > (1 << bits))
		ltc->num_tags = 1 << bits; /* we have 16/17 bits in PTE */
	ltc->num_tags = (ltc->num_tags + 63) & ~63; /* round up to 64 */

	tag_align = ltc->ltc_nr * 0x800;
	tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;

	/* 4 part 4 sub: 0x2000 bytes for 56 tags */
	/* 3 part 4 sub: 0x6000 bytes for 168 tags */
	/*
	 * About 147 bytes per tag. Let's be safe and allocate x2, which makes
	 * 0x4980 bytes for 64 tags, and round up to 0x6000 bytes for 64 tags.
	 *
	 * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %.
	 */
	tag_size = (ltc->num_tags / 64) * 0x6000 + tag_margin;
	tag_size += tag_align;

	ret = nvkm_ram_get(device, NVKM_RAM_MM_NORMAL, 0x01, 12, tag_size,
			   true, true, &ltc->tag_ram);
	if (ret) {
		ltc->num_tags = 0;
	} else {
		u64 tag_base = nvkm_memory_addr(ltc->tag_ram) + tag_margin;

		tag_base += tag_align - 1;
		do_div(tag_base, tag_align);

		ltc->tag_base = tag_base;
	}

mm_init:
	nvkm_mm_fini(&fb->tags.mm);
	return nvkm_mm_init(&fb->tags.mm, 0, 0, ltc->num_tags, 1);
}

int
gf100_ltc_oneinit(struct nvkm_ltc *ltc)
{
	struct nvkm_device *device = ltc->subdev.device;
	const u32 parts = nvkm_rd32(device, 0x022438);
	const u32 mask = nvkm_rd32(device, 0x022554);
	const u32 slice = nvkm_rd32(device, 0x17e8dc) >> 28;
	int i;

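	/* A set bit in the mask is assumed to mark a floorswept (disabled)
	 * partition; count only the LTC units that are actually present.
	 */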
	for (i = 0; i < parts; i++) {
		if (!(mask & (1 << i)))
			ltc->ltc_nr++;
	}
	ltc->lts_nr = slice;

	return gf100_ltc_oneinit_tag_ram(ltc);
}

static void
gf100_ltc_init(struct nvkm_ltc *ltc)
{
	struct nvkm_device *device = ltc->subdev.device;
	u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);

	nvkm_mask(device, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
	nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
	nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
	nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
}

static const struct nvkm_ltc_func
gf100_ltc = {
	.oneinit = gf100_ltc_oneinit,
	.init = gf100_ltc_init,
	.intr = gf100_ltc_intr,
	.cbc_clear = gf100_ltc_cbc_clear,
	.cbc_wait = gf100_ltc_cbc_wait,
	.zbc_color = 16,
	.zbc_depth = 16,
	.zbc_clear_color = gf100_ltc_zbc_clear_color,
	.zbc_clear_depth = gf100_ltc_zbc_clear_depth,
	.invalidate = gf100_ltc_invalidate,
	.flush = gf100_ltc_flush,
};

int
gf100_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_ltc **pltc)
{
	return nvkm_ltc_new_(&gf100_ltc, device, type, inst, pltc);
}