/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "ctxgf100.h"

/*******************************************************************************
 * PGRAPH context implementation
 ******************************************************************************/

const struct gf100_gr_init
gv100_grctx_init_sw_veid_bundle_init_0[] = {
	{ 0x00001000, 64, 0x00100000, 0x00000008 },
	{ 0x00000941, 64, 0x00100000, 0x00000000 },
	{ 0x0000097e, 64, 0x00100000, 0x00000000 },
	{ 0x0000097f, 64, 0x00100000, 0x00000100 },
	{ 0x0000035c, 64, 0x00100000, 0x00000000 },
	{ 0x0000035d, 64, 0x00100000, 0x00000000 },
	{ 0x00000a08, 64, 0x00100000, 0x00000000 },
	{ 0x00000a09, 64, 0x00100000, 0x00000000 },
	{ 0x00000a0a, 64, 0x00100000, 0x00000000 },
	{ 0x00000352, 64, 0x00100000, 0x00000000 },
	{ 0x00000353, 64, 0x00100000, 0x00000000 },
	{ 0x00000358, 64, 0x00100000, 0x00000000 },
	{ 0x00000359, 64, 0x00100000, 0x00000000 },
	{ 0x00000370, 64, 0x00100000, 0x00000000 },
	{ 0x00000371, 64, 0x00100000, 0x00000000 },
	{ 0x00000372, 64, 0x00100000, 0x000fffff },
	{ 0x00000366, 64, 0x00100000, 0x00000000 },
	{ 0x00000367, 64, 0x00100000, 0x00000000 },
	{ 0x00000368, 64, 0x00100000, 0x00000fff },
	{ 0x00000623, 64, 0x00100000, 0x00000000 },
	{ 0x00000624, 64, 0x00100000, 0x00000000 },
	{ 0x0001e100,  1, 0x00000001, 0x02000001 },
	{}
};

static const struct gf100_gr_pack
gv100_grctx_pack_sw_veid_bundle_init[] = {
	{ gv100_grctx_init_sw_veid_bundle_init_0 },
	{}
};

void
gv100_grctx_generate_attrib(struct gf100_grctx *info)
{
	struct gf100_gr *gr = info->gr;
	const struct gf100_grctx_func *grctx = gr->func->grctx;
	const u32  alpha = grctx->alpha_nr;
	const u32 attrib = grctx->attrib_nr;
	const u32   gfxp = grctx->gfxp_nr;
	const int s = 12;
	u32 size = grctx->alpha_nr_max * gr->tpc_total;
	u32 ao = 0;
	u32 bo = ao + size;
	int gpc, ppc, b, n = 0;

	for (gpc = 0; gpc < gr->gpc_nr; gpc++)
		size += grctx->gfxp_nr * gr->ppc_nr[gpc] * gr->ppc_tpc_max;
	size = ((size * 0x20) + 127) & ~127;
	b = mmio_vram(info, size, (1 << s), false);

	mmio_refn(info, 0x418810, 0x80000000, s, b);
	mmio_refn(info, 0x419848, 0x10000000, s, b);
	mmio_refn(info, 0x419c2c, 0x10000000, s, b);
	mmio_refn(info, 0x419e00, 0x00000000, s, b);
	mmio_wr32(info, 0x419e04, 0x80000000 | size >> 7);
	mmio_wr32(info, 0x405830, attrib);
	mmio_wr32(info, 0x40585c, alpha);

	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
			const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
			const u32 bs = attrib * gr->ppc_tpc_max;
			const u32 gs =   gfxp * gr->ppc_tpc_max;
			const u32 u = 0x418ea0 + (n * 0x04);
			const u32 o = PPC_UNIT(gpc, ppc, 0);
			if (!(gr->ppc_mask[gpc] & (1 << ppc)))
				continue;
			mmio_wr32(info, o + 0xc0, gs);
			mmio_wr32(info, o + 0xf4, bo);
			mmio_wr32(info, o + 0xf0, bs);
			bo += gs;
			mmio_wr32(info, o + 0xe4, as);
			mmio_wr32(info, o + 0xf8, ao);
			ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
			mmio_wr32(info, u, bs);
		}
	}

	mmio_wr32(info, 0x4181e4, 0x00000100);
	mmio_wr32(info, 0x41befc, 0x00000100);
}

void
gv100_grctx_generate_rop_mapping(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 data;
	int i, j;

	/* Pack tile map into register format. */
	nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
				     gr->screen_tile_row_offset);
	for (i = 0; i < 11; i++) {
		for (data = 0, j = 0; j < 6; j++)
			data |= (gr->tile[i * 6 + j] & 0x1f) << (j * 5);
		nvkm_wr32(device, 0x418b08 + (i * 4), data);
		nvkm_wr32(device, 0x41bf00 + (i * 4), data);
		nvkm_wr32(device, 0x40780c + (i * 4), data);
	}

	/* GPC_BROADCAST.TP_BROADCAST */
	nvkm_wr32(device, 0x41bfd0, (gr->tpc_total << 8) |
				     gr->screen_tile_row_offset);
	for (i = 0, j = 1; i < 5; i++, j += 4) {
		u8 v19 = (1 << (j + 0)) % gr->tpc_total;
		u8 v20 = (1 << (j + 1)) % gr->tpc_total;
		u8 v21 = (1 << (j + 2)) % gr->tpc_total;
		u8 v22 = (1 << (j + 3)) % gr->tpc_total;
		nvkm_wr32(device, 0x41bfb0 + (i * 4), (v22 << 24) |
						      (v21 << 16) |
						      (v20 <<  8) |
						       v19);
	}

	/* UNK78xx */
	nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
				     gr->screen_tile_row_offset);
}

void
gv100_grctx_generate_r400088(struct gf100_gr *gr, bool on)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_mask(device, 0x400088, 0x00060000, on ? 0x00060000 : 0x00000000);
}

static void
gv100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x608), sm);
	nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), sm);
	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);
}

void
gv100_grctx_generate_unkn(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_mask(device, 0x41980c, 0x00000010, 0x00000010);
	nvkm_mask(device, 0x41be08, 0x00000004, 0x00000004);
	nvkm_mask(device, 0x4064c0, 0x80000000, 0x80000000);
	nvkm_mask(device, 0x405800, 0x08000000, 0x08000000);
	nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
}

void
gv100_grctx_unkn88c(struct gf100_gr *gr, bool on)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const u32 mask = 0x00000010, data = on ? mask : 0x00000000;
	nvkm_mask(device, 0x40988c, mask, data);
	nvkm_rd32(device, 0x40988c);
	nvkm_mask(device, 0x41a88c, mask, data);
	nvkm_rd32(device, 0x41a88c);
	nvkm_mask(device, 0x408a14, mask, data);
	nvkm_rd32(device, 0x408a14);
}

const struct gf100_grctx_func
gv100_grctx = {
	.unkn88c = gv100_grctx_unkn88c,
	.main = gf100_grctx_generate_main,
	.unkn = gv100_grctx_generate_unkn,
	.sw_veid_bundle_init = gv100_grctx_pack_sw_veid_bundle_init,
	.bundle = gm107_grctx_generate_bundle,
	.bundle_size = 0x3000,
	.bundle_min_gpm_fifo_depth = 0x180,
	.bundle_token_limit = 0x1680,
	.pagepool = gp100_grctx_generate_pagepool,
	.pagepool_size = 0x20000,
	.attrib = gv100_grctx_generate_attrib,
	.attrib_nr_max = 0x6c0,
	.attrib_nr = 0x480,
	.alpha_nr_max = 0xc00,
	.alpha_nr = 0x800,
	.gfxp_nr = 0xd10,
	.sm_id = gv100_grctx_generate_sm_id,
	.rop_mapping = gv100_grctx_generate_rop_mapping,
	.dist_skip_table = gm200_grctx_generate_dist_skip_table,
	.r406500 = gm200_grctx_generate_r406500,
	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
	.smid_config = gp100_grctx_generate_smid_config,
	.r400088 = gv100_grctx_generate_r400088,
};