/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "gf100.h"
#include "ctxgf100.h"

#include <subdev/timer.h>

#include <nvif/class.h>

static void
gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 val;

	/* Bypass MMU check for non-secure boot */
	if (!device->secboot) {
		nvkm_wr32(device, 0x100ce4, 0xffffffff);

		if (nvkm_rd32(device, 0x100ce4) != 0xffffffff)
			nvdev_warn(device,
				   "cannot bypass secure boot - expect failure soon!\n");
	}

	val = nvkm_rd32(device, 0x100c80);
	val &= 0xf000187f;
	nvkm_wr32(device, 0x418880, val);
	nvkm_wr32(device, 0x418890, 0);
	nvkm_wr32(device, 0x418894, 0);

	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));

	nvkm_wr32(device, 0x4188ac, nvkm_rd32(device, 0x100800));
}

static void
gm20b_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, 0x419e44, 0xdffffe);
	nvkm_wr32(device, 0x419e4c, 0x5);
}

static const struct gf100_gr_func
gm20b_gr = {
	.oneinit_tiles = gm200_gr_oneinit_tiles,
	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
	.init = gk20a_gr_init,
	.init_zcull = gf117_gr_init_zcull,
	.init_gpc_mmu = gm20b_gr_init_gpc_mmu,
	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
	.trap_mp = gf100_gr_trap_mp,
	.set_hww_esr_report_mask = gm20b_gr_set_hww_esr_report_mask,
	.rops = gm200_gr_rops,
	.ppc_nr = 1,
	.grctx = &gm20b_grctx,
	.zbc = &gf100_gr_zbc,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
		{ -1, -1, MAXWELL_B, &gf100_fermi },
		{ -1, -1, MAXWELL_COMPUTE_B },
		{}
	}
};

int
gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return gm200_gr_new_(&gm20b_gr, device, index, pgr);
}