/*
 * Copyright 2013 Ilia Mirkin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <engine/xtensa.h>

#include <core/engctx.h>

int
_nvkm_xtensa_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			 struct nvkm_oclass *oclass, void *data, u32 size,
			 struct nvkm_object **pobject)
{
	struct nvkm_engctx *engctx;
	int ret;

	ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
	*pobject = nv_object(engctx);
	return ret;
}

void
_nvkm_xtensa_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_xtensa *xtensa = (void *)subdev;
	struct nvkm_device *device = xtensa->engine.subdev.device;
	const u32 base = xtensa->addr;
	u32 unk104 = nvkm_rd32(device, base + 0xd04);
	u32 intr = nvkm_rd32(device, base + 0xc20);
	u32 chan = nvkm_rd32(device, base + 0xc28);
	u32 unk10c = nvkm_rd32(device, base + 0xd0c);

	if (intr & 0x10)
		nvkm_warn(subdev, "Watchdog interrupt, engine hung.\n");
	nvkm_wr32(device, base + 0xc20, intr);
	intr = nvkm_rd32(device, base + 0xc20);
	if (unk104 == 0x10001 && unk10c == 0x200 && chan && !intr) {
		nvkm_debug(subdev, "Enabling FIFO_CTRL\n");
		nvkm_mask(device, xtensa->addr + 0xd94, 0, xtensa->fifo_val);
	}
}

int
nvkm_xtensa_create_(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, u32 addr, bool enable,
		    const char *iname, const char *fname,
		    int length, void **pobject)
{
	struct nvkm_xtensa *xtensa;
	int ret;

	ret = nvkm_engine_create_(parent, engine, oclass, enable, iname,
				  fname, length, pobject);
	xtensa = *pobject;
	if (ret)
		return ret;

	nv_subdev(xtensa)->intr = _nvkm_xtensa_intr;
	xtensa->addr = addr;
	return 0;
}

int
_nvkm_xtensa_init(struct nvkm_object *object)
{
	struct nvkm_xtensa *xtensa = (void *)object;
	struct nvkm_subdev *subdev = &xtensa->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 base = xtensa->addr;
	const struct firmware *fw;
	char name[32];
	int i, ret;
	u64 addr, size;
	u32 tmp;

	ret = nvkm_engine_init_old(&xtensa->engine);
	if (ret)
		return ret;

	if (!xtensa->gpu_fw) {
		snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
			 xtensa->addr >> 12);

		ret = request_firmware(&fw, name, nv_device_base(device));
		if (ret) {
			nvkm_warn(subdev, "unable to load firmware %s\n", name);
			return ret;
		}

		if (fw->size > 0x40000) {
			nvkm_warn(subdev, "firmware %s too large\n", name);
			release_firmware(fw);
			return -EINVAL;
		}

		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x40000, 0x1000, false,
				      &xtensa->gpu_fw);
		if (ret) {
			release_firmware(fw);
			return ret;
		}

		nvkm_kmap(xtensa->gpu_fw);
		for (i = 0; i < fw->size / 4; i++)
			nvkm_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
		nvkm_done(xtensa->gpu_fw);
		release_firmware(fw);
	}

	addr = nvkm_memory_addr(xtensa->gpu_fw);
	size = nvkm_memory_size(xtensa->gpu_fw);

	nvkm_wr32(device, base + 0xd10, 0x1fffffff); /* ?? */
	nvkm_wr32(device, base + 0xd08, 0x0fffffff); /* ?? */

	nvkm_wr32(device, base + 0xd28, xtensa->unkd28); /* ?? */
	nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
	nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */

	nvkm_wr32(device, base + 0xcc0, addr >> 8); /* XT_REGION_BASE */
	nvkm_wr32(device, base + 0xcc4, 0x1c); /* XT_REGION_SETUP */
	nvkm_wr32(device, base + 0xcc8, size >> 8); /* XT_REGION_LIMIT */

	tmp = nvkm_rd32(device, 0x0);
	nvkm_wr32(device, base + 0xde0, tmp); /* SCRATCH_H2X */

	nvkm_wr32(device, base + 0xce8, 0xf); /* XT_REGION_SETUP */

	nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
	nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
	return 0;
}

int
_nvkm_xtensa_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_xtensa *xtensa = (void *)object;
	struct nvkm_device *device = xtensa->engine.subdev.device;
	const u32 base = xtensa->addr;

	nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */
	nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */

	if (!suspend)
		nvkm_memory_del(&xtensa->gpu_fw);

	return nvkm_engine_fini_old(&xtensa->engine, suspend);
}