/*
 * Copyright 2013 Ilia Mirkin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <engine/xtensa.h>

#include <core/engctx.h>

int
_nvkm_xtensa_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			 struct nvkm_oclass *oclass, void *data, u32 size,
			 struct nvkm_object **pobject)
{
	struct nvkm_engctx *engctx;
	int ret;

	ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
	*pobject = nv_object(engctx);
	return ret;
}

void
_nvkm_xtensa_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_xtensa *xtensa = (void *)subdev;
	struct nvkm_device *device = xtensa->engine.subdev.device;
	const u32 base = xtensa->addr;
	u32 unk104 = nvkm_rd32(device, base + 0xd04);
	u32 intr = nvkm_rd32(device, base + 0xc20);
	u32 chan = nvkm_rd32(device, base + 0xc28);
	u32 unk10c = nvkm_rd32(device, base + 0xd0c);

	if (intr & 0x10)
		nvkm_warn(subdev, "Watchdog interrupt, engine hung.\n");
	nvkm_wr32(device, base + 0xc20, intr);
	intr = nvkm_rd32(device, base + 0xc20);
	if (unk104 == 0x10001 && unk10c == 0x200 && chan && !intr) {
		nvkm_debug(subdev, "Enabling FIFO_CTRL\n");
		nvkm_mask(device, xtensa->addr + 0xd94, 0, xtensa->fifo_val);
	}
}

int
nvkm_xtensa_create_(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, u32 addr, bool enable,
		    const char *iname, const char *fname,
		    int length, void **pobject)
{
	struct nvkm_xtensa *xtensa;
	int ret;

	ret = nvkm_engine_create_(parent, engine, oclass, enable, iname,
				  fname, length, pobject);
	xtensa = *pobject;
	if (ret)
		return ret;

	nv_subdev(xtensa)->intr = _nvkm_xtensa_intr;
	xtensa->addr = addr;
	return 0;
}

int
_nvkm_xtensa_init(struct nvkm_object *object)
{
	struct nvkm_xtensa *xtensa = (void *)object;
	struct nvkm_subdev *subdev = &xtensa->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 base = xtensa->addr;
	const struct firmware *fw;
	char name[32];
	int i, ret;
	u32 tmp;

	ret = nvkm_engine_init(&xtensa->engine);
	if (ret)
		return ret;

	if (!xtensa->gpu_fw) {
		snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
			 xtensa->addr >> 12);

		ret = request_firmware(&fw, name, nv_device_base(device));
		if (ret) {
			nvkm_warn(subdev, "unable to load firmware %s\n", name);
			return ret;
		}

		if (fw->size > 0x40000) {
			nvkm_warn(subdev, "firmware %s too large\n", name);
			release_firmware(fw);
			return -EINVAL;
		}

		ret = nvkm_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
				      &xtensa->gpu_fw);
		if (ret) {
			release_firmware(fw);
			return ret;
		}

		nvkm_debug(subdev, "Loading firmware to address: %010llx\n",
			   xtensa->gpu_fw->addr);

		nvkm_kmap(xtensa->gpu_fw);
		for (i = 0; i < fw->size / 4; i++)
			nvkm_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
		nvkm_done(xtensa->gpu_fw);
		release_firmware(fw);
	}

	nvkm_wr32(device, base + 0xd10, 0x1fffffff); /* ?? */
	nvkm_wr32(device, base + 0xd08, 0x0fffffff); /* ?? */

	nvkm_wr32(device, base + 0xd28, xtensa->unkd28); /* ?? */
	nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
	nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */

	nvkm_wr32(device, base + 0xcc0, xtensa->gpu_fw->addr >> 8); /* XT_REGION_BASE */
	nvkm_wr32(device, base + 0xcc4, 0x1c); /* XT_REGION_SETUP */
	nvkm_wr32(device, base + 0xcc8, xtensa->gpu_fw->size >> 8); /* XT_REGION_LIMIT */

	tmp = nvkm_rd32(device, 0x0);
	nvkm_wr32(device, base + 0xde0, tmp); /* SCRATCH_H2X */

	nvkm_wr32(device, base + 0xce8, 0xf); /* XT_REGION_SETUP */

	nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
	nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
	return 0;
}

int
_nvkm_xtensa_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_xtensa *xtensa = (void *)object;
	struct nvkm_device *device = xtensa->engine.subdev.device;
	const u32 base = xtensa->addr;

	nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */
	nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */

	if (!suspend)
		nvkm_gpuobj_ref(NULL, &xtensa->gpu_fw);

	return nvkm_engine_fini(&xtensa->engine, suspend);
}