1 /*
2  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 #include "priv.h"
23 
24 #include <subdev/mc.h>
25 
26 void
27 nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
28 		      u32 size, u16 tag, u8 port, bool secure)
29 {
30 	if (secure && !falcon->secret) {
31 		nvkm_warn(falcon->user,
32 			  "writing with secure tag on a non-secure falcon!\n");
33 		return;
34 	}
35 
36 	falcon->func->load_imem(falcon, data, start, size, tag, port,
37 				secure);
38 }
39 
40 void
41 nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
42 		      u32 size, u8 port)
43 {
44 	falcon->func->load_dmem(falcon, data, start, size, port);
45 }
46 
47 void
48 nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
49 		      void *data)
50 {
51 	falcon->func->read_dmem(falcon, start, size, port, data);
52 }
53 
54 void
55 nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *inst)
56 {
57 	if (!falcon->func->bind_context) {
58 		nvkm_error(falcon->user,
59 			   "Context binding not supported on this falcon!\n");
60 		return;
61 	}
62 
63 	falcon->func->bind_context(falcon, inst);
64 }
65 
66 void
67 nvkm_falcon_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
68 {
69 	falcon->func->set_start_addr(falcon, start_addr);
70 }
71 
72 void
73 nvkm_falcon_start(struct nvkm_falcon *falcon)
74 {
75 	falcon->func->start(falcon);
76 }
77 
78 int
79 nvkm_falcon_enable(struct nvkm_falcon *falcon)
80 {
81 	struct nvkm_device *device = falcon->owner->device;
82 	enum nvkm_devidx id = falcon->owner->index;
83 	int ret;
84 
85 	nvkm_mc_enable(device, id);
86 	ret = falcon->func->enable(falcon);
87 	if (ret) {
88 		nvkm_mc_disable(device, id);
89 		return ret;
90 	}
91 
92 	return 0;
93 }
94 
95 void
96 nvkm_falcon_disable(struct nvkm_falcon *falcon)
97 {
98 	struct nvkm_device *device = falcon->owner->device;
99 	enum nvkm_devidx id = falcon->owner->index;
100 
101 	/* already disabled, return or wait_idle will timeout */
102 	if (!nvkm_mc_enabled(device, id))
103 		return;
104 
105 	falcon->func->disable(falcon);
106 
107 	nvkm_mc_disable(device, id);
108 }
109 
/*
 * Reset the falcon by cycling it through disable/enable.
 *
 * Returns 0 on success, or the error from nvkm_falcon_enable().
 */
int
nvkm_falcon_reset(struct nvkm_falcon *falcon)
{
	nvkm_falcon_disable(falcon);

	return nvkm_falcon_enable(falcon);
}
116 
117 int
118 nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
119 {
120 	return falcon->func->wait_for_halt(falcon, ms);
121 }
122 
123 int
124 nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
125 {
126 	return falcon->func->clear_interrupt(falcon, mask);
127 }
128 
129 void
130 nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
131 {
132 	mutex_lock(&falcon->mutex);
133 	if (falcon->user == user) {
134 		nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
135 		falcon->user = NULL;
136 	}
137 	mutex_unlock(&falcon->mutex);
138 }
139 
140 int
141 nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
142 {
143 	mutex_lock(&falcon->mutex);
144 	if (falcon->user) {
145 		nvkm_error(user, "%s falcon already acquired by %s!\n",
146 			   falcon->name, nvkm_subdev_name[falcon->user->index]);
147 		mutex_unlock(&falcon->mutex);
148 		return -EBUSY;
149 	}
150 
151 	nvkm_debug(user, "acquired %s falcon\n", falcon->name);
152 	falcon->user = user;
153 	mutex_unlock(&falcon->mutex);
154 	return 0;
155 }
156 
157 void
158 nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
159 		 struct nvkm_subdev *subdev, const char *name, u32 addr,
160 		 struct nvkm_falcon *falcon)
161 {
162 	u32 reg;
163 
164 	falcon->func = func;
165 	falcon->owner = subdev;
166 	falcon->name = name;
167 	falcon->addr = addr;
168 	mutex_init(&falcon->mutex);
169 
170 	reg = nvkm_falcon_rd32(falcon, 0x12c);
171 	falcon->version = reg & 0xf;
172 	falcon->secret = (reg >> 4) & 0x3;
173 	falcon->code.ports = (reg >> 8) & 0xf;
174 	falcon->data.ports = (reg >> 12) & 0xf;
175 
176 	reg = nvkm_falcon_rd32(falcon, 0x108);
177 	falcon->code.limit = (reg & 0x1ff) << 8;
178 	falcon->data.limit = (reg & 0x3fe00) >> 1;
179 
180 	reg = nvkm_falcon_rd32(falcon, 0xc08);
181 	falcon->debug = (reg >> 20) & 0x1;
182 }
183 
184 void
185 nvkm_falcon_del(struct nvkm_falcon **pfalcon)
186 {
187 	if (*pfalcon) {
188 		kfree(*pfalcon);
189 		*pfalcon = NULL;
190 	}
191 }
192