xref: /openbmc/linux/drivers/firmware/qcom_scm.c (revision a353e4a0)
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/qcom_scm.h>

#include <asm/outercache.h>
#include <asm/cacheflush.h>


#define QCOM_SCM_ENOMEM		-5
#define QCOM_SCM_EOPNOTSUPP	-4
#define QCOM_SCM_EINVAL_ADDR	-3
#define QCOM_SCM_EINVAL_ARG	-2
#define QCOM_SCM_ERROR		-1
#define QCOM_SCM_INTERRUPTED	1

#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

static DEFINE_MUTEX(qcom_scm_lock);

/**
 * struct qcom_scm_command - one SCM command buffer
 * @len: total available memory for command and response
 * @buf_offset: start of command buffer
 * @resp_hdr_offset: start of response buffer
 * @id: command to be executed
 * @buf: buffer returned from qcom_scm_get_command_buffer()
 *
 * An SCM command is laid out in memory as follows:
 *
 *	------------------- <--- struct qcom_scm_command
 *	| command header  |
 *	------------------- <--- qcom_scm_get_command_buffer()
 *	| command buffer  |
 *	------------------- <--- struct qcom_scm_response and
 *	| response header |      qcom_scm_command_to_response()
 *	------------------- <--- qcom_scm_get_response_buffer()
 *	| response buffer |
 *	-------------------
 *
 * There can be arbitrary padding between the headers and buffers so
 * you should always use the appropriate qcom_scm_get_*_buffer() routines
 * to access the buffers in a safe manner.
 */
struct qcom_scm_command {
	__le32 len;
	__le32 buf_offset;
	__le32 resp_hdr_offset;
	__le32 id;
	__le32 buf[0];
};

/**
 * struct qcom_scm_response - one SCM response buffer
 * @len: total available memory for response
 * @buf_offset: start of response data relative to start of qcom_scm_response
 * @is_complete: indicates if the command has finished processing
 */
struct qcom_scm_response {
	__le32 len;
	__le32 buf_offset;
	__le32 is_complete;
};

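/*
 * Illustrative worked example, not part of this driver: with the structures
 * above, a hypothetical 8-byte command buffer and 4-byte response buffer
 * would be sized by alloc_qcom_scm_command(8, 4) as
 *
 *	len             = 16 + 12 + 8 + 4 = 40 bytes (the allocation itself
 *	                  is rounded up to a whole page)
 *	buf_offset      = offsetof(struct qcom_scm_command, buf) = 16
 *	resp_hdr_offset = buf_offset + cmd_size = 24
 *
 * where 16 and 12 are sizeof(struct qcom_scm_command) and
 * sizeof(struct qcom_scm_response). The response's own buf_offset is
 * expected to be filled in by the secure world before it sets is_complete.
 */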
/**
 * alloc_qcom_scm_command() - Allocate an SCM command
 * @cmd_size: size of the command buffer
 * @resp_size: size of the response buffer
 *
 * Allocate an SCM command, including enough room for the command
 * and response headers as well as the command and response buffers.
 *
 * Returns a valid &qcom_scm_command on success or %NULL if the allocation fails.
 */
static struct qcom_scm_command *alloc_qcom_scm_command(size_t cmd_size, size_t resp_size)
{
	struct qcom_scm_command *cmd;
	size_t len = sizeof(*cmd) + sizeof(struct qcom_scm_response) + cmd_size +
		resp_size;
	u32 offset;

	cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
	if (cmd) {
		cmd->len = cpu_to_le32(len);
		offset = offsetof(struct qcom_scm_command, buf);
		cmd->buf_offset = cpu_to_le32(offset);
		cmd->resp_hdr_offset = cpu_to_le32(offset + cmd_size);
	}
	return cmd;
}

/**
 * free_qcom_scm_command() - Free an SCM command
 * @cmd: command to free
 *
 * Free an SCM command.
 */
static inline void free_qcom_scm_command(struct qcom_scm_command *cmd)
{
	kfree(cmd);
}

/**
 * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 */
static inline struct qcom_scm_response *qcom_scm_command_to_response(
		const struct qcom_scm_command *cmd)
{
	return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
}

/**
 * qcom_scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
{
	return (void *)cmd->buf;
}

/**
 * qcom_scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to the response buffer of a response.
 */
static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
{
	return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}

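/**
 * qcom_scm_remap_error() - Map an SCM error code onto a Linux errno
 * @err: raw error code returned by the secure world
 *
 * The secure world reports failures with its own QCOM_SCM_E* codes; remap
 * them onto standard errnos so callers only ever see normal kernel error
 * values.
 */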
static int qcom_scm_remap_error(int err)
{
	pr_err("qcom_scm_call failed with error code %d\n", err);
	switch (err) {
	case QCOM_SCM_ERROR:
		return -EIO;
	case QCOM_SCM_EINVAL_ADDR:
	case QCOM_SCM_EINVAL_ARG:
		return -EINVAL;
	case QCOM_SCM_EOPNOTSUPP:
		return -EOPNOTSUPP;
	case QCOM_SCM_ENOMEM:
		return -ENOMEM;
	}
	return -EINVAL;
}

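/*
 * smc() - Trap into the secure world with a legacy SCM call
 *
 * Loads the register convention used by this driver before issuing
 * "smc #0": r0 is set to 1, r1 points at a scratch context-id word and
 * r2 holds the physical address of the command structure. The call is
 * retried for as long as the secure world returns QCOM_SCM_INTERRUPTED
 * in r0.
 */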
static u32 smc(u32 cmd_addr)
{
	int context_id;
	register u32 r0 asm("r0") = 1;
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = cmd_addr;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r0")
			__asmeq("%2", "r1")
			__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc	#0	@ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2)
			: "r3");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	return r0;
}

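/*
 * __qcom_scm_call() - Issue a prepared command to the secure world
 *
 * Cleans the command structure out of the CPU and outer caches so the
 * secure world reads current data, traps via smc() and remaps any negative
 * SCM error code onto a Linux errno.
 */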
static int __qcom_scm_call(const struct qcom_scm_command *cmd)
{
	int ret;
	u32 cmd_addr = virt_to_phys(cmd);

	/*
	 * Flush the command buffer so that the secure world sees
	 * the correct data.
	 */
	__cpuc_flush_dcache_area((void *)cmd, cmd->len);
	outer_flush_range(cmd_addr, cmd_addr + cmd->len);

	ret = smc(cmd_addr);
	if (ret < 0)
		ret = qcom_scm_remap_error(ret);

	return ret;
}

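/*
 * qcom_scm_inv_range() - Invalidate a virtual address range from the caches
 *
 * Reads the Cache Type Register to find the smallest D-cache line size
 * (4 << CTR.DminLine bytes), aligns the range to it, then invalidates the
 * outer cache and each inner cache line so the CPU observes data the
 * secure world has written to memory.
 */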
static void qcom_scm_inv_range(unsigned long start, unsigned long end)
{
	u32 cacheline_size, ctr;

	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
	cacheline_size = 4 << ((ctr >> 16) & 0xf);

	start = round_down(start, cacheline_size);
	end = round_up(end, cacheline_size);
	outer_inv_range(start, end);
	while (start < end) {
		asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
		     : "memory");
		start += cacheline_size;
	}
	dsb();
	isb();
}

/**
 * qcom_scm_call() - Send an SCM command
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 *
 * A note on cache maintenance:
 * Any buffers that are expected to be accessed by the secure world must be
 * flushed before invoking qcom_scm_call and invalidated in the cache
 * immediately after qcom_scm_call returns. Cache maintenance on the command
 * and response buffers is taken care of by qcom_scm_call; however, callers
 * are responsible for any other cached buffers passed over to the secure
 * world (see the illustrative sketch after this function).
 */
static int qcom_scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf,
			size_t cmd_len, void *resp_buf, size_t resp_len)
{
	int ret;
	struct qcom_scm_command *cmd;
	struct qcom_scm_response *rsp;
	unsigned long start, end;

	cmd = alloc_qcom_scm_command(cmd_len, resp_len);
	if (!cmd)
		return -ENOMEM;

	cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
	if (cmd_buf)
		memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	mutex_lock(&qcom_scm_lock);
	ret = __qcom_scm_call(cmd);
	mutex_unlock(&qcom_scm_lock);
	if (ret)
		goto out;

	rsp = qcom_scm_command_to_response(cmd);
	start = (unsigned long)rsp;

	do {
		qcom_scm_inv_range(start, start + sizeof(*rsp));
	} while (!rsp->is_complete);

	end = (unsigned long)qcom_scm_get_response_buffer(rsp) + resp_len;
	qcom_scm_inv_range(start, end);

	if (resp_buf)
		memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), resp_len);
out:
	free_qcom_scm_command(cmd);
	return ret;
}

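/*
 * Illustrative sketch only, not part of this driver: one way a caller could
 * honour the cache-maintenance contract documented above when it hands the
 * secure world a pointer to an out-of-line buffer. The service/command IDs
 * (0x42/0x1), the function name and the request layout are hypothetical.
 */
#if 0
static int qcom_scm_example_share_blob(void *blob, size_t blob_len)
{
	struct {
		__le32 addr;
		__le32 len;
	} req;
	unsigned long start = (unsigned long)blob;
	int ret;

	req.addr = cpu_to_le32(virt_to_phys(blob));
	req.len = cpu_to_le32(blob_len);

	/* Flush the out-of-line buffer before the secure world touches it. */
	__cpuc_flush_dcache_area(blob, blob_len);
	outer_flush_range(virt_to_phys(blob), virt_to_phys(blob) + blob_len);

	ret = qcom_scm_call(0x42, 0x1, &req, sizeof(req), NULL, 0);

	/* Invalidate so the CPU sees anything the secure world wrote back. */
	qcom_scm_inv_range(start, start + blob_len);

	return ret;
}
#endif

/**
 * qcom_scm_get_version() - Query the secure world interface version
 *
 * Asks the secure world for its interface version, which it reports back
 * in r1. The result is cached after the first successful call, so only the
 * first caller pays for the SMC round trip.
 */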
u32 qcom_scm_get_version(void)
{
	int context_id;
	static u32 version = -1;
	register u32 r0 asm("r0");
	register u32 r1 asm("r1");

	if (version != -1)
		return version;

	mutex_lock(&qcom_scm_lock);

	r0 = 0x1 << 8;
	r1 = (u32)&context_id;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r0")
			__asmeq("%3", "r1")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc	#0	@ switch to secure world\n"
			: "=r" (r0), "=r" (r1)
			: "r" (r0), "r" (r1)
			: "r2", "r3");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	version = r1;
	mutex_unlock(&qcom_scm_lock);

	return version;
}
EXPORT_SYMBOL(qcom_scm_get_version);

#define QCOM_SCM_SVC_BOOT			0x1
#define QCOM_SCM_BOOT_ADDR			0x1
/*
 * Set the cold/warm boot address for one of the CPU cores.
 */
static int qcom_scm_set_boot_addr(u32 addr, int flags)
{
	struct {
		__le32 flags;
		__le32 addr;
	} cmd;

	cmd.addr = cpu_to_le32(addr);
	cmd.flags = cpu_to_le32(flags);
	return qcom_scm_call(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
			&cmd, sizeof(cmd), NULL, 0);
}

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range will be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	int flags = 0;
	int cpu;
	int scm_cb_flags[] = {
		QCOM_SCM_FLAG_COLDBOOT_CPU0,
		QCOM_SCM_FLAG_COLDBOOT_CPU1,
		QCOM_SCM_FLAG_COLDBOOT_CPU2,
		QCOM_SCM_FLAG_COLDBOOT_CPU3,
	};

	if (!cpus || cpumask_empty(cpus))
		return -EINVAL;

	for_each_cpu(cpu, cpus) {
		if (cpu < ARRAY_SIZE(scm_cb_flags))
			flags |= scm_cb_flags[cpu];
		else
			set_cpu_present(cpu, false);
	}

	return qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
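
/*
 * Illustrative sketch only, not part of this driver: platform SMP bring-up
 * code might register its secondary-CPU entry point for every present CPU
 * roughly like this. The entry symbol and function name are stand-ins.
 */
#if 0
static void __init example_smp_prepare_cpus(unsigned int max_cpus)
{
	if (qcom_scm_set_cold_boot_addr(example_secondary_startup,
					cpu_present_mask))
		pr_warn("%s: failed to set cold boot address\n", __func__);
}
#endif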