// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/types.h>
#include "vpu.h"
#include "vpu_core.h"
#include "vpu_imx8q.h"
#include "vpu_rpc.h"

#define IMX8Q_CSR_CM0Px_ADDR_OFFSET			0x00000000
#define IMX8Q_CSR_CM0Px_CPUWAIT				0x00000004

#ifdef CONFIG_IMX_SCU
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/svc/misc.h>

#define VPU_DISABLE_BITS			0x7
#define VPU_IMX_DECODER_FUSE_OFFSET		14
#define VPU_ENCODER_MASK			0x1
#define VPU_DECODER_MASK			0x3UL
#define VPU_DECODER_H264_MASK			0x2UL
#define VPU_DECODER_HEVC_MASK			0x1UL

static u32 imx8q_fuse;

struct vpu_sc_msg_misc {
	struct imx_sc_rpc_msg hdr;
	u32 word;
} __packed;
#endif

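/*
 * Decoder-specific setup: set the clock enable and system reset bits in the
 * MFD (Malone decoder) block control registers.
 */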
int vpu_imx8q_setup_dec(struct vpu_dev *vpu)
{
	const off_t offset = DEC_MFD_XREG_SLV_BASE + MFD_BLK_CTRL;

	vpu_writel(vpu, offset + MFD_BLK_CTRL_MFD_SYS_CLOCK_ENABLE_SET, 0x1f);
	vpu_writel(vpu, offset + MFD_BLK_CTRL_MFD_SYS_RESET_SET, 0xffffffff);

	return 0;
}

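/* Encoder-specific setup: nothing to configure for the Windsor cores here. */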
int vpu_imx8q_setup_enc(struct vpu_dev *vpu)
{
	return 0;
}

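/*
 * Common setup: program the SCB block control registers to enable the
 * subsystem clocks and set the XMEM and cache reset bits, then configure
 * XMEM_CONTROL. The reads at offset 0x108 discard their result and are
 * performed only for their side effect.
 */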
int vpu_imx8q_setup(struct vpu_dev *vpu)
{
	const off_t offset = SCB_XREG_SLV_BASE + SCB_SCB_BLK_CTRL;

	vpu_readl(vpu, offset + 0x108);

	vpu_writel(vpu, offset + SCB_BLK_CTRL_SCB_CLK_ENABLE_SET, 0x1);
	vpu_writel(vpu, offset + 0x190, 0xffffffff);
	vpu_writel(vpu, offset + SCB_BLK_CTRL_XMEM_RESET_SET, 0xffffffff);
	vpu_writel(vpu, offset + SCB_BLK_CTRL_SCB_CLK_ENABLE_SET, 0xE);
	vpu_writel(vpu, offset + SCB_BLK_CTRL_CACHE_RESET_SET, 0x7);
	vpu_writel(vpu, XMEM_CONTROL, 0x102);

	vpu_readl(vpu, offset + 0x108);

	return 0;
}

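/* The encoder (Windsor) needs no extra work on reset. */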
static int vpu_imx8q_reset_enc(struct vpu_dev *vpu)
{
	return 0;
}

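/* Clear the MFD (Malone decoder) system reset bits. */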
static int vpu_imx8q_reset_dec(struct vpu_dev *vpu)
{
	const off_t offset = DEC_MFD_XREG_SLV_BASE + MFD_BLK_CTRL;

	vpu_writel(vpu, offset + MFD_BLK_CTRL_MFD_SYS_RESET_CLR, 0xffffffff);

	return 0;
}

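/*
 * Common reset path: clear the cache reset bits in the SCB block control,
 * then run the encoder and decoder specific reset hooks.
 */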
int vpu_imx8q_reset(struct vpu_dev *vpu)
{
	const off_t offset = SCB_XREG_SLV_BASE + SCB_SCB_BLK_CTRL;

	vpu_writel(vpu, offset + SCB_BLK_CTRL_CACHE_RESET_CLR, 0x7);
	vpu_imx8q_reset_enc(vpu);
	vpu_imx8q_reset_dec(vpu);

	return 0;
}

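/*
 * Fill in the RPC system configuration shared with the firmware: core 0 is
 * the Malone decoder, cores 1 and 2 are the Windsor encoders. Base addresses
 * are derived from the register base passed in @regs.
 */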
int vpu_imx8q_set_system_cfg_common(struct vpu_rpc_system_config *config, u32 regs, u32 core_id)
{
	if (!config)
		return -EINVAL;

	switch (core_id) {
	case 0:
		config->malone_base_addr[0] = regs + DEC_MFD_XREG_SLV_BASE;
		config->num_malones = 1;
		config->num_windsors = 0;
		break;
	case 1:
		config->windsor_base_addr[0] = regs + ENC_MFD_XREG_SLV_0_BASE;
		config->num_windsors = 1;
		config->num_malones = 0;
		break;
	case 2:
		config->windsor_base_addr[0] = regs + ENC_MFD_XREG_SLV_1_BASE;
		config->num_windsors = 1;
		config->num_malones = 0;
		break;
	default:
		return -EINVAL;
	}
	if (config->num_windsors) {
		config->windsor_irq_pin[0x0][0x0] = WINDSOR_PAL_IRQ_PIN_L;
		config->windsor_irq_pin[0x0][0x1] = WINDSOR_PAL_IRQ_PIN_H;
	}

	config->malone_base_addr[0x1] = 0x0;
	config->hif_offset[0x0] = MFD_HIF;
	config->hif_offset[0x1] = 0x0;

	config->dpv_base_addr = 0x0;
	config->dpv_irq_pin = 0x0;
	config->pixif_base_addr = regs + DEC_MFD_XREG_SLV_BASE + MFD_PIX_IF;
	config->cache_base_addr[0] = regs + MC_CACHE_0_BASE;
	config->cache_base_addr[1] = regs + MC_CACHE_1_BASE;

	return 0;
}

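/*
 * Start the core: write the firmware load address to the CM0+ CSR and clear
 * CPUWAIT so the MCU begins executing.
 */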
int vpu_imx8q_boot_core(struct vpu_core *core)
{
	csr_writel(core, IMX8Q_CSR_CM0Px_ADDR_OFFSET, core->fw.phys);
	csr_writel(core, IMX8Q_CSR_CM0Px_CPUWAIT, 0);
	return 0;
}

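/* Report the core as powered only once CPUWAIT has been cleared. */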
int vpu_imx8q_get_power_state(struct vpu_core *core)
{
	if (csr_readl(core, IMX8Q_CSR_CM0Px_CPUWAIT) == 1)
		return 0;
	return 1;
}

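/*
 * Patch the platform type, core id and a constant flag byte into the loaded
 * firmware image (byte offsets 16..18) before the core is booted.
 */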
int vpu_imx8q_on_firmware_loaded(struct vpu_core *core)
{
	u8 *p;

	p = core->fw.virt;
	p[16] = core->vpu->res->plat_type;
	p[17] = core->id;
	p[18] = 1;

	return 0;
}

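/*
 * Classify a DMA region by its offset from the VPU memory base: each window
 * in the table below maps to a cached, uncached or invalid memory type.
 */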
int vpu_imx8q_check_memory_region(dma_addr_t base, dma_addr_t addr, u32 size)
{
	const struct vpu_rpc_region_t imx8q_regions[] = {
		{0x00000000, 0x08000000, VPU_CORE_MEMORY_CACHED},
		{0x08000000, 0x10000000, VPU_CORE_MEMORY_UNCACHED},
		{0x10000000, 0x20000000, VPU_CORE_MEMORY_CACHED},
		{0x20000000, 0x40000000, VPU_CORE_MEMORY_UNCACHED}
	};
	int i;

	if (addr < base)
		return VPU_CORE_MEMORY_INVALID;

	addr -= base;
	for (i = 0; i < ARRAY_SIZE(imx8q_regions); i++) {
		const struct vpu_rpc_region_t *region = &imx8q_regions[i];

		if (addr >= region->start && addr + size < region->end)
			return region->type;
	}

	return VPU_CORE_MEMORY_INVALID;
}

#ifdef CONFIG_IMX_SCU
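/*
 * Read the VPU disable fuse word through the SCU OTP fuse read service.
 * The value is cached after the first successful read.
 */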
static u32 vpu_imx8q_get_fuse(void)
{
	static u32 fuse_got;
	struct imx_sc_ipc *ipc;
	struct vpu_sc_msg_misc msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	if (fuse_got)
		return imx8q_fuse;

	ret = imx_scu_get_handle(&ipc);
	if (ret) {
		pr_err("error: get scu handle fail: %d\n", ret);
		return 0;
	}

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_MISC;
	hdr->func = IMX_SC_MISC_FUNC_OTP_FUSE_READ;
	hdr->size = 2;

	msg.word = VPU_DISABLE_BITS;

	ret = imx_scu_call_rpc(ipc, &msg, true);
	if (ret)
		return 0;

	imx8q_fuse = msg.word;
	fuse_got = 1;
	return imx8q_fuse;
}

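/*
 * A set encoder fuse bit disables encoding; a fully set decoder fuse field
 * (both the H.264 and HEVC bits) disables decoding altogether.
 */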
bool vpu_imx8q_check_codec(enum vpu_core_type type)
{
	u32 fuse = vpu_imx8q_get_fuse();

	if (type == VPU_CORE_TYPE_ENC) {
		if (fuse & VPU_ENCODER_MASK)
			return false;
	} else if (type == VPU_CORE_TYPE_DEC) {
		fuse >>= VPU_IMX_DECODER_FUSE_OFFSET;
		fuse &= VPU_DECODER_MASK;

		if (fuse == VPU_DECODER_MASK)
			return false;
	}
	return true;
}

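/*
 * For the decoder, the fuse field may disable only HEVC or only H.264;
 * reject the matching pixel format, or everything if both bits are set.
 */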
bool vpu_imx8q_check_fmt(enum vpu_core_type type, u32 pixelfmt)
{
	u32 fuse = vpu_imx8q_get_fuse();

	if (type == VPU_CORE_TYPE_DEC) {
		fuse >>= VPU_IMX_DECODER_FUSE_OFFSET;
		fuse &= VPU_DECODER_MASK;

		if (fuse == VPU_DECODER_HEVC_MASK && pixelfmt == V4L2_PIX_FMT_HEVC)
			return false;
		if (fuse == VPU_DECODER_H264_MASK && pixelfmt == V4L2_PIX_FMT_H264)
			return false;
		if (fuse == VPU_DECODER_MASK)
			return false;
	}

	return true;
}
#else
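/* Without SCU support the fuses cannot be read; assume nothing is disabled. */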
bool vpu_imx8q_check_codec(enum vpu_core_type type)
{
	return true;
}

bool vpu_imx8q_check_fmt(enum vpu_core_type type, u32 pixelfmt)
{
	return true;
}
#endif