xref: /openbmc/linux/drivers/media/platform/qcom/venus/hfi_venus.c (revision b97d6790d03b763eca08847a9a5869a4291b9f9a)
197fb5e8dSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2d96d3f30SStanimir Varbanov /*
3d96d3f30SStanimir Varbanov  * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
4d96d3f30SStanimir Varbanov  * Copyright (C) 2017 Linaro Ltd.
5d96d3f30SStanimir Varbanov  */
6d96d3f30SStanimir Varbanov 
7d96d3f30SStanimir Varbanov #include <linux/delay.h>
8d96d3f30SStanimir Varbanov #include <linux/device.h>
9d96d3f30SStanimir Varbanov #include <linux/dma-mapping.h>
10d96d3f30SStanimir Varbanov #include <linux/interrupt.h>
11d96d3f30SStanimir Varbanov #include <linux/iopoll.h>
12d96d3f30SStanimir Varbanov #include <linux/kernel.h>
13d96d3f30SStanimir Varbanov #include <linux/slab.h>
14d96d3f30SStanimir Varbanov 
15d96d3f30SStanimir Varbanov #include "core.h"
16d96d3f30SStanimir Varbanov #include "hfi_cmds.h"
17d96d3f30SStanimir Varbanov #include "hfi_msgs.h"
18d96d3f30SStanimir Varbanov #include "hfi_venus.h"
19d96d3f30SStanimir Varbanov #include "hfi_venus_io.h"
205df317c8SVikash Garodia #include "firmware.h"
21d96d3f30SStanimir Varbanov 
22d96d3f30SStanimir Varbanov #define HFI_MASK_QHDR_TX_TYPE		0xff000000
23d96d3f30SStanimir Varbanov #define HFI_MASK_QHDR_RX_TYPE		0x00ff0000
24d96d3f30SStanimir Varbanov #define HFI_MASK_QHDR_PRI_TYPE		0x0000ff00
25d96d3f30SStanimir Varbanov #define HFI_MASK_QHDR_ID_TYPE		0x000000ff
26d96d3f30SStanimir Varbanov 
27d96d3f30SStanimir Varbanov #define HFI_HOST_TO_CTRL_CMD_Q		0
28d96d3f30SStanimir Varbanov #define HFI_CTRL_TO_HOST_MSG_Q		1
29d96d3f30SStanimir Varbanov #define HFI_CTRL_TO_HOST_DBG_Q		2
30d96d3f30SStanimir Varbanov #define HFI_MASK_QHDR_STATUS		0x000000ff
31d96d3f30SStanimir Varbanov 
32d96d3f30SStanimir Varbanov #define IFACEQ_NUM			3
33d96d3f30SStanimir Varbanov #define IFACEQ_CMD_IDX			0
34d96d3f30SStanimir Varbanov #define IFACEQ_MSG_IDX			1
35d96d3f30SStanimir Varbanov #define IFACEQ_DBG_IDX			2
36d96d3f30SStanimir Varbanov #define IFACEQ_MAX_BUF_COUNT		50
37d96d3f30SStanimir Varbanov #define IFACEQ_MAX_PARALLEL_CLNTS	16
38d96d3f30SStanimir Varbanov #define IFACEQ_DFLT_QHDR		0x01010000
39d96d3f30SStanimir Varbanov 
40d96d3f30SStanimir Varbanov #define POLL_INTERVAL_US		50
41d96d3f30SStanimir Varbanov 
42d96d3f30SStanimir Varbanov #define IFACEQ_MAX_PKT_SIZE		1024
43d96d3f30SStanimir Varbanov #define IFACEQ_MED_PKT_SIZE		768
44d96d3f30SStanimir Varbanov #define IFACEQ_MIN_PKT_SIZE		8
45d96d3f30SStanimir Varbanov #define IFACEQ_VAR_SMALL_PKT_SIZE	100
46d96d3f30SStanimir Varbanov #define IFACEQ_VAR_LARGE_PKT_SIZE	512
47d96d3f30SStanimir Varbanov #define IFACEQ_VAR_HUGE_PKT_SIZE	(1024 * 12)
48d96d3f30SStanimir Varbanov 
49d96d3f30SStanimir Varbanov struct hfi_queue_table_header {
50d96d3f30SStanimir Varbanov 	u32 version;
51d96d3f30SStanimir Varbanov 	u32 size;
52d96d3f30SStanimir Varbanov 	u32 qhdr0_offset;
53d96d3f30SStanimir Varbanov 	u32 qhdr_size;
54d96d3f30SStanimir Varbanov 	u32 num_q;
55d96d3f30SStanimir Varbanov 	u32 num_active_q;
56d96d3f30SStanimir Varbanov };
57d96d3f30SStanimir Varbanov 
58d96d3f30SStanimir Varbanov struct hfi_queue_header {
59d96d3f30SStanimir Varbanov 	u32 status;
60d96d3f30SStanimir Varbanov 	u32 start_addr;
61d96d3f30SStanimir Varbanov 	u32 type;
62d96d3f30SStanimir Varbanov 	u32 q_size;
63d96d3f30SStanimir Varbanov 	u32 pkt_size;
64d96d3f30SStanimir Varbanov 	u32 pkt_drop_cnt;
65d96d3f30SStanimir Varbanov 	u32 rx_wm;
66d96d3f30SStanimir Varbanov 	u32 tx_wm;
67d96d3f30SStanimir Varbanov 	u32 rx_req;
68d96d3f30SStanimir Varbanov 	u32 tx_req;
69d96d3f30SStanimir Varbanov 	u32 rx_irq_status;
70d96d3f30SStanimir Varbanov 	u32 tx_irq_status;
71d96d3f30SStanimir Varbanov 	u32 read_idx;
72d96d3f30SStanimir Varbanov 	u32 write_idx;
73d96d3f30SStanimir Varbanov };
74d96d3f30SStanimir Varbanov 
75d96d3f30SStanimir Varbanov #define IFACEQ_TABLE_SIZE	\
76d96d3f30SStanimir Varbanov 	(sizeof(struct hfi_queue_table_header) +	\
77d96d3f30SStanimir Varbanov 	 sizeof(struct hfi_queue_header) * IFACEQ_NUM)
78d96d3f30SStanimir Varbanov 
79d96d3f30SStanimir Varbanov #define IFACEQ_QUEUE_SIZE	(IFACEQ_MAX_PKT_SIZE *	\
80d96d3f30SStanimir Varbanov 	IFACEQ_MAX_BUF_COUNT * IFACEQ_MAX_PARALLEL_CLNTS)
81d96d3f30SStanimir Varbanov 
82d96d3f30SStanimir Varbanov #define IFACEQ_GET_QHDR_START_ADDR(ptr, i)	\
83d96d3f30SStanimir Varbanov 	(void *)(((ptr) + sizeof(struct hfi_queue_table_header)) +	\
84d96d3f30SStanimir Varbanov 		((i) * sizeof(struct hfi_queue_header)))
85d96d3f30SStanimir Varbanov 
86d96d3f30SStanimir Varbanov #define QDSS_SIZE		SZ_4K
87d96d3f30SStanimir Varbanov #define SFR_SIZE		SZ_4K
88d96d3f30SStanimir Varbanov #define QUEUE_SIZE		\
89d96d3f30SStanimir Varbanov 	(IFACEQ_TABLE_SIZE + (IFACEQ_QUEUE_SIZE * IFACEQ_NUM))
90d96d3f30SStanimir Varbanov 
91d96d3f30SStanimir Varbanov #define ALIGNED_QDSS_SIZE	ALIGN(QDSS_SIZE, SZ_4K)
92d96d3f30SStanimir Varbanov #define ALIGNED_SFR_SIZE	ALIGN(SFR_SIZE, SZ_4K)
93d96d3f30SStanimir Varbanov #define ALIGNED_QUEUE_SIZE	ALIGN(QUEUE_SIZE, SZ_4K)
94d96d3f30SStanimir Varbanov #define SHARED_QSIZE		ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
95d96d3f30SStanimir Varbanov 				      ALIGNED_QDSS_SIZE, SZ_1M)
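
/*
 * Rough size check, assuming 4-byte u32 fields and the default constants
 * above: each interface queue is IFACEQ_MAX_PKT_SIZE * IFACEQ_MAX_BUF_COUNT *
 * IFACEQ_MAX_PARALLEL_CLNTS = 1024 * 50 * 16 = 800 KiB, the queue table is
 * only a few hundred bytes, so ALIGNED_QUEUE_SIZE comes to a bit under
 * 2.4 MiB and SHARED_QSIZE rounds the whole shared region (SFR + queues +
 * QDSS) up to 3 MiB.
 */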
96d96d3f30SStanimir Varbanov 
97d96d3f30SStanimir Varbanov struct mem_desc {
98d96d3f30SStanimir Varbanov 	dma_addr_t da;	/* device address */
99d96d3f30SStanimir Varbanov 	void *kva;	/* kernel virtual address */
100d96d3f30SStanimir Varbanov 	u32 size;
101d96d3f30SStanimir Varbanov 	unsigned long attrs;
102d96d3f30SStanimir Varbanov };
103d96d3f30SStanimir Varbanov 
104d96d3f30SStanimir Varbanov struct iface_queue {
105d96d3f30SStanimir Varbanov 	struct hfi_queue_header *qhdr;
106d96d3f30SStanimir Varbanov 	struct mem_desc qmem;
107d96d3f30SStanimir Varbanov };
108d96d3f30SStanimir Varbanov 
109d96d3f30SStanimir Varbanov enum venus_state {
110d96d3f30SStanimir Varbanov 	VENUS_STATE_DEINIT = 1,
111d96d3f30SStanimir Varbanov 	VENUS_STATE_INIT,
112d96d3f30SStanimir Varbanov };
113d96d3f30SStanimir Varbanov 
114d96d3f30SStanimir Varbanov struct venus_hfi_device {
115d96d3f30SStanimir Varbanov 	struct venus_core *core;
116d96d3f30SStanimir Varbanov 	u32 irq_status;
117d96d3f30SStanimir Varbanov 	u32 last_packet_type;
118d96d3f30SStanimir Varbanov 	bool power_enabled;
119d96d3f30SStanimir Varbanov 	bool suspended;
120d96d3f30SStanimir Varbanov 	enum venus_state state;
121d96d3f30SStanimir Varbanov 	/* serialize read / write to the shared memory */
122d96d3f30SStanimir Varbanov 	struct mutex lock;
123d96d3f30SStanimir Varbanov 	struct completion pwr_collapse_prep;
124d96d3f30SStanimir Varbanov 	struct completion release_resource;
125d96d3f30SStanimir Varbanov 	struct mem_desc ifaceq_table;
126d96d3f30SStanimir Varbanov 	struct mem_desc sfr;
127d96d3f30SStanimir Varbanov 	struct iface_queue queues[IFACEQ_NUM];
128d96d3f30SStanimir Varbanov 	u8 pkt_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
129d96d3f30SStanimir Varbanov 	u8 dbg_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
130d96d3f30SStanimir Varbanov };
131d96d3f30SStanimir Varbanov 
132d96d3f30SStanimir Varbanov static bool venus_pkt_debug;
133f08abe6aSStanimir Varbanov int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
134d96d3f30SStanimir Varbanov static bool venus_fw_low_power_mode = true;
135d96d3f30SStanimir Varbanov static int venus_hw_rsp_timeout = 1000;
136d96d3f30SStanimir Varbanov static bool venus_fw_coverage;
137d96d3f30SStanimir Varbanov 
138d96d3f30SStanimir Varbanov static void venus_set_state(struct venus_hfi_device *hdev,
139d96d3f30SStanimir Varbanov 			    enum venus_state state)
140d96d3f30SStanimir Varbanov {
141d96d3f30SStanimir Varbanov 	mutex_lock(&hdev->lock);
142d96d3f30SStanimir Varbanov 	hdev->state = state;
143d96d3f30SStanimir Varbanov 	mutex_unlock(&hdev->lock);
144d96d3f30SStanimir Varbanov }
145d96d3f30SStanimir Varbanov 
146d96d3f30SStanimir Varbanov static bool venus_is_valid_state(struct venus_hfi_device *hdev)
147d96d3f30SStanimir Varbanov {
148d96d3f30SStanimir Varbanov 	return hdev->state != VENUS_STATE_DEINIT;
149d96d3f30SStanimir Varbanov }
150d96d3f30SStanimir Varbanov 
151d96d3f30SStanimir Varbanov static void venus_dump_packet(struct venus_hfi_device *hdev, const void *packet)
152d96d3f30SStanimir Varbanov {
153d96d3f30SStanimir Varbanov 	size_t pkt_size = *(u32 *)packet;
154d96d3f30SStanimir Varbanov 
155d96d3f30SStanimir Varbanov 	if (!venus_pkt_debug)
156d96d3f30SStanimir Varbanov 		return;
157d96d3f30SStanimir Varbanov 
158d96d3f30SStanimir Varbanov 	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, packet,
159d96d3f30SStanimir Varbanov 		       pkt_size, true);
160d96d3f30SStanimir Varbanov }
161d96d3f30SStanimir Varbanov 
162d96d3f30SStanimir Varbanov static int venus_write_queue(struct venus_hfi_device *hdev,
163d96d3f30SStanimir Varbanov 			     struct iface_queue *queue,
164d96d3f30SStanimir Varbanov 			     void *packet, u32 *rx_req)
165d96d3f30SStanimir Varbanov {
166d96d3f30SStanimir Varbanov 	struct hfi_queue_header *qhdr;
167d96d3f30SStanimir Varbanov 	u32 dwords, new_wr_idx;
168d96d3f30SStanimir Varbanov 	u32 empty_space, rd_idx, wr_idx, qsize;
169d96d3f30SStanimir Varbanov 	u32 *wr_ptr;
170d96d3f30SStanimir Varbanov 
171d96d3f30SStanimir Varbanov 	if (!queue->qmem.kva)
172d96d3f30SStanimir Varbanov 		return -EINVAL;
173d96d3f30SStanimir Varbanov 
174d96d3f30SStanimir Varbanov 	qhdr = queue->qhdr;
175d96d3f30SStanimir Varbanov 	if (!qhdr)
176d96d3f30SStanimir Varbanov 		return -EINVAL;
177d96d3f30SStanimir Varbanov 
178d96d3f30SStanimir Varbanov 	venus_dump_packet(hdev, packet);
179d96d3f30SStanimir Varbanov 
180d96d3f30SStanimir Varbanov 	dwords = (*(u32 *)packet) >> 2;
181d96d3f30SStanimir Varbanov 	if (!dwords)
182d96d3f30SStanimir Varbanov 		return -EINVAL;
183d96d3f30SStanimir Varbanov 
184d96d3f30SStanimir Varbanov 	rd_idx = qhdr->read_idx;
185d96d3f30SStanimir Varbanov 	wr_idx = qhdr->write_idx;
186d96d3f30SStanimir Varbanov 	qsize = qhdr->q_size;
187d96d3f30SStanimir Varbanov 	/* ensure rd/wr indices are read from memory */
188d96d3f30SStanimir Varbanov 	rmb();
189d96d3f30SStanimir Varbanov 
190d96d3f30SStanimir Varbanov 	if (wr_idx >= rd_idx)
191d96d3f30SStanimir Varbanov 		empty_space = qsize - (wr_idx - rd_idx);
192d96d3f30SStanimir Varbanov 	else
193d96d3f30SStanimir Varbanov 		empty_space = rd_idx - wr_idx;
194d96d3f30SStanimir Varbanov 
195d96d3f30SStanimir Varbanov 	if (empty_space <= dwords) {
196d96d3f30SStanimir Varbanov 		qhdr->tx_req = 1;
197d96d3f30SStanimir Varbanov 		/* ensure tx_req is updated in memory */
198d96d3f30SStanimir Varbanov 		wmb();
199d96d3f30SStanimir Varbanov 		return -ENOSPC;
200d96d3f30SStanimir Varbanov 	}
201d96d3f30SStanimir Varbanov 
202d96d3f30SStanimir Varbanov 	qhdr->tx_req = 0;
203d96d3f30SStanimir Varbanov 	/* ensure tx_req is updated in memory */
204d96d3f30SStanimir Varbanov 	wmb();
205d96d3f30SStanimir Varbanov 
206d96d3f30SStanimir Varbanov 	new_wr_idx = wr_idx + dwords;
207d96d3f30SStanimir Varbanov 	wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
208*074aed64SVikash Garodia 
209*074aed64SVikash Garodia 	if (wr_ptr < (u32 *)queue->qmem.kva ||
210*074aed64SVikash Garodia 	    wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr)))
211*074aed64SVikash Garodia 		return -EINVAL;
212*074aed64SVikash Garodia 
213d96d3f30SStanimir Varbanov 	if (new_wr_idx < qsize) {
214d96d3f30SStanimir Varbanov 		memcpy(wr_ptr, packet, dwords << 2);
215d96d3f30SStanimir Varbanov 	} else {
216d96d3f30SStanimir Varbanov 		size_t len;
217d96d3f30SStanimir Varbanov 
218d96d3f30SStanimir Varbanov 		new_wr_idx -= qsize;
219d96d3f30SStanimir Varbanov 		len = (dwords - new_wr_idx) << 2;
220d96d3f30SStanimir Varbanov 		memcpy(wr_ptr, packet, len);
221d96d3f30SStanimir Varbanov 		memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2);
222d96d3f30SStanimir Varbanov 	}
223d96d3f30SStanimir Varbanov 
224d96d3f30SStanimir Varbanov 	/* make sure packet is written before updating the write index */
225d96d3f30SStanimir Varbanov 	wmb();
226d96d3f30SStanimir Varbanov 
227d96d3f30SStanimir Varbanov 	qhdr->write_idx = new_wr_idx;
228d96d3f30SStanimir Varbanov 	*rx_req = qhdr->rx_req ? 1 : 0;
229d96d3f30SStanimir Varbanov 
230d96d3f30SStanimir Varbanov 	/* make sure write index is updated before an interrupt is raised */
231d96d3f30SStanimir Varbanov 	mb();
232d96d3f30SStanimir Varbanov 
233d96d3f30SStanimir Varbanov 	return 0;
234d96d3f30SStanimir Varbanov }
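
/*
 * Worked example of the wrap-around handling in venus_write_queue() above,
 * with hypothetical values: qsize = 0x100 dwords, rd_idx = 0xf0,
 * wr_idx = 0xf8 and a packet of dwords = 0x20. empty_space is
 * 0x100 - (0xf8 - 0xf0) = 0xf8, so the packet fits. new_wr_idx = 0x118
 * exceeds qsize and wraps to 0x18: the first (0x20 - 0x18) << 2 = 0x20 bytes
 * are copied at wr_ptr (the tail of the ring) and the remaining
 * 0x18 << 2 = 0x60 bytes continue from the start of queue->qmem.kva.
 */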
235d96d3f30SStanimir Varbanov 
236d96d3f30SStanimir Varbanov static int venus_read_queue(struct venus_hfi_device *hdev,
237d96d3f30SStanimir Varbanov 			    struct iface_queue *queue, void *pkt, u32 *tx_req)
238d96d3f30SStanimir Varbanov {
239d96d3f30SStanimir Varbanov 	struct hfi_queue_header *qhdr;
240d96d3f30SStanimir Varbanov 	u32 dwords, new_rd_idx;
241d96d3f30SStanimir Varbanov 	u32 rd_idx, wr_idx, type, qsize;
242d96d3f30SStanimir Varbanov 	u32 *rd_ptr;
243d96d3f30SStanimir Varbanov 	u32 recv_request = 0;
244d96d3f30SStanimir Varbanov 	int ret = 0;
245d96d3f30SStanimir Varbanov 
246d96d3f30SStanimir Varbanov 	if (!queue->qmem.kva)
247d96d3f30SStanimir Varbanov 		return -EINVAL;
248d96d3f30SStanimir Varbanov 
249d96d3f30SStanimir Varbanov 	qhdr = queue->qhdr;
250d96d3f30SStanimir Varbanov 	if (!qhdr)
251d96d3f30SStanimir Varbanov 		return -EINVAL;
252d96d3f30SStanimir Varbanov 
253d96d3f30SStanimir Varbanov 	type = qhdr->type;
254d96d3f30SStanimir Varbanov 	rd_idx = qhdr->read_idx;
255d96d3f30SStanimir Varbanov 	wr_idx = qhdr->write_idx;
256d96d3f30SStanimir Varbanov 	qsize = qhdr->q_size;
257d96d3f30SStanimir Varbanov 
258d96d3f30SStanimir Varbanov 	/* make sure data is valid before using it */
259d96d3f30SStanimir Varbanov 	rmb();
260d96d3f30SStanimir Varbanov 
261d96d3f30SStanimir Varbanov 	/*
262d96d3f30SStanimir Varbanov 	 * Do not set the receive request for the debug queue; if set, Venus
263d96d3f30SStanimir Varbanov 	 * generates an interrupt for debug messages even when there is no
264d96d3f30SStanimir Varbanov 	 * response message available. In general the debug queue will not
265d96d3f30SStanimir Varbanov 	 * become full, as it is emptied out on every interrupt from Venus.
266d96d3f30SStanimir Varbanov 	 * Venus will anyway generate an interrupt if it is full.
267d96d3f30SStanimir Varbanov 	 */
268d96d3f30SStanimir Varbanov 	if (type & HFI_CTRL_TO_HOST_MSG_Q)
269d96d3f30SStanimir Varbanov 		recv_request = 1;
270d96d3f30SStanimir Varbanov 
271d96d3f30SStanimir Varbanov 	if (rd_idx == wr_idx) {
272d96d3f30SStanimir Varbanov 		qhdr->rx_req = recv_request;
273d96d3f30SStanimir Varbanov 		*tx_req = 0;
274d96d3f30SStanimir Varbanov 		/* update rx_req field in memory */
275d96d3f30SStanimir Varbanov 		wmb();
276d96d3f30SStanimir Varbanov 		return -ENODATA;
277d96d3f30SStanimir Varbanov 	}
278d96d3f30SStanimir Varbanov 
279d96d3f30SStanimir Varbanov 	rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
280*074aed64SVikash Garodia 
281*074aed64SVikash Garodia 	if (rd_ptr < (u32 *)queue->qmem.kva ||
282*074aed64SVikash Garodia 	    rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr)))
283*074aed64SVikash Garodia 		return -EINVAL;
284*074aed64SVikash Garodia 
285d96d3f30SStanimir Varbanov 	dwords = *rd_ptr >> 2;
286d96d3f30SStanimir Varbanov 	if (!dwords)
287d96d3f30SStanimir Varbanov 		return -EINVAL;
288d96d3f30SStanimir Varbanov 
289d96d3f30SStanimir Varbanov 	new_rd_idx = rd_idx + dwords;
290d96d3f30SStanimir Varbanov 	if (((dwords << 2) <= IFACEQ_VAR_HUGE_PKT_SIZE) && rd_idx <= qsize) {
291d96d3f30SStanimir Varbanov 		if (new_rd_idx < qsize) {
292d96d3f30SStanimir Varbanov 			memcpy(pkt, rd_ptr, dwords << 2);
293d96d3f30SStanimir Varbanov 		} else {
294d96d3f30SStanimir Varbanov 			size_t len;
295d96d3f30SStanimir Varbanov 
296d96d3f30SStanimir Varbanov 			new_rd_idx -= qsize;
297d96d3f30SStanimir Varbanov 			len = (dwords - new_rd_idx) << 2;
298d96d3f30SStanimir Varbanov 			memcpy(pkt, rd_ptr, len);
299d96d3f30SStanimir Varbanov 			memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
300d96d3f30SStanimir Varbanov 		}
301d96d3f30SStanimir Varbanov 	} else {
302d96d3f30SStanimir Varbanov 		/* bad packet received, dropping */
303d96d3f30SStanimir Varbanov 		new_rd_idx = qhdr->write_idx;
304d96d3f30SStanimir Varbanov 		ret = -EBADMSG;
305d96d3f30SStanimir Varbanov 	}
306d96d3f30SStanimir Varbanov 
307d96d3f30SStanimir Varbanov 	/* ensure the packet is read before updating read index */
308d96d3f30SStanimir Varbanov 	rmb();
309d96d3f30SStanimir Varbanov 
310d96d3f30SStanimir Varbanov 	qhdr->read_idx = new_rd_idx;
311d96d3f30SStanimir Varbanov 	/* ensure the updated read index is written to memory */
312d96d3f30SStanimir Varbanov 	wmb();
313d96d3f30SStanimir Varbanov 
314d96d3f30SStanimir Varbanov 	rd_idx = qhdr->read_idx;
315d96d3f30SStanimir Varbanov 	wr_idx = qhdr->write_idx;
316d96d3f30SStanimir Varbanov 	/* ensure rd/wr indices are read from memory */
317d96d3f30SStanimir Varbanov 	rmb();
318d96d3f30SStanimir Varbanov 
319d96d3f30SStanimir Varbanov 	if (rd_idx != wr_idx)
320d96d3f30SStanimir Varbanov 		qhdr->rx_req = 0;
321d96d3f30SStanimir Varbanov 	else
322d96d3f30SStanimir Varbanov 		qhdr->rx_req = recv_request;
323d96d3f30SStanimir Varbanov 
324d96d3f30SStanimir Varbanov 	*tx_req = qhdr->tx_req ? 1 : 0;
325d96d3f30SStanimir Varbanov 
326d96d3f30SStanimir Varbanov 	/* ensure rx_req is stored to memory and tx_req is loaded from memory */
327d96d3f30SStanimir Varbanov 	mb();
328d96d3f30SStanimir Varbanov 
329d96d3f30SStanimir Varbanov 	venus_dump_packet(hdev, pkt);
330d96d3f30SStanimir Varbanov 
331d96d3f30SStanimir Varbanov 	return ret;
332d96d3f30SStanimir Varbanov }
333d96d3f30SStanimir Varbanov 
334d96d3f30SStanimir Varbanov static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
335d96d3f30SStanimir Varbanov 		       u32 size)
336d96d3f30SStanimir Varbanov {
337d96d3f30SStanimir Varbanov 	struct device *dev = hdev->core->dev;
338d96d3f30SStanimir Varbanov 
339d96d3f30SStanimir Varbanov 	desc->attrs = DMA_ATTR_WRITE_COMBINE;
340d96d3f30SStanimir Varbanov 	desc->size = ALIGN(size, SZ_4K);
341d96d3f30SStanimir Varbanov 
342cd1a77e3SStanimir Varbanov 	desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
343d96d3f30SStanimir Varbanov 				    desc->attrs);
344d96d3f30SStanimir Varbanov 	if (!desc->kva)
345d96d3f30SStanimir Varbanov 		return -ENOMEM;
346d96d3f30SStanimir Varbanov 
347d96d3f30SStanimir Varbanov 	return 0;
348d96d3f30SStanimir Varbanov }
349d96d3f30SStanimir Varbanov 
350d96d3f30SStanimir Varbanov static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem)
351d96d3f30SStanimir Varbanov {
352d96d3f30SStanimir Varbanov 	struct device *dev = hdev->core->dev;
353d96d3f30SStanimir Varbanov 
354d96d3f30SStanimir Varbanov 	dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs);
355d96d3f30SStanimir Varbanov }
356d96d3f30SStanimir Varbanov 
357d96d3f30SStanimir Varbanov static void venus_set_registers(struct venus_hfi_device *hdev)
358d96d3f30SStanimir Varbanov {
359d96d3f30SStanimir Varbanov 	const struct venus_resources *res = hdev->core->res;
360d96d3f30SStanimir Varbanov 	const struct reg_val *tbl = res->reg_tbl;
361d96d3f30SStanimir Varbanov 	unsigned int count = res->reg_tbl_size;
362d96d3f30SStanimir Varbanov 	unsigned int i;
363d96d3f30SStanimir Varbanov 
364d96d3f30SStanimir Varbanov 	for (i = 0; i < count; i++)
365ff2a7013SBryan O'Donoghue 		writel(tbl[i].value, hdev->core->base + tbl[i].reg);
366d96d3f30SStanimir Varbanov }
367d96d3f30SStanimir Varbanov 
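/*
 * Kick the firmware: write the host-to-ARM (H2A) soft interrupt bit so the
 * Venus firmware notices new data or freed space in the shared queues.
 */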
368d96d3f30SStanimir Varbanov static void venus_soft_int(struct venus_hfi_device *hdev)
369d96d3f30SStanimir Varbanov {
370ff2a7013SBryan O'Donoghue 	void __iomem *cpu_ic_base = hdev->core->cpu_ic_base;
37124fcc052SDikshita Agarwal 	u32 clear_bit;
372ff2a7013SBryan O'Donoghue 
37324fcc052SDikshita Agarwal 	if (IS_V6(hdev->core))
37424fcc052SDikshita Agarwal 		clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT_V6);
37524fcc052SDikshita Agarwal 	else
37624fcc052SDikshita Agarwal 		clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT);
37724fcc052SDikshita Agarwal 
37824fcc052SDikshita Agarwal 	writel(clear_bit, cpu_ic_base + CPU_IC_SOFTINT);
379d96d3f30SStanimir Varbanov }
380d96d3f30SStanimir Varbanov 
381d96d3f30SStanimir Varbanov static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
3827f339fdcSVikash Garodia 					 void *pkt, bool sync)
383d96d3f30SStanimir Varbanov {
384d96d3f30SStanimir Varbanov 	struct device *dev = hdev->core->dev;
385d96d3f30SStanimir Varbanov 	struct hfi_pkt_hdr *cmd_packet;
386d96d3f30SStanimir Varbanov 	struct iface_queue *queue;
387d96d3f30SStanimir Varbanov 	u32 rx_req;
388d96d3f30SStanimir Varbanov 	int ret;
389d96d3f30SStanimir Varbanov 
390d96d3f30SStanimir Varbanov 	if (!venus_is_valid_state(hdev))
391d96d3f30SStanimir Varbanov 		return -EINVAL;
392d96d3f30SStanimir Varbanov 
393d96d3f30SStanimir Varbanov 	cmd_packet = (struct hfi_pkt_hdr *)pkt;
394d96d3f30SStanimir Varbanov 	hdev->last_packet_type = cmd_packet->pkt_type;
395d96d3f30SStanimir Varbanov 
396d96d3f30SStanimir Varbanov 	queue = &hdev->queues[IFACEQ_CMD_IDX];
397d96d3f30SStanimir Varbanov 
398d96d3f30SStanimir Varbanov 	ret = venus_write_queue(hdev, queue, pkt, &rx_req);
399d96d3f30SStanimir Varbanov 	if (ret) {
400d96d3f30SStanimir Varbanov 		dev_err(dev, "write to iface cmd queue failed (%d)\n", ret);
401d96d3f30SStanimir Varbanov 		return ret;
402d96d3f30SStanimir Varbanov 	}
403d96d3f30SStanimir Varbanov 
4047f339fdcSVikash Garodia 	if (sync) {
4057f339fdcSVikash Garodia 		/*
4057f339fdcSVikash Garodia 		 * Inform the video hardware to raise an interrupt for
4067f339fdcSVikash Garodia 		 * synchronous commands
4087f339fdcSVikash Garodia 		 */
4097f339fdcSVikash Garodia 		queue = &hdev->queues[IFACEQ_MSG_IDX];
4107f339fdcSVikash Garodia 		queue->qhdr->rx_req = 1;
4117f339fdcSVikash Garodia 		/* ensure rx_req is updated in memory */
4127f339fdcSVikash Garodia 		wmb();
4137f339fdcSVikash Garodia 	}
4147f339fdcSVikash Garodia 
415d96d3f30SStanimir Varbanov 	if (rx_req)
416d96d3f30SStanimir Varbanov 		venus_soft_int(hdev);
417d96d3f30SStanimir Varbanov 
418d96d3f30SStanimir Varbanov 	return 0;
419d96d3f30SStanimir Varbanov }
420d96d3f30SStanimir Varbanov 
4217f339fdcSVikash Garodia static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt, bool sync)
422d96d3f30SStanimir Varbanov {
423d96d3f30SStanimir Varbanov 	int ret;
424d96d3f30SStanimir Varbanov 
425d96d3f30SStanimir Varbanov 	mutex_lock(&hdev->lock);
4267f339fdcSVikash Garodia 	ret = venus_iface_cmdq_write_nolock(hdev, pkt, sync);
427d96d3f30SStanimir Varbanov 	mutex_unlock(&hdev->lock);
428d96d3f30SStanimir Varbanov 
429d96d3f30SStanimir Varbanov 	return ret;
430d96d3f30SStanimir Varbanov }
431d96d3f30SStanimir Varbanov 
432d96d3f30SStanimir Varbanov static int venus_hfi_core_set_resource(struct venus_core *core, u32 id,
433d96d3f30SStanimir Varbanov 				       u32 size, u32 addr, void *cookie)
434d96d3f30SStanimir Varbanov {
435d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(core);
436d96d3f30SStanimir Varbanov 	struct hfi_sys_set_resource_pkt *pkt;
437d96d3f30SStanimir Varbanov 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
438d96d3f30SStanimir Varbanov 	int ret;
439d96d3f30SStanimir Varbanov 
440d96d3f30SStanimir Varbanov 	if (id == VIDC_RESOURCE_NONE)
441d96d3f30SStanimir Varbanov 		return 0;
442d96d3f30SStanimir Varbanov 
443d96d3f30SStanimir Varbanov 	pkt = (struct hfi_sys_set_resource_pkt *)packet;
444d96d3f30SStanimir Varbanov 
445d96d3f30SStanimir Varbanov 	ret = pkt_sys_set_resource(pkt, id, size, addr, cookie);
446d96d3f30SStanimir Varbanov 	if (ret)
447d96d3f30SStanimir Varbanov 		return ret;
448d96d3f30SStanimir Varbanov 
4497f339fdcSVikash Garodia 	ret = venus_iface_cmdq_write(hdev, pkt, false);
450d96d3f30SStanimir Varbanov 	if (ret)
451d96d3f30SStanimir Varbanov 		return ret;
452d96d3f30SStanimir Varbanov 
453d96d3f30SStanimir Varbanov 	return 0;
454d96d3f30SStanimir Varbanov }
455d96d3f30SStanimir Varbanov 
456d96d3f30SStanimir Varbanov static int venus_boot_core(struct venus_hfi_device *hdev)
457d96d3f30SStanimir Varbanov {
458d96d3f30SStanimir Varbanov 	struct device *dev = hdev->core->dev;
459d96d3f30SStanimir Varbanov 	static const unsigned int max_tries = 100;
460ff877873SKonrad Dybcio 	u32 ctrl_status = 0, mask_val = 0;
461d96d3f30SStanimir Varbanov 	unsigned int count = 0;
462ff2a7013SBryan O'Donoghue 	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
463ff2a7013SBryan O'Donoghue 	void __iomem *wrapper_base = hdev->core->wrapper_base;
464d96d3f30SStanimir Varbanov 	int ret = 0;
465d96d3f30SStanimir Varbanov 
466ff877873SKonrad Dybcio 	if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
467255385caSDikshita Agarwal 		mask_val = readl(wrapper_base + WRAPPER_INTR_MASK);
468255385caSDikshita Agarwal 		mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BASK_V6 |
469255385caSDikshita Agarwal 			      WRAPPER_INTR_MASK_A2HCPU_MASK);
470255385caSDikshita Agarwal 	} else {
471255385caSDikshita Agarwal 		mask_val = WRAPPER_INTR_MASK_A2HVCODEC_MASK;
472255385caSDikshita Agarwal 	}
473ff877873SKonrad Dybcio 
474255385caSDikshita Agarwal 	writel(mask_val, wrapper_base + WRAPPER_INTR_MASK);
475d5a8d2d3SKonrad Dybcio 	if (IS_V1(hdev->core))
476ff2a7013SBryan O'Donoghue 		writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3);
477d96d3f30SStanimir Varbanov 
478d74e4816SKonrad Dybcio 	writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
479d96d3f30SStanimir Varbanov 	while (!ctrl_status && count < max_tries) {
480ff2a7013SBryan O'Donoghue 		ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
481d96d3f30SStanimir Varbanov 		if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
482d96d3f30SStanimir Varbanov 			dev_err(dev, "invalid setting for UC_REGION\n");
483d96d3f30SStanimir Varbanov 			ret = -EINVAL;
484d96d3f30SStanimir Varbanov 			break;
485d96d3f30SStanimir Varbanov 		}
486d96d3f30SStanimir Varbanov 
487d96d3f30SStanimir Varbanov 		usleep_range(500, 1000);
488d96d3f30SStanimir Varbanov 		count++;
489d96d3f30SStanimir Varbanov 	}
490d96d3f30SStanimir Varbanov 
491d96d3f30SStanimir Varbanov 	if (count >= max_tries)
492d96d3f30SStanimir Varbanov 		ret = -ETIMEDOUT;
493d96d3f30SStanimir Varbanov 
494ff877873SKonrad Dybcio 	if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
49524fcc052SDikshita Agarwal 		writel(0x1, cpu_cs_base + CPU_CS_H2XSOFTINTEN_V6);
496255385caSDikshita Agarwal 		writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6);
49724fcc052SDikshita Agarwal 	}
498255385caSDikshita Agarwal 
499d96d3f30SStanimir Varbanov 	return ret;
500d96d3f30SStanimir Varbanov }
501d96d3f30SStanimir Varbanov 
502d96d3f30SStanimir Varbanov static u32 venus_hwversion(struct venus_hfi_device *hdev)
503d96d3f30SStanimir Varbanov {
504d96d3f30SStanimir Varbanov 	struct device *dev = hdev->core->dev;
505ff2a7013SBryan O'Donoghue 	void __iomem *wrapper_base = hdev->core->wrapper_base;
506ff2a7013SBryan O'Donoghue 	u32 ver;
507d96d3f30SStanimir Varbanov 	u32 major, minor, step;
508d96d3f30SStanimir Varbanov 
509ff2a7013SBryan O'Donoghue 	ver = readl(wrapper_base + WRAPPER_HW_VERSION);
510d96d3f30SStanimir Varbanov 	major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK;
511d96d3f30SStanimir Varbanov 	major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
512d96d3f30SStanimir Varbanov 	minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK;
513d96d3f30SStanimir Varbanov 	minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
514d96d3f30SStanimir Varbanov 	step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK;
515d96d3f30SStanimir Varbanov 
5168c91dc08SStanimir Varbanov 	dev_dbg(dev, VDBGL "venus hw version %x.%x.%x\n", major, minor, step);
517d96d3f30SStanimir Varbanov 
518d96d3f30SStanimir Varbanov 	return major;
519d96d3f30SStanimir Varbanov }
520d96d3f30SStanimir Varbanov 
521d96d3f30SStanimir Varbanov static int venus_run(struct venus_hfi_device *hdev)
522d96d3f30SStanimir Varbanov {
523d96d3f30SStanimir Varbanov 	struct device *dev = hdev->core->dev;
524ff2a7013SBryan O'Donoghue 	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
525d96d3f30SStanimir Varbanov 	int ret;
526d96d3f30SStanimir Varbanov 
527d96d3f30SStanimir Varbanov 	/*
528d96d3f30SStanimir Varbanov 	 * Re-program all of the registers that get reset as a result of
529d96d3f30SStanimir Varbanov 	 * regulator_disable() and _enable()
530d96d3f30SStanimir Varbanov 	 */
531d96d3f30SStanimir Varbanov 	venus_set_registers(hdev);
532d96d3f30SStanimir Varbanov 
533ff2a7013SBryan O'Donoghue 	writel(hdev->ifaceq_table.da, cpu_cs_base + UC_REGION_ADDR);
534ff2a7013SBryan O'Donoghue 	writel(SHARED_QSIZE, cpu_cs_base + UC_REGION_SIZE);
535ff2a7013SBryan O'Donoghue 	writel(hdev->ifaceq_table.da, cpu_cs_base + CPU_CS_SCIACMDARG2);
536ff2a7013SBryan O'Donoghue 	writel(0x01, cpu_cs_base + CPU_CS_SCIACMDARG1);
537d96d3f30SStanimir Varbanov 	if (hdev->sfr.da)
538ff2a7013SBryan O'Donoghue 		writel(hdev->sfr.da, cpu_cs_base + SFR_ADDR);
539d96d3f30SStanimir Varbanov 
540d96d3f30SStanimir Varbanov 	ret = venus_boot_core(hdev);
541d96d3f30SStanimir Varbanov 	if (ret) {
542d96d3f30SStanimir Varbanov 		dev_err(dev, "failed to reset venus core\n");
543d96d3f30SStanimir Varbanov 		return ret;
544d96d3f30SStanimir Varbanov 	}
545d96d3f30SStanimir Varbanov 
546d96d3f30SStanimir Varbanov 	venus_hwversion(hdev);
547d96d3f30SStanimir Varbanov 
548d96d3f30SStanimir Varbanov 	return 0;
549d96d3f30SStanimir Varbanov }
550d96d3f30SStanimir Varbanov 
551d96d3f30SStanimir Varbanov static int venus_halt_axi(struct venus_hfi_device *hdev)
552d96d3f30SStanimir Varbanov {
553ff2a7013SBryan O'Donoghue 	void __iomem *wrapper_base = hdev->core->wrapper_base;
554ff2a7013SBryan O'Donoghue 	void __iomem *vbif_base = hdev->core->vbif_base;
5554b0b6e14SDikshita Agarwal 	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
5564b0b6e14SDikshita Agarwal 	void __iomem *aon_base = hdev->core->aon_base;
557d96d3f30SStanimir Varbanov 	struct device *dev = hdev->core->dev;
558d96d3f30SStanimir Varbanov 	u32 val;
5594b0b6e14SDikshita Agarwal 	u32 mask_val;
560d96d3f30SStanimir Varbanov 	int ret;
561d96d3f30SStanimir Varbanov 
5625516263fSKonrad Dybcio 	if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
5634b0b6e14SDikshita Agarwal 		writel(0x3, cpu_cs_base + CPU_CS_X2RPMH_V6);
5644b0b6e14SDikshita Agarwal 
5655516263fSKonrad Dybcio 		if (IS_IRIS2_1(hdev->core))
56678d434baSDikshita Agarwal 			goto skip_aon_mvp_noc;
56778d434baSDikshita Agarwal 
5684b0b6e14SDikshita Agarwal 		writel(0x1, aon_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
5694b0b6e14SDikshita Agarwal 		ret = readl_poll_timeout(aon_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
5704b0b6e14SDikshita Agarwal 					 val,
5714b0b6e14SDikshita Agarwal 					 val & BIT(0),
5724b0b6e14SDikshita Agarwal 					 POLL_INTERVAL_US,
5734b0b6e14SDikshita Agarwal 					 VBIF_AXI_HALT_ACK_TIMEOUT_US);
5744b0b6e14SDikshita Agarwal 		if (ret)
5754b0b6e14SDikshita Agarwal 			return -ETIMEDOUT;
5764b0b6e14SDikshita Agarwal 
57778d434baSDikshita Agarwal skip_aon_mvp_noc:
5784b0b6e14SDikshita Agarwal 		mask_val = (BIT(2) | BIT(1) | BIT(0));
5794b0b6e14SDikshita Agarwal 		writel(mask_val, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
5804b0b6e14SDikshita Agarwal 
5814b0b6e14SDikshita Agarwal 		writel(0x00, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
5824b0b6e14SDikshita Agarwal 		ret = readl_poll_timeout(wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS_V6,
5834b0b6e14SDikshita Agarwal 					 val,
5844b0b6e14SDikshita Agarwal 					 val == 0,
5854b0b6e14SDikshita Agarwal 					 POLL_INTERVAL_US,
5864b0b6e14SDikshita Agarwal 					 VBIF_AXI_HALT_ACK_TIMEOUT_US);
5874b0b6e14SDikshita Agarwal 
5884b0b6e14SDikshita Agarwal 		if (ret) {
5894b0b6e14SDikshita Agarwal 			dev_err(dev, "DBLP Release: lpi_status %x\n", val);
5904b0b6e14SDikshita Agarwal 			return -ETIMEDOUT;
5914b0b6e14SDikshita Agarwal 		}
5924b0b6e14SDikshita Agarwal 		return 0;
5934b0b6e14SDikshita Agarwal 	}
5944b0b6e14SDikshita Agarwal 
595eb72356eSStanimir Varbanov 	if (IS_V4(hdev->core)) {
596ff2a7013SBryan O'Donoghue 		val = readl(wrapper_base + WRAPPER_CPU_AXI_HALT);
597eb72356eSStanimir Varbanov 		val |= WRAPPER_CPU_AXI_HALT_HALT;
598ff2a7013SBryan O'Donoghue 		writel(val, wrapper_base + WRAPPER_CPU_AXI_HALT);
599eb72356eSStanimir Varbanov 
600ff2a7013SBryan O'Donoghue 		ret = readl_poll_timeout(wrapper_base + WRAPPER_CPU_AXI_HALT_STATUS,
601eb72356eSStanimir Varbanov 					 val,
602eb72356eSStanimir Varbanov 					 val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE,
603eb72356eSStanimir Varbanov 					 POLL_INTERVAL_US,
604eb72356eSStanimir Varbanov 					 VBIF_AXI_HALT_ACK_TIMEOUT_US);
605eb72356eSStanimir Varbanov 		if (ret) {
606eb72356eSStanimir Varbanov 			dev_err(dev, "AXI bus port halt timeout\n");
607eb72356eSStanimir Varbanov 			return ret;
608eb72356eSStanimir Varbanov 		}
609eb72356eSStanimir Varbanov 
610eb72356eSStanimir Varbanov 		return 0;
611eb72356eSStanimir Varbanov 	}
612eb72356eSStanimir Varbanov 
613d96d3f30SStanimir Varbanov 	/* Halt AXI and AXI IMEM VBIF Access */
614ff2a7013SBryan O'Donoghue 	val = readl(vbif_base + VBIF_AXI_HALT_CTRL0);
615d96d3f30SStanimir Varbanov 	val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
616ff2a7013SBryan O'Donoghue 	writel(val, vbif_base + VBIF_AXI_HALT_CTRL0);
617d96d3f30SStanimir Varbanov 
618d96d3f30SStanimir Varbanov 	/* Request for AXI bus port halt */
619ff2a7013SBryan O'Donoghue 	ret = readl_poll_timeout(vbif_base + VBIF_AXI_HALT_CTRL1, val,
620d96d3f30SStanimir Varbanov 				 val & VBIF_AXI_HALT_CTRL1_HALT_ACK,
621d96d3f30SStanimir Varbanov 				 POLL_INTERVAL_US,
622d96d3f30SStanimir Varbanov 				 VBIF_AXI_HALT_ACK_TIMEOUT_US);
623d96d3f30SStanimir Varbanov 	if (ret) {
624d96d3f30SStanimir Varbanov 		dev_err(dev, "AXI bus port halt timeout\n");
625d96d3f30SStanimir Varbanov 		return ret;
626d96d3f30SStanimir Varbanov 	}
627d96d3f30SStanimir Varbanov 
628d96d3f30SStanimir Varbanov 	return 0;
629d96d3f30SStanimir Varbanov }
630d96d3f30SStanimir Varbanov 
631d96d3f30SStanimir Varbanov static int venus_power_off(struct venus_hfi_device *hdev)
632d96d3f30SStanimir Varbanov {
633d96d3f30SStanimir Varbanov 	int ret;
634d96d3f30SStanimir Varbanov 
635d96d3f30SStanimir Varbanov 	if (!hdev->power_enabled)
636d96d3f30SStanimir Varbanov 		return 0;
637d96d3f30SStanimir Varbanov 
6385df317c8SVikash Garodia 	ret = venus_set_hw_state_suspend(hdev->core);
639d96d3f30SStanimir Varbanov 	if (ret)
640d96d3f30SStanimir Varbanov 		return ret;
641d96d3f30SStanimir Varbanov 
642d96d3f30SStanimir Varbanov 	ret = venus_halt_axi(hdev);
643d96d3f30SStanimir Varbanov 	if (ret)
644d96d3f30SStanimir Varbanov 		return ret;
645d96d3f30SStanimir Varbanov 
646d96d3f30SStanimir Varbanov 	hdev->power_enabled = false;
647d96d3f30SStanimir Varbanov 
648d96d3f30SStanimir Varbanov 	return 0;
649d96d3f30SStanimir Varbanov }
650d96d3f30SStanimir Varbanov 
651d96d3f30SStanimir Varbanov static int venus_power_on(struct venus_hfi_device *hdev)
652d96d3f30SStanimir Varbanov {
653d96d3f30SStanimir Varbanov 	int ret;
654d96d3f30SStanimir Varbanov 
655d96d3f30SStanimir Varbanov 	if (hdev->power_enabled)
656d96d3f30SStanimir Varbanov 		return 0;
657d96d3f30SStanimir Varbanov 
6585df317c8SVikash Garodia 	ret = venus_set_hw_state_resume(hdev->core);
659d96d3f30SStanimir Varbanov 	if (ret)
660d96d3f30SStanimir Varbanov 		goto err;
661d96d3f30SStanimir Varbanov 
662d96d3f30SStanimir Varbanov 	ret = venus_run(hdev);
663d96d3f30SStanimir Varbanov 	if (ret)
664d96d3f30SStanimir Varbanov 		goto err_suspend;
665d96d3f30SStanimir Varbanov 
666d96d3f30SStanimir Varbanov 	hdev->power_enabled = true;
667d96d3f30SStanimir Varbanov 
668d96d3f30SStanimir Varbanov 	return 0;
669d96d3f30SStanimir Varbanov 
670d96d3f30SStanimir Varbanov err_suspend:
6715df317c8SVikash Garodia 	venus_set_hw_state_suspend(hdev->core);
672d96d3f30SStanimir Varbanov err:
673d96d3f30SStanimir Varbanov 	hdev->power_enabled = false;
674d96d3f30SStanimir Varbanov 	return ret;
675d96d3f30SStanimir Varbanov }
676d96d3f30SStanimir Varbanov 
677d96d3f30SStanimir Varbanov static int venus_iface_msgq_read_nolock(struct venus_hfi_device *hdev,
678d96d3f30SStanimir Varbanov 					void *pkt)
679d96d3f30SStanimir Varbanov {
680d96d3f30SStanimir Varbanov 	struct iface_queue *queue;
681d96d3f30SStanimir Varbanov 	u32 tx_req;
682d96d3f30SStanimir Varbanov 	int ret;
683d96d3f30SStanimir Varbanov 
684d96d3f30SStanimir Varbanov 	if (!venus_is_valid_state(hdev))
685d96d3f30SStanimir Varbanov 		return -EINVAL;
686d96d3f30SStanimir Varbanov 
687d96d3f30SStanimir Varbanov 	queue = &hdev->queues[IFACEQ_MSG_IDX];
688d96d3f30SStanimir Varbanov 
689d96d3f30SStanimir Varbanov 	ret = venus_read_queue(hdev, queue, pkt, &tx_req);
690d96d3f30SStanimir Varbanov 	if (ret)
691d96d3f30SStanimir Varbanov 		return ret;
692d96d3f30SStanimir Varbanov 
693d96d3f30SStanimir Varbanov 	if (tx_req)
694d96d3f30SStanimir Varbanov 		venus_soft_int(hdev);
695d96d3f30SStanimir Varbanov 
696d96d3f30SStanimir Varbanov 	return 0;
697d96d3f30SStanimir Varbanov }
698d96d3f30SStanimir Varbanov 
699d96d3f30SStanimir Varbanov static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt)
700d96d3f30SStanimir Varbanov {
701d96d3f30SStanimir Varbanov 	int ret;
702d96d3f30SStanimir Varbanov 
703d96d3f30SStanimir Varbanov 	mutex_lock(&hdev->lock);
704d96d3f30SStanimir Varbanov 	ret = venus_iface_msgq_read_nolock(hdev, pkt);
705d96d3f30SStanimir Varbanov 	mutex_unlock(&hdev->lock);
706d96d3f30SStanimir Varbanov 
707d96d3f30SStanimir Varbanov 	return ret;
708d96d3f30SStanimir Varbanov }
709d96d3f30SStanimir Varbanov 
710d96d3f30SStanimir Varbanov static int venus_iface_dbgq_read_nolock(struct venus_hfi_device *hdev,
711d96d3f30SStanimir Varbanov 					void *pkt)
712d96d3f30SStanimir Varbanov {
713d96d3f30SStanimir Varbanov 	struct iface_queue *queue;
714d96d3f30SStanimir Varbanov 	u32 tx_req;
715d96d3f30SStanimir Varbanov 	int ret;
716d96d3f30SStanimir Varbanov 
717d96d3f30SStanimir Varbanov 	ret = venus_is_valid_state(hdev);
718d96d3f30SStanimir Varbanov 	if (!ret)
719d96d3f30SStanimir Varbanov 		return -EINVAL;
720d96d3f30SStanimir Varbanov 
721d96d3f30SStanimir Varbanov 	queue = &hdev->queues[IFACEQ_DBG_IDX];
722d96d3f30SStanimir Varbanov 
723d96d3f30SStanimir Varbanov 	ret = venus_read_queue(hdev, queue, pkt, &tx_req);
724d96d3f30SStanimir Varbanov 	if (ret)
725d96d3f30SStanimir Varbanov 		return ret;
726d96d3f30SStanimir Varbanov 
727d96d3f30SStanimir Varbanov 	if (tx_req)
728d96d3f30SStanimir Varbanov 		venus_soft_int(hdev);
729d96d3f30SStanimir Varbanov 
730d96d3f30SStanimir Varbanov 	return 0;
731d96d3f30SStanimir Varbanov }
732d96d3f30SStanimir Varbanov 
733d96d3f30SStanimir Varbanov static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt)
734d96d3f30SStanimir Varbanov {
735d96d3f30SStanimir Varbanov 	int ret;
736d96d3f30SStanimir Varbanov 
737d96d3f30SStanimir Varbanov 	if (!pkt)
738d96d3f30SStanimir Varbanov 		return -EINVAL;
739d96d3f30SStanimir Varbanov 
740d96d3f30SStanimir Varbanov 	mutex_lock(&hdev->lock);
741d96d3f30SStanimir Varbanov 	ret = venus_iface_dbgq_read_nolock(hdev, pkt);
742d96d3f30SStanimir Varbanov 	mutex_unlock(&hdev->lock);
743d96d3f30SStanimir Varbanov 
744d96d3f30SStanimir Varbanov 	return ret;
745d96d3f30SStanimir Varbanov }
746d96d3f30SStanimir Varbanov 
747d96d3f30SStanimir Varbanov static void venus_set_qhdr_defaults(struct hfi_queue_header *qhdr)
748d96d3f30SStanimir Varbanov {
749d96d3f30SStanimir Varbanov 	qhdr->status = 1;
750d96d3f30SStanimir Varbanov 	qhdr->type = IFACEQ_DFLT_QHDR;
751d96d3f30SStanimir Varbanov 	qhdr->q_size = IFACEQ_QUEUE_SIZE / 4;
752d96d3f30SStanimir Varbanov 	qhdr->pkt_size = 0;
753d96d3f30SStanimir Varbanov 	qhdr->rx_wm = 1;
754d96d3f30SStanimir Varbanov 	qhdr->tx_wm = 1;
755d96d3f30SStanimir Varbanov 	qhdr->rx_req = 1;
756d96d3f30SStanimir Varbanov 	qhdr->tx_req = 0;
757d96d3f30SStanimir Varbanov 	qhdr->rx_irq_status = 0;
758d96d3f30SStanimir Varbanov 	qhdr->tx_irq_status = 0;
759d96d3f30SStanimir Varbanov 	qhdr->read_idx = 0;
760d96d3f30SStanimir Varbanov 	qhdr->write_idx = 0;
761d96d3f30SStanimir Varbanov }
762d96d3f30SStanimir Varbanov 
763d96d3f30SStanimir Varbanov static void venus_interface_queues_release(struct venus_hfi_device *hdev)
764d96d3f30SStanimir Varbanov {
765d96d3f30SStanimir Varbanov 	mutex_lock(&hdev->lock);
766d96d3f30SStanimir Varbanov 
767d96d3f30SStanimir Varbanov 	venus_free(hdev, &hdev->ifaceq_table);
768d96d3f30SStanimir Varbanov 	venus_free(hdev, &hdev->sfr);
769d96d3f30SStanimir Varbanov 
770d96d3f30SStanimir Varbanov 	memset(hdev->queues, 0, sizeof(hdev->queues));
771d96d3f30SStanimir Varbanov 	memset(&hdev->ifaceq_table, 0, sizeof(hdev->ifaceq_table));
772d96d3f30SStanimir Varbanov 	memset(&hdev->sfr, 0, sizeof(hdev->sfr));
773d96d3f30SStanimir Varbanov 
774d96d3f30SStanimir Varbanov 	mutex_unlock(&hdev->lock);
775d96d3f30SStanimir Varbanov }
776d96d3f30SStanimir Varbanov 
777d96d3f30SStanimir Varbanov static int venus_interface_queues_init(struct venus_hfi_device *hdev)
778d96d3f30SStanimir Varbanov {
779d96d3f30SStanimir Varbanov 	struct hfi_queue_table_header *tbl_hdr;
780d96d3f30SStanimir Varbanov 	struct iface_queue *queue;
781d96d3f30SStanimir Varbanov 	struct hfi_sfr *sfr;
782d96d3f30SStanimir Varbanov 	struct mem_desc desc = {0};
783d96d3f30SStanimir Varbanov 	unsigned int offset;
784d96d3f30SStanimir Varbanov 	unsigned int i;
785d96d3f30SStanimir Varbanov 	int ret;
786d96d3f30SStanimir Varbanov 
787d96d3f30SStanimir Varbanov 	ret = venus_alloc(hdev, &desc, ALIGNED_QUEUE_SIZE);
788d96d3f30SStanimir Varbanov 	if (ret)
789d96d3f30SStanimir Varbanov 		return ret;
790d96d3f30SStanimir Varbanov 
791cd1a77e3SStanimir Varbanov 	hdev->ifaceq_table = desc;
792cd1a77e3SStanimir Varbanov 	offset = IFACEQ_TABLE_SIZE;
793d96d3f30SStanimir Varbanov 
794d96d3f30SStanimir Varbanov 	for (i = 0; i < IFACEQ_NUM; i++) {
795d96d3f30SStanimir Varbanov 		queue = &hdev->queues[i];
796d96d3f30SStanimir Varbanov 		queue->qmem.da = desc.da + offset;
797d96d3f30SStanimir Varbanov 		queue->qmem.kva = desc.kva + offset;
798d96d3f30SStanimir Varbanov 		queue->qmem.size = IFACEQ_QUEUE_SIZE;
799d96d3f30SStanimir Varbanov 		offset += queue->qmem.size;
800d96d3f30SStanimir Varbanov 		queue->qhdr =
801d96d3f30SStanimir Varbanov 			IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
802d96d3f30SStanimir Varbanov 
803d96d3f30SStanimir Varbanov 		venus_set_qhdr_defaults(queue->qhdr);
804d96d3f30SStanimir Varbanov 
805d96d3f30SStanimir Varbanov 		queue->qhdr->start_addr = queue->qmem.da;
806d96d3f30SStanimir Varbanov 
807d96d3f30SStanimir Varbanov 		if (i == IFACEQ_CMD_IDX)
808d96d3f30SStanimir Varbanov 			queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
809d96d3f30SStanimir Varbanov 		else if (i == IFACEQ_MSG_IDX)
810d96d3f30SStanimir Varbanov 			queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
811d96d3f30SStanimir Varbanov 		else if (i == IFACEQ_DBG_IDX)
812d96d3f30SStanimir Varbanov 			queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
813d96d3f30SStanimir Varbanov 	}
814d96d3f30SStanimir Varbanov 
815d96d3f30SStanimir Varbanov 	tbl_hdr = hdev->ifaceq_table.kva;
816d96d3f30SStanimir Varbanov 	tbl_hdr->version = 0;
817d96d3f30SStanimir Varbanov 	tbl_hdr->size = IFACEQ_TABLE_SIZE;
818d96d3f30SStanimir Varbanov 	tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
819d96d3f30SStanimir Varbanov 	tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
820d96d3f30SStanimir Varbanov 	tbl_hdr->num_q = IFACEQ_NUM;
821d96d3f30SStanimir Varbanov 	tbl_hdr->num_active_q = IFACEQ_NUM;
822d96d3f30SStanimir Varbanov 
823d96d3f30SStanimir Varbanov 	/*
824d96d3f30SStanimir Varbanov 	 * Set receive request to zero on the debug queue as there is no
825d96d3f30SStanimir Varbanov 	 * need for an interrupt from the video hardware for debug messages
826d96d3f30SStanimir Varbanov 	 */
827d96d3f30SStanimir Varbanov 	queue = &hdev->queues[IFACEQ_DBG_IDX];
828d96d3f30SStanimir Varbanov 	queue->qhdr->rx_req = 0;
829d96d3f30SStanimir Varbanov 
830d96d3f30SStanimir Varbanov 	ret = venus_alloc(hdev, &desc, ALIGNED_SFR_SIZE);
831d96d3f30SStanimir Varbanov 	if (ret) {
832d96d3f30SStanimir Varbanov 		hdev->sfr.da = 0;
833d96d3f30SStanimir Varbanov 	} else {
834cd1a77e3SStanimir Varbanov 		hdev->sfr = desc;
835d96d3f30SStanimir Varbanov 		sfr = hdev->sfr.kva;
836d96d3f30SStanimir Varbanov 		sfr->buf_size = ALIGNED_SFR_SIZE;
837d96d3f30SStanimir Varbanov 	}
838d96d3f30SStanimir Varbanov 
839d96d3f30SStanimir Varbanov 	/* ensure table and queue header structs are settled in memory */
840d96d3f30SStanimir Varbanov 	wmb();
841d96d3f30SStanimir Varbanov 
842d96d3f30SStanimir Varbanov 	return 0;
843d96d3f30SStanimir Varbanov }
844d96d3f30SStanimir Varbanov 
845d96d3f30SStanimir Varbanov static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug)
846d96d3f30SStanimir Varbanov {
847d96d3f30SStanimir Varbanov 	struct hfi_sys_set_property_pkt *pkt;
848d96d3f30SStanimir Varbanov 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
849d96d3f30SStanimir Varbanov 
850d96d3f30SStanimir Varbanov 	pkt = (struct hfi_sys_set_property_pkt *)packet;
851d96d3f30SStanimir Varbanov 
852d96d3f30SStanimir Varbanov 	pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);
853d96d3f30SStanimir Varbanov 
854751be5c6SLiu Shixin 	return venus_iface_cmdq_write(hdev, pkt, false);
855d96d3f30SStanimir Varbanov }
856d96d3f30SStanimir Varbanov 
857d96d3f30SStanimir Varbanov static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode)
858d96d3f30SStanimir Varbanov {
859d96d3f30SStanimir Varbanov 	struct hfi_sys_set_property_pkt *pkt;
860d96d3f30SStanimir Varbanov 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
861d96d3f30SStanimir Varbanov 
862d96d3f30SStanimir Varbanov 	pkt = (struct hfi_sys_set_property_pkt *)packet;
863d96d3f30SStanimir Varbanov 
864d96d3f30SStanimir Varbanov 	pkt_sys_coverage_config(pkt, mode);
865d96d3f30SStanimir Varbanov 
866751be5c6SLiu Shixin 	return venus_iface_cmdq_write(hdev, pkt, false);
867d96d3f30SStanimir Varbanov }
868d96d3f30SStanimir Varbanov 
869d96d3f30SStanimir Varbanov static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
870d96d3f30SStanimir Varbanov 				      bool enable)
871d96d3f30SStanimir Varbanov {
872d96d3f30SStanimir Varbanov 	struct hfi_sys_set_property_pkt *pkt;
873d96d3f30SStanimir Varbanov 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
874d96d3f30SStanimir Varbanov 
875d96d3f30SStanimir Varbanov 	if (!enable)
876d96d3f30SStanimir Varbanov 		return 0;
877d96d3f30SStanimir Varbanov 
878d96d3f30SStanimir Varbanov 	pkt = (struct hfi_sys_set_property_pkt *)packet;
879d96d3f30SStanimir Varbanov 
880d96d3f30SStanimir Varbanov 	pkt_sys_idle_indicator(pkt, enable);
881d96d3f30SStanimir Varbanov 
882751be5c6SLiu Shixin 	return venus_iface_cmdq_write(hdev, pkt, false);
883d96d3f30SStanimir Varbanov }
884d96d3f30SStanimir Varbanov 
885d96d3f30SStanimir Varbanov static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
886d96d3f30SStanimir Varbanov 				       bool enable)
887d96d3f30SStanimir Varbanov {
888d96d3f30SStanimir Varbanov 	struct hfi_sys_set_property_pkt *pkt;
889d96d3f30SStanimir Varbanov 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
890d96d3f30SStanimir Varbanov 
891d96d3f30SStanimir Varbanov 	pkt = (struct hfi_sys_set_property_pkt *)packet;
892d96d3f30SStanimir Varbanov 
893d96d3f30SStanimir Varbanov 	pkt_sys_power_control(pkt, enable);
894d96d3f30SStanimir Varbanov 
895751be5c6SLiu Shixin 	return venus_iface_cmdq_write(hdev, pkt, false);
896d96d3f30SStanimir Varbanov }
897d96d3f30SStanimir Varbanov 
898b228cf38SVikash Garodia static int venus_sys_set_ubwc_config(struct venus_hfi_device *hdev)
899b228cf38SVikash Garodia {
900b228cf38SVikash Garodia 	struct hfi_sys_set_property_pkt *pkt;
901b228cf38SVikash Garodia 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
902b228cf38SVikash Garodia 	const struct venus_resources *res = hdev->core->res;
903b228cf38SVikash Garodia 	int ret;
904b228cf38SVikash Garodia 
905b228cf38SVikash Garodia 	pkt = (struct hfi_sys_set_property_pkt *)packet;
906b228cf38SVikash Garodia 
907b228cf38SVikash Garodia 	pkt_sys_ubwc_config(pkt, res->ubwc_conf);
908b228cf38SVikash Garodia 
909b228cf38SVikash Garodia 	ret = venus_iface_cmdq_write(hdev, pkt, false);
910b228cf38SVikash Garodia 	if (ret)
911b228cf38SVikash Garodia 		return ret;
912b228cf38SVikash Garodia 
913b228cf38SVikash Garodia 	return 0;
914b228cf38SVikash Garodia }
915b228cf38SVikash Garodia 
916d96d3f30SStanimir Varbanov static int venus_get_queue_size(struct venus_hfi_device *hdev,
917d96d3f30SStanimir Varbanov 				unsigned int index)
918d96d3f30SStanimir Varbanov {
919d96d3f30SStanimir Varbanov 	struct hfi_queue_header *qhdr;
920d96d3f30SStanimir Varbanov 
921d96d3f30SStanimir Varbanov 	if (index >= IFACEQ_NUM)
922d96d3f30SStanimir Varbanov 		return -EINVAL;
923d96d3f30SStanimir Varbanov 
924d96d3f30SStanimir Varbanov 	qhdr = hdev->queues[index].qhdr;
925d96d3f30SStanimir Varbanov 	if (!qhdr)
926d96d3f30SStanimir Varbanov 		return -EINVAL;
927d96d3f30SStanimir Varbanov 
928d96d3f30SStanimir Varbanov 	return abs(qhdr->read_idx - qhdr->write_idx);
929d96d3f30SStanimir Varbanov }
930d96d3f30SStanimir Varbanov 
931d96d3f30SStanimir Varbanov static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
932d96d3f30SStanimir Varbanov {
933d96d3f30SStanimir Varbanov 	struct device *dev = hdev->core->dev;
934b228cf38SVikash Garodia 	const struct venus_resources *res = hdev->core->res;
935d96d3f30SStanimir Varbanov 	int ret;
936d96d3f30SStanimir Varbanov 
937d96d3f30SStanimir Varbanov 	ret = venus_sys_set_debug(hdev, venus_fw_debug);
938d96d3f30SStanimir Varbanov 	if (ret)
939d96d3f30SStanimir Varbanov 		dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);
940d96d3f30SStanimir Varbanov 
9416283e483SKonrad Dybcio 	/* HFI_PROPERTY_SYS_IDLE_INDICATOR is not supported beyond 8916 (HFI V1) */
9426283e483SKonrad Dybcio 	if (IS_V1(hdev->core)) {
9436283e483SKonrad Dybcio 		ret = venus_sys_set_idle_message(hdev, false);
944d96d3f30SStanimir Varbanov 		if (ret)
945d96d3f30SStanimir Varbanov 			dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
9466283e483SKonrad Dybcio 	}
947d96d3f30SStanimir Varbanov 
948d96d3f30SStanimir Varbanov 	ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
949d96d3f30SStanimir Varbanov 	if (ret)
950d96d3f30SStanimir Varbanov 		dev_warn(dev, "setting hw power collapse ON failed (%d)\n",
951d96d3f30SStanimir Varbanov 			 ret);
952d96d3f30SStanimir Varbanov 
953b228cf38SVikash Garodia 	/* For specific Venus cores, it is mandatory to set the UBWC configuration */
954b228cf38SVikash Garodia 	if (res->ubwc_conf) {
955b228cf38SVikash Garodia 		ret = venus_sys_set_ubwc_config(hdev);
956b228cf38SVikash Garodia 		if (ret)
957b228cf38SVikash Garodia 			dev_warn(dev, "setting ubwc config failed (%d)\n", ret);
958b228cf38SVikash Garodia 	}
959b228cf38SVikash Garodia 
960d96d3f30SStanimir Varbanov 	return ret;
961d96d3f30SStanimir Varbanov }
962d96d3f30SStanimir Varbanov 
9637f339fdcSVikash Garodia static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type, bool sync)
964d96d3f30SStanimir Varbanov {
965d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
966d96d3f30SStanimir Varbanov 	struct hfi_session_pkt pkt;
967d96d3f30SStanimir Varbanov 
968d96d3f30SStanimir Varbanov 	pkt_session_cmd(&pkt, pkt_type, inst);
969d96d3f30SStanimir Varbanov 
9707f339fdcSVikash Garodia 	return venus_iface_cmdq_write(hdev, &pkt, sync);
971d96d3f30SStanimir Varbanov }
972d96d3f30SStanimir Varbanov 
973d96d3f30SStanimir Varbanov static void venus_flush_debug_queue(struct venus_hfi_device *hdev)
974d96d3f30SStanimir Varbanov {
975d96d3f30SStanimir Varbanov 	struct device *dev = hdev->core->dev;
976d96d3f30SStanimir Varbanov 	void *packet = hdev->dbg_buf;
977d96d3f30SStanimir Varbanov 
978d96d3f30SStanimir Varbanov 	while (!venus_iface_dbgq_read(hdev, packet)) {
979d96d3f30SStanimir Varbanov 		struct hfi_msg_sys_coverage_pkt *pkt = packet;
980d96d3f30SStanimir Varbanov 
981d96d3f30SStanimir Varbanov 		if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
982d96d3f30SStanimir Varbanov 			struct hfi_msg_sys_debug_pkt *pkt = packet;
983d96d3f30SStanimir Varbanov 
9848c91dc08SStanimir Varbanov 			dev_dbg(dev, VDBGFW "%s", pkt->msg_data);
985d96d3f30SStanimir Varbanov 		}
986d96d3f30SStanimir Varbanov 	}
987d96d3f30SStanimir Varbanov }
988d96d3f30SStanimir Varbanov 
989d96d3f30SStanimir Varbanov static int venus_prepare_power_collapse(struct venus_hfi_device *hdev,
990d96d3f30SStanimir Varbanov 					bool wait)
991d96d3f30SStanimir Varbanov {
992d96d3f30SStanimir Varbanov 	unsigned long timeout = msecs_to_jiffies(venus_hw_rsp_timeout);
993d96d3f30SStanimir Varbanov 	struct hfi_sys_pc_prep_pkt pkt;
994d96d3f30SStanimir Varbanov 	int ret;
995d96d3f30SStanimir Varbanov 
996d96d3f30SStanimir Varbanov 	init_completion(&hdev->pwr_collapse_prep);
997d96d3f30SStanimir Varbanov 
998d96d3f30SStanimir Varbanov 	pkt_sys_pc_prep(&pkt);
999d96d3f30SStanimir Varbanov 
10007f339fdcSVikash Garodia 	ret = venus_iface_cmdq_write(hdev, &pkt, false);
1001d96d3f30SStanimir Varbanov 	if (ret)
1002d96d3f30SStanimir Varbanov 		return ret;
1003d96d3f30SStanimir Varbanov 
1004d96d3f30SStanimir Varbanov 	if (!wait)
1005d96d3f30SStanimir Varbanov 		return 0;
1006d96d3f30SStanimir Varbanov 
1007d96d3f30SStanimir Varbanov 	ret = wait_for_completion_timeout(&hdev->pwr_collapse_prep, timeout);
1008d96d3f30SStanimir Varbanov 	if (!ret) {
1009d96d3f30SStanimir Varbanov 		venus_flush_debug_queue(hdev);
1010d96d3f30SStanimir Varbanov 		return -ETIMEDOUT;
1011d96d3f30SStanimir Varbanov 	}
1012d96d3f30SStanimir Varbanov 
1013d96d3f30SStanimir Varbanov 	return 0;
1014d96d3f30SStanimir Varbanov }
1015d96d3f30SStanimir Varbanov 
1016d96d3f30SStanimir Varbanov static int venus_are_queues_empty(struct venus_hfi_device *hdev)
1017d96d3f30SStanimir Varbanov {
1018d96d3f30SStanimir Varbanov 	int ret1, ret2;
1019d96d3f30SStanimir Varbanov 
1020d96d3f30SStanimir Varbanov 	ret1 = venus_get_queue_size(hdev, IFACEQ_MSG_IDX);
1021d96d3f30SStanimir Varbanov 	if (ret1 < 0)
1022d96d3f30SStanimir Varbanov 		return ret1;
1023d96d3f30SStanimir Varbanov 
1024d96d3f30SStanimir Varbanov 	ret2 = venus_get_queue_size(hdev, IFACEQ_CMD_IDX);
1025d96d3f30SStanimir Varbanov 	if (ret2 < 0)
1026d96d3f30SStanimir Varbanov 		return ret2;
1027d96d3f30SStanimir Varbanov 
1028d96d3f30SStanimir Varbanov 	if (!ret1 && !ret2)
1029d96d3f30SStanimir Varbanov 		return 1;
1030d96d3f30SStanimir Varbanov 
1031d96d3f30SStanimir Varbanov 	return 0;
1032d96d3f30SStanimir Varbanov }
1033d96d3f30SStanimir Varbanov 
1034d96d3f30SStanimir Varbanov static void venus_sfr_print(struct venus_hfi_device *hdev)
1035d96d3f30SStanimir Varbanov {
1036d96d3f30SStanimir Varbanov 	struct device *dev = hdev->core->dev;
1037d96d3f30SStanimir Varbanov 	struct hfi_sfr *sfr = hdev->sfr.kva;
1038d96d3f30SStanimir Varbanov 	void *p;
1039d96d3f30SStanimir Varbanov 
1040d96d3f30SStanimir Varbanov 	if (!sfr)
1041d96d3f30SStanimir Varbanov 		return;
1042d96d3f30SStanimir Varbanov 
1043d96d3f30SStanimir Varbanov 	p = memchr(sfr->data, '\0', sfr->buf_size);
1044d96d3f30SStanimir Varbanov 	/*
1045d96d3f30SStanimir Varbanov 	 * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates
1046d96d3f30SStanimir Varbanov 	 * that Venus is in the process of crashing.
1047d96d3f30SStanimir Varbanov 	 */
1048d96d3f30SStanimir Varbanov 	if (!p)
1049d96d3f30SStanimir Varbanov 		sfr->data[sfr->buf_size - 1] = '\0';
1050d96d3f30SStanimir Varbanov 
1051d96d3f30SStanimir Varbanov 	dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
1052d96d3f30SStanimir Varbanov }
1053d96d3f30SStanimir Varbanov 
1054d96d3f30SStanimir Varbanov static void venus_process_msg_sys_error(struct venus_hfi_device *hdev,
1055d96d3f30SStanimir Varbanov 					void *packet)
1056d96d3f30SStanimir Varbanov {
1057d96d3f30SStanimir Varbanov 	struct hfi_msg_event_notify_pkt *event_pkt = packet;
1058d96d3f30SStanimir Varbanov 
1059d96d3f30SStanimir Varbanov 	if (event_pkt->event_id != HFI_EVENT_SYS_ERROR)
1060d96d3f30SStanimir Varbanov 		return;
1061d96d3f30SStanimir Varbanov 
1062d96d3f30SStanimir Varbanov 	venus_set_state(hdev, VENUS_STATE_DEINIT);
1063d96d3f30SStanimir Varbanov 
1064d96d3f30SStanimir Varbanov 	venus_sfr_print(hdev);
1065d96d3f30SStanimir Varbanov }
1066d96d3f30SStanimir Varbanov 
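/*
 * Threaded half of the Venus interrupt handler: drain the
 * ctrl-to-host message queue, run each packet through
 * hfi_process_msg_packet(), complete any waiters (resource
 * release, power-collapse prep) and set the core resource on
 * SYS_INIT, then flush the firmware debug queue.
 */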
1067d96d3f30SStanimir Varbanov static irqreturn_t venus_isr_thread(struct venus_core *core)
1068d96d3f30SStanimir Varbanov {
1069d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(core);
10704cb3548aSStanimir Varbanov 	const struct venus_resources *res;
1071d96d3f30SStanimir Varbanov 	void *pkt;
1072d96d3f30SStanimir Varbanov 	u32 msg_ret;
1073d96d3f30SStanimir Varbanov 
1074d96d3f30SStanimir Varbanov 	if (!hdev)
1075d96d3f30SStanimir Varbanov 		return IRQ_NONE;
1076d96d3f30SStanimir Varbanov 
10774cb3548aSStanimir Varbanov 	res = hdev->core->res;
1078d96d3f30SStanimir Varbanov 	pkt = hdev->pkt_buf;
1079d96d3f30SStanimir Varbanov 
1080d96d3f30SStanimir Varbanov 
1081d96d3f30SStanimir Varbanov 	while (!venus_iface_msgq_read(hdev, pkt)) {
1082d96d3f30SStanimir Varbanov 		msg_ret = hfi_process_msg_packet(core, pkt);
1083d96d3f30SStanimir Varbanov 		switch (msg_ret) {
1084d96d3f30SStanimir Varbanov 		case HFI_MSG_EVENT_NOTIFY:
1085d96d3f30SStanimir Varbanov 			venus_process_msg_sys_error(hdev, pkt);
1086d96d3f30SStanimir Varbanov 			break;
1087d96d3f30SStanimir Varbanov 		case HFI_MSG_SYS_INIT:
1088d96d3f30SStanimir Varbanov 			venus_hfi_core_set_resource(core, res->vmem_id,
1089d96d3f30SStanimir Varbanov 						    res->vmem_size,
1090d96d3f30SStanimir Varbanov 						    res->vmem_addr,
1091d96d3f30SStanimir Varbanov 						    hdev);
1092d96d3f30SStanimir Varbanov 			break;
1093d96d3f30SStanimir Varbanov 		case HFI_MSG_SYS_RELEASE_RESOURCE:
1094d96d3f30SStanimir Varbanov 			complete(&hdev->release_resource);
1095d96d3f30SStanimir Varbanov 			break;
1096d96d3f30SStanimir Varbanov 		case HFI_MSG_SYS_PC_PREP:
1097d96d3f30SStanimir Varbanov 			complete(&hdev->pwr_collapse_prep);
1098d96d3f30SStanimir Varbanov 			break;
1099d96d3f30SStanimir Varbanov 		default:
1100d96d3f30SStanimir Varbanov 			break;
1101d96d3f30SStanimir Varbanov 		}
1102d96d3f30SStanimir Varbanov 	}
1103d96d3f30SStanimir Varbanov 
1104d96d3f30SStanimir Varbanov 	venus_flush_debug_queue(hdev);
1105d96d3f30SStanimir Varbanov 
1106d96d3f30SStanimir Varbanov 	return IRQ_HANDLED;
1107d96d3f30SStanimir Varbanov }
1108d96d3f30SStanimir Varbanov 
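/*
 * Hard-IRQ half: latch the wrapper interrupt status into
 * hdev->irq_status for the threaded handler to inspect, ack the
 * A2H soft interrupt and, on non-IRIS2 hardware, clear the wrapper
 * status register, then wake the thread.
 */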
1109d96d3f30SStanimir Varbanov static irqreturn_t venus_isr(struct venus_core *core)
1110d96d3f30SStanimir Varbanov {
1111d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(core);
1112d96d3f30SStanimir Varbanov 	u32 status;
1113686ee9b6SMauro Carvalho Chehab 	void __iomem *cpu_cs_base;
1114686ee9b6SMauro Carvalho Chehab 	void __iomem *wrapper_base;
1115d96d3f30SStanimir Varbanov 
1116d96d3f30SStanimir Varbanov 	if (!hdev)
1117d96d3f30SStanimir Varbanov 		return IRQ_NONE;
1118d96d3f30SStanimir Varbanov 
1119686ee9b6SMauro Carvalho Chehab 	cpu_cs_base = hdev->core->cpu_cs_base;
1120686ee9b6SMauro Carvalho Chehab 	wrapper_base = hdev->core->wrapper_base;
1121686ee9b6SMauro Carvalho Chehab 
1122ff2a7013SBryan O'Donoghue 	status = readl(wrapper_base + WRAPPER_INTR_STATUS);
112303811969SKonrad Dybcio 	if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
112424fcc052SDikshita Agarwal 		if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
112524fcc052SDikshita Agarwal 		    status & WRAPPER_INTR_STATUS_A2HWD_MASK_V6 ||
112624fcc052SDikshita Agarwal 		    status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
112724fcc052SDikshita Agarwal 			hdev->irq_status = status;
112824fcc052SDikshita Agarwal 	} else {
1129d96d3f30SStanimir Varbanov 		if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
1130d96d3f30SStanimir Varbanov 		    status & WRAPPER_INTR_STATUS_A2HWD_MASK ||
1131d96d3f30SStanimir Varbanov 		    status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1132d96d3f30SStanimir Varbanov 			hdev->irq_status = status;
113324fcc052SDikshita Agarwal 	}
1134ff2a7013SBryan O'Donoghue 	writel(1, cpu_cs_base + CPU_CS_A2HSOFTINTCLR);
113503811969SKonrad Dybcio 	if (!(IS_IRIS2(core) || IS_IRIS2_1(core)))
1136ff2a7013SBryan O'Donoghue 		writel(status, wrapper_base + WRAPPER_INTR_CLEAR);
1137d96d3f30SStanimir Varbanov 
1138d96d3f30SStanimir Varbanov 	return IRQ_WAKE_THREAD;
1139d96d3f30SStanimir Varbanov }
1140d96d3f30SStanimir Varbanov 
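/*
 * Bring the HFI interface up: send SYS_INIT for the OX
 * architecture, request the firmware image version (a failure here
 * is only a warning), then program the default system properties.
 */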
1141d96d3f30SStanimir Varbanov static int venus_core_init(struct venus_core *core)
1142d96d3f30SStanimir Varbanov {
1143d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(core);
1144d96d3f30SStanimir Varbanov 	struct device *dev = core->dev;
1145d96d3f30SStanimir Varbanov 	struct hfi_sys_get_property_pkt version_pkt;
1146d96d3f30SStanimir Varbanov 	struct hfi_sys_init_pkt pkt;
1147d96d3f30SStanimir Varbanov 	int ret;
1148d96d3f30SStanimir Varbanov 
1149d96d3f30SStanimir Varbanov 	pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX);
1150d96d3f30SStanimir Varbanov 
1151d96d3f30SStanimir Varbanov 	venus_set_state(hdev, VENUS_STATE_INIT);
1152d96d3f30SStanimir Varbanov 
11537f339fdcSVikash Garodia 	ret = venus_iface_cmdq_write(hdev, &pkt, false);
1154d96d3f30SStanimir Varbanov 	if (ret)
1155d96d3f30SStanimir Varbanov 		return ret;
1156d96d3f30SStanimir Varbanov 
1157d96d3f30SStanimir Varbanov 	pkt_sys_image_version(&version_pkt);
1158d96d3f30SStanimir Varbanov 
11597f339fdcSVikash Garodia 	ret = venus_iface_cmdq_write(hdev, &version_pkt, false);
1160d96d3f30SStanimir Varbanov 	if (ret)
1161d96d3f30SStanimir Varbanov 		dev_warn(dev, "failed to send image version pkt to fw\n");
1162d96d3f30SStanimir Varbanov 
11634dde81d7SStanimir Varbanov 	ret = venus_sys_set_default_properties(hdev);
11644dde81d7SStanimir Varbanov 	if (ret)
11654dde81d7SStanimir Varbanov 		return ret;
11664dde81d7SStanimir Varbanov 
1167d96d3f30SStanimir Varbanov 	return 0;
1168d96d3f30SStanimir Varbanov }
1169d96d3f30SStanimir Varbanov 
1170d96d3f30SStanimir Varbanov static int venus_core_deinit(struct venus_core *core)
1171d96d3f30SStanimir Varbanov {
1172d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(core);
1173d96d3f30SStanimir Varbanov 
1174d96d3f30SStanimir Varbanov 	venus_set_state(hdev, VENUS_STATE_DEINIT);
1175d96d3f30SStanimir Varbanov 	hdev->suspended = true;
1176d96d3f30SStanimir Varbanov 	hdev->power_enabled = false;
1177d96d3f30SStanimir Varbanov 
1178d96d3f30SStanimir Varbanov 	return 0;
1179d96d3f30SStanimir Varbanov }
1180d96d3f30SStanimir Varbanov 
1181d96d3f30SStanimir Varbanov static int venus_core_ping(struct venus_core *core, u32 cookie)
1182d96d3f30SStanimir Varbanov {
1183d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(core);
1184d96d3f30SStanimir Varbanov 	struct hfi_sys_ping_pkt pkt;
1185d96d3f30SStanimir Varbanov 
1186d96d3f30SStanimir Varbanov 	pkt_sys_ping(&pkt, cookie);
1187d96d3f30SStanimir Varbanov 
11887f339fdcSVikash Garodia 	return venus_iface_cmdq_write(hdev, &pkt, false);
1189d96d3f30SStanimir Varbanov }
1190d96d3f30SStanimir Varbanov 
1191d96d3f30SStanimir Varbanov static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
1192d96d3f30SStanimir Varbanov {
1193d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(core);
1194d96d3f30SStanimir Varbanov 	struct hfi_sys_test_ssr_pkt pkt;
1195d96d3f30SStanimir Varbanov 	int ret;
1196d96d3f30SStanimir Varbanov 
1197d96d3f30SStanimir Varbanov 	ret = pkt_sys_ssr_cmd(&pkt, trigger_type);
1198d96d3f30SStanimir Varbanov 	if (ret)
1199d96d3f30SStanimir Varbanov 		return ret;
1200d96d3f30SStanimir Varbanov 
12017f339fdcSVikash Garodia 	return venus_iface_cmdq_write(hdev, &pkt, false);
1202d96d3f30SStanimir Varbanov }
1203d96d3f30SStanimir Varbanov 
1204d96d3f30SStanimir Varbanov static int venus_session_init(struct venus_inst *inst, u32 session_type,
1205d96d3f30SStanimir Varbanov 			      u32 codec)
1206d96d3f30SStanimir Varbanov {
1207d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1208d96d3f30SStanimir Varbanov 	struct hfi_session_init_pkt pkt;
1209d96d3f30SStanimir Varbanov 	int ret;
1210d96d3f30SStanimir Varbanov 
1211f08abe6aSStanimir Varbanov 	ret = venus_sys_set_debug(hdev, venus_fw_debug);
1212f08abe6aSStanimir Varbanov 	if (ret)
1213f08abe6aSStanimir Varbanov 		goto err;
1214f08abe6aSStanimir Varbanov 
1215d96d3f30SStanimir Varbanov 	ret = pkt_session_init(&pkt, inst, session_type, codec);
1216d96d3f30SStanimir Varbanov 	if (ret)
1217d96d3f30SStanimir Varbanov 		goto err;
1218d96d3f30SStanimir Varbanov 
12197f339fdcSVikash Garodia 	ret = venus_iface_cmdq_write(hdev, &pkt, true);
1220d96d3f30SStanimir Varbanov 	if (ret)
1221d96d3f30SStanimir Varbanov 		goto err;
1222d96d3f30SStanimir Varbanov 
1223d96d3f30SStanimir Varbanov 	return 0;
1224d96d3f30SStanimir Varbanov 
1225d96d3f30SStanimir Varbanov err:
1226d96d3f30SStanimir Varbanov 	venus_flush_debug_queue(hdev);
1227d96d3f30SStanimir Varbanov 	return ret;
1228d96d3f30SStanimir Varbanov }
1229d96d3f30SStanimir Varbanov 
1230d96d3f30SStanimir Varbanov static int venus_session_end(struct venus_inst *inst)
1231d96d3f30SStanimir Varbanov {
1232d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1233d96d3f30SStanimir Varbanov 	struct device *dev = hdev->core->dev;
1234d96d3f30SStanimir Varbanov 
1235d96d3f30SStanimir Varbanov 	if (venus_fw_coverage) {
1236d96d3f30SStanimir Varbanov 		if (venus_sys_set_coverage(hdev, venus_fw_coverage))
1237d96d3f30SStanimir Varbanov 			dev_warn(dev, "fw coverage msg ON failed\n");
1238d96d3f30SStanimir Varbanov 	}
1239d96d3f30SStanimir Varbanov 
12407f339fdcSVikash Garodia 	return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END, true);
1241d96d3f30SStanimir Varbanov }
1242d96d3f30SStanimir Varbanov 
1243d96d3f30SStanimir Varbanov static int venus_session_abort(struct venus_inst *inst)
1244d96d3f30SStanimir Varbanov {
1245d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1246d96d3f30SStanimir Varbanov 
1247d96d3f30SStanimir Varbanov 	venus_flush_debug_queue(hdev);
1248d96d3f30SStanimir Varbanov 
12497f339fdcSVikash Garodia 	return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT, true);
1250d96d3f30SStanimir Varbanov }
1251d96d3f30SStanimir Varbanov 
1252d96d3f30SStanimir Varbanov static int venus_session_flush(struct venus_inst *inst, u32 flush_mode)
1253d96d3f30SStanimir Varbanov {
1254d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1255d96d3f30SStanimir Varbanov 	struct hfi_session_flush_pkt pkt;
1256d96d3f30SStanimir Varbanov 	int ret;
1257d96d3f30SStanimir Varbanov 
1258d96d3f30SStanimir Varbanov 	ret = pkt_session_flush(&pkt, inst, flush_mode);
1259d96d3f30SStanimir Varbanov 	if (ret)
1260d96d3f30SStanimir Varbanov 		return ret;
1261d96d3f30SStanimir Varbanov 
12627f339fdcSVikash Garodia 	return venus_iface_cmdq_write(hdev, &pkt, true);
1263d96d3f30SStanimir Varbanov }
1264d96d3f30SStanimir Varbanov 
1265d96d3f30SStanimir Varbanov static int venus_session_start(struct venus_inst *inst)
1266d96d3f30SStanimir Varbanov {
12677f339fdcSVikash Garodia 	return venus_session_cmd(inst, HFI_CMD_SESSION_START, true);
1268d96d3f30SStanimir Varbanov }
1269d96d3f30SStanimir Varbanov 
1270d96d3f30SStanimir Varbanov static int venus_session_stop(struct venus_inst *inst)
1271d96d3f30SStanimir Varbanov {
12727f339fdcSVikash Garodia 	return venus_session_cmd(inst, HFI_CMD_SESSION_STOP, true);
1273d96d3f30SStanimir Varbanov }
1274d96d3f30SStanimir Varbanov 
1275d96d3f30SStanimir Varbanov static int venus_session_continue(struct venus_inst *inst)
1276d96d3f30SStanimir Varbanov {
12777f339fdcSVikash Garodia 	return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE, false);
1278d96d3f30SStanimir Varbanov }
1279d96d3f30SStanimir Varbanov 
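/*
 * Empty-this-buffer: decoder sessions hand the firmware a
 * compressed input buffer, encoder sessions an uncompressed
 * plane-0 buffer; any other session type is rejected with -EINVAL.
 */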
1280d96d3f30SStanimir Varbanov static int venus_session_etb(struct venus_inst *inst,
1281d96d3f30SStanimir Varbanov 			     struct hfi_frame_data *in_frame)
1282d96d3f30SStanimir Varbanov {
1283d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1284d96d3f30SStanimir Varbanov 	u32 session_type = inst->session_type;
1285d96d3f30SStanimir Varbanov 	int ret;
1286d96d3f30SStanimir Varbanov 
1287d96d3f30SStanimir Varbanov 	if (session_type == VIDC_SESSION_TYPE_DEC) {
1288d96d3f30SStanimir Varbanov 		struct hfi_session_empty_buffer_compressed_pkt pkt;
1289d96d3f30SStanimir Varbanov 
1290d96d3f30SStanimir Varbanov 		ret = pkt_session_etb_decoder(&pkt, inst, in_frame);
1291d96d3f30SStanimir Varbanov 		if (ret)
1292d96d3f30SStanimir Varbanov 			return ret;
1293d96d3f30SStanimir Varbanov 
12947f339fdcSVikash Garodia 		ret = venus_iface_cmdq_write(hdev, &pkt, false);
1295d96d3f30SStanimir Varbanov 	} else if (session_type == VIDC_SESSION_TYPE_ENC) {
1296d96d3f30SStanimir Varbanov 		struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt;
1297d96d3f30SStanimir Varbanov 
1298d96d3f30SStanimir Varbanov 		ret = pkt_session_etb_encoder(&pkt, inst, in_frame);
1299d96d3f30SStanimir Varbanov 		if (ret)
1300d96d3f30SStanimir Varbanov 			return ret;
1301d96d3f30SStanimir Varbanov 
13027f339fdcSVikash Garodia 		ret = venus_iface_cmdq_write(hdev, &pkt, false);
1303d96d3f30SStanimir Varbanov 	} else {
1304d96d3f30SStanimir Varbanov 		ret = -EINVAL;
1305d96d3f30SStanimir Varbanov 	}
1306d96d3f30SStanimir Varbanov 
1307d96d3f30SStanimir Varbanov 	return ret;
1308d96d3f30SStanimir Varbanov }
1309d96d3f30SStanimir Varbanov 
1310d96d3f30SStanimir Varbanov static int venus_session_ftb(struct venus_inst *inst,
1311d96d3f30SStanimir Varbanov 			     struct hfi_frame_data *out_frame)
1312d96d3f30SStanimir Varbanov {
1313d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1314d96d3f30SStanimir Varbanov 	struct hfi_session_fill_buffer_pkt pkt;
1315d96d3f30SStanimir Varbanov 	int ret;
1316d96d3f30SStanimir Varbanov 
1317d96d3f30SStanimir Varbanov 	ret = pkt_session_ftb(&pkt, inst, out_frame);
1318d96d3f30SStanimir Varbanov 	if (ret)
1319d96d3f30SStanimir Varbanov 		return ret;
1320d96d3f30SStanimir Varbanov 
13217f339fdcSVikash Garodia 	return venus_iface_cmdq_write(hdev, &pkt, false);
1322d96d3f30SStanimir Varbanov }
1323d96d3f30SStanimir Varbanov 
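/*
 * Both set and unset below forward only non-input buffer
 * descriptors to the firmware; input buffers are skipped,
 * apparently because they are delivered per-frame through ETB
 * rather than registered up front.
 */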
1324d96d3f30SStanimir Varbanov static int venus_session_set_buffers(struct venus_inst *inst,
1325d96d3f30SStanimir Varbanov 				     struct hfi_buffer_desc *bd)
1326d96d3f30SStanimir Varbanov {
1327d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1328d96d3f30SStanimir Varbanov 	struct hfi_session_set_buffers_pkt *pkt;
1329d96d3f30SStanimir Varbanov 	u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1330d96d3f30SStanimir Varbanov 	int ret;
1331d96d3f30SStanimir Varbanov 
1332d96d3f30SStanimir Varbanov 	if (bd->buffer_type == HFI_BUFFER_INPUT)
1333d96d3f30SStanimir Varbanov 		return 0;
1334d96d3f30SStanimir Varbanov 
1335d96d3f30SStanimir Varbanov 	pkt = (struct hfi_session_set_buffers_pkt *)packet;
1336d96d3f30SStanimir Varbanov 
1337d96d3f30SStanimir Varbanov 	ret = pkt_session_set_buffers(pkt, inst, bd);
1338d96d3f30SStanimir Varbanov 	if (ret)
1339d96d3f30SStanimir Varbanov 		return ret;
1340d96d3f30SStanimir Varbanov 
13417f339fdcSVikash Garodia 	return venus_iface_cmdq_write(hdev, pkt, false);
1342d96d3f30SStanimir Varbanov }
1343d96d3f30SStanimir Varbanov 
1344d96d3f30SStanimir Varbanov static int venus_session_unset_buffers(struct venus_inst *inst,
1345d96d3f30SStanimir Varbanov 				       struct hfi_buffer_desc *bd)
1346d96d3f30SStanimir Varbanov {
1347d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1348d96d3f30SStanimir Varbanov 	struct hfi_session_release_buffer_pkt *pkt;
1349d96d3f30SStanimir Varbanov 	u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1350d96d3f30SStanimir Varbanov 	int ret;
1351d96d3f30SStanimir Varbanov 
1352d96d3f30SStanimir Varbanov 	if (bd->buffer_type == HFI_BUFFER_INPUT)
1353d96d3f30SStanimir Varbanov 		return 0;
1354d96d3f30SStanimir Varbanov 
1355d96d3f30SStanimir Varbanov 	pkt = (struct hfi_session_release_buffer_pkt *)packet;
1356d96d3f30SStanimir Varbanov 
1357d96d3f30SStanimir Varbanov 	ret = pkt_session_unset_buffers(pkt, inst, bd);
1358d96d3f30SStanimir Varbanov 	if (ret)
1359d96d3f30SStanimir Varbanov 		return ret;
1360d96d3f30SStanimir Varbanov 
13617f339fdcSVikash Garodia 	return venus_iface_cmdq_write(hdev, pkt, true);
1362d96d3f30SStanimir Varbanov }
1363d96d3f30SStanimir Varbanov 
1364d96d3f30SStanimir Varbanov static int venus_session_load_res(struct venus_inst *inst)
1365d96d3f30SStanimir Varbanov {
13667f339fdcSVikash Garodia 	return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES, true);
1367d96d3f30SStanimir Varbanov }
1368d96d3f30SStanimir Varbanov 
1369d96d3f30SStanimir Varbanov static int venus_session_release_res(struct venus_inst *inst)
1370d96d3f30SStanimir Varbanov {
13717f339fdcSVikash Garodia 	return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES, true);
1372d96d3f30SStanimir Varbanov }
1373d96d3f30SStanimir Varbanov 
1374d96d3f30SStanimir Varbanov static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
1375d96d3f30SStanimir Varbanov 				       u32 seq_hdr_len)
1376d96d3f30SStanimir Varbanov {
1377d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1378d96d3f30SStanimir Varbanov 	struct hfi_session_parse_sequence_header_pkt *pkt;
1379d96d3f30SStanimir Varbanov 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
1380d96d3f30SStanimir Varbanov 	int ret;
1381d96d3f30SStanimir Varbanov 
1382d96d3f30SStanimir Varbanov 	pkt = (struct hfi_session_parse_sequence_header_pkt *)packet;
1383d96d3f30SStanimir Varbanov 
1384d96d3f30SStanimir Varbanov 	ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len);
1385d96d3f30SStanimir Varbanov 	if (ret)
1386d96d3f30SStanimir Varbanov 		return ret;
1387d96d3f30SStanimir Varbanov 
13887f339fdcSVikash Garodia 	ret = venus_iface_cmdq_write(hdev, pkt, false);
1389d96d3f30SStanimir Varbanov 	if (ret)
1390d96d3f30SStanimir Varbanov 		return ret;
1391d96d3f30SStanimir Varbanov 
1392d96d3f30SStanimir Varbanov 	return 0;
1393d96d3f30SStanimir Varbanov }
1394d96d3f30SStanimir Varbanov 
1395d96d3f30SStanimir Varbanov static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
1396d96d3f30SStanimir Varbanov 				     u32 seq_hdr_len)
1397d96d3f30SStanimir Varbanov {
1398d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1399d96d3f30SStanimir Varbanov 	struct hfi_session_get_sequence_header_pkt *pkt;
1400d96d3f30SStanimir Varbanov 	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
1401d96d3f30SStanimir Varbanov 	int ret;
1402d96d3f30SStanimir Varbanov 
1403d96d3f30SStanimir Varbanov 	pkt = (struct hfi_session_get_sequence_header_pkt *)packet;
1404d96d3f30SStanimir Varbanov 
1405d96d3f30SStanimir Varbanov 	ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len);
1406d96d3f30SStanimir Varbanov 	if (ret)
1407d96d3f30SStanimir Varbanov 		return ret;
1408d96d3f30SStanimir Varbanov 
14097f339fdcSVikash Garodia 	return venus_iface_cmdq_write(hdev, pkt, false);
1410d96d3f30SStanimir Varbanov }
1411d96d3f30SStanimir Varbanov 
1412d96d3f30SStanimir Varbanov static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
1413d96d3f30SStanimir Varbanov 				      void *pdata)
1414d96d3f30SStanimir Varbanov {
1415d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1416d96d3f30SStanimir Varbanov 	struct hfi_session_set_property_pkt *pkt;
1417d96d3f30SStanimir Varbanov 	u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1418d96d3f30SStanimir Varbanov 	int ret;
1419d96d3f30SStanimir Varbanov 
1420d96d3f30SStanimir Varbanov 	pkt = (struct hfi_session_set_property_pkt *)packet;
1421d96d3f30SStanimir Varbanov 
1422d96d3f30SStanimir Varbanov 	ret = pkt_session_set_property(pkt, inst, ptype, pdata);
14230aaddaafSMalathi Gottam 	if (ret == -ENOTSUPP)
14240aaddaafSMalathi Gottam 		return 0;
1425d96d3f30SStanimir Varbanov 	if (ret)
1426d96d3f30SStanimir Varbanov 		return ret;
1427d96d3f30SStanimir Varbanov 
14287f339fdcSVikash Garodia 	return venus_iface_cmdq_write(hdev, pkt, false);
1429d96d3f30SStanimir Varbanov }
1430d96d3f30SStanimir Varbanov 
1431d96d3f30SStanimir Varbanov static int venus_session_get_property(struct venus_inst *inst, u32 ptype)
1432d96d3f30SStanimir Varbanov {
1433d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1434d96d3f30SStanimir Varbanov 	struct hfi_session_get_property_pkt pkt;
1435d96d3f30SStanimir Varbanov 	int ret;
1436d96d3f30SStanimir Varbanov 
1437d96d3f30SStanimir Varbanov 	ret = pkt_session_get_property(&pkt, inst, ptype);
1438d96d3f30SStanimir Varbanov 	if (ret)
1439d96d3f30SStanimir Varbanov 		return ret;
1440d96d3f30SStanimir Varbanov 
14417f339fdcSVikash Garodia 	return venus_iface_cmdq_write(hdev, &pkt, true);
1442d96d3f30SStanimir Varbanov }
1443d96d3f30SStanimir Varbanov 
1444d96d3f30SStanimir Varbanov static int venus_resume(struct venus_core *core)
1445d96d3f30SStanimir Varbanov {
1446d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(core);
1447d96d3f30SStanimir Varbanov 	int ret = 0;
1448d96d3f30SStanimir Varbanov 
1449d96d3f30SStanimir Varbanov 	mutex_lock(&hdev->lock);
1450d96d3f30SStanimir Varbanov 
1451d96d3f30SStanimir Varbanov 	if (!hdev->suspended)
1452d96d3f30SStanimir Varbanov 		goto unlock;
1453d96d3f30SStanimir Varbanov 
1454d96d3f30SStanimir Varbanov 	ret = venus_power_on(hdev);
1455d96d3f30SStanimir Varbanov 
1456d96d3f30SStanimir Varbanov unlock:
1457d96d3f30SStanimir Varbanov 	if (!ret)
1458d96d3f30SStanimir Varbanov 		hdev->suspended = false;
1459d96d3f30SStanimir Varbanov 
1460d96d3f30SStanimir Varbanov 	mutex_unlock(&hdev->lock);
1461d96d3f30SStanimir Varbanov 
1462d96d3f30SStanimir Varbanov 	return ret;
1463d96d3f30SStanimir Varbanov }
1464d96d3f30SStanimir Varbanov 
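/*
 * Venus 1xx suspend: after a successful PC_PREP handshake the host
 * only powers the core off once the last queued packet was
 * HFI_CMD_SYS_PC_PREP, the command and message queues are empty
 * and the firmware has raised PC_READY in CPU_CS_SCIACMDARG0.
 */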
1465d96d3f30SStanimir Varbanov static int venus_suspend_1xx(struct venus_core *core)
1466d96d3f30SStanimir Varbanov {
1467d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(core);
1468d96d3f30SStanimir Varbanov 	struct device *dev = core->dev;
1469ff2a7013SBryan O'Donoghue 	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1470d96d3f30SStanimir Varbanov 	u32 ctrl_status;
1471d96d3f30SStanimir Varbanov 	int ret;
1472d96d3f30SStanimir Varbanov 
1473d96d3f30SStanimir Varbanov 	if (!hdev->power_enabled || hdev->suspended)
1474d96d3f30SStanimir Varbanov 		return 0;
1475d96d3f30SStanimir Varbanov 
1476d96d3f30SStanimir Varbanov 	mutex_lock(&hdev->lock);
1477d96d3f30SStanimir Varbanov 	ret = venus_is_valid_state(hdev);
1478d96d3f30SStanimir Varbanov 	mutex_unlock(&hdev->lock);
1479d96d3f30SStanimir Varbanov 
1480d96d3f30SStanimir Varbanov 	if (!ret) {
1481d96d3f30SStanimir Varbanov 		dev_err(dev, "bad state, cannot suspend\n");
1482d96d3f30SStanimir Varbanov 		return -EINVAL;
1483d96d3f30SStanimir Varbanov 	}
1484d96d3f30SStanimir Varbanov 
1485d96d3f30SStanimir Varbanov 	ret = venus_prepare_power_collapse(hdev, true);
1486d96d3f30SStanimir Varbanov 	if (ret) {
1487d96d3f30SStanimir Varbanov 		dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
1488d96d3f30SStanimir Varbanov 		return ret;
1489d96d3f30SStanimir Varbanov 	}
1490d96d3f30SStanimir Varbanov 
1491d96d3f30SStanimir Varbanov 	mutex_lock(&hdev->lock);
1492d96d3f30SStanimir Varbanov 
1493d96d3f30SStanimir Varbanov 	if (hdev->last_packet_type != HFI_CMD_SYS_PC_PREP) {
1494d96d3f30SStanimir Varbanov 		mutex_unlock(&hdev->lock);
1495d96d3f30SStanimir Varbanov 		return -EINVAL;
1496d96d3f30SStanimir Varbanov 	}
1497d96d3f30SStanimir Varbanov 
1498d96d3f30SStanimir Varbanov 	ret = venus_are_queues_empty(hdev);
1499d96d3f30SStanimir Varbanov 	if (ret < 0 || !ret) {
1500d96d3f30SStanimir Varbanov 		mutex_unlock(&hdev->lock);
1501d96d3f30SStanimir Varbanov 		return -EINVAL;
1502d96d3f30SStanimir Varbanov 	}
1503d96d3f30SStanimir Varbanov 
1504ff2a7013SBryan O'Donoghue 	ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1505d96d3f30SStanimir Varbanov 	if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
1506d96d3f30SStanimir Varbanov 		mutex_unlock(&hdev->lock);
1507d96d3f30SStanimir Varbanov 		return -EINVAL;
1508d96d3f30SStanimir Varbanov 	}
1509d96d3f30SStanimir Varbanov 
1510d96d3f30SStanimir Varbanov 	ret = venus_power_off(hdev);
1511d96d3f30SStanimir Varbanov 	if (ret) {
1512d96d3f30SStanimir Varbanov 		mutex_unlock(&hdev->lock);
1513d96d3f30SStanimir Varbanov 		return ret;
1514d96d3f30SStanimir Varbanov 	}
1515d96d3f30SStanimir Varbanov 
1516d96d3f30SStanimir Varbanov 	hdev->suspended = true;
1517d96d3f30SStanimir Varbanov 
1518d96d3f30SStanimir Varbanov 	mutex_unlock(&hdev->lock);
1519d96d3f30SStanimir Varbanov 
1520d96d3f30SStanimir Varbanov 	return 0;
1521d96d3f30SStanimir Varbanov }
1522d96d3f30SStanimir Varbanov 
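/*
 * Idle checks used via readx_poll_timeout() by the 3xx+ suspend
 * path below: both helpers combine the ARM9 WFI bit from the
 * wrapper CPU status register (the TZ variant on IRIS2) with
 * either the firmware idle bit or PC_READY in CPU_CS_SCIACMDARG0.
 */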
1523bc897723SStanimir Varbanov static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
1524bc897723SStanimir Varbanov {
1525ff2a7013SBryan O'Donoghue 	void __iomem *wrapper_base = hdev->core->wrapper_base;
1526e396e75fSBryan O'Donoghue 	void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base;
1527ff2a7013SBryan O'Donoghue 	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1528bc897723SStanimir Varbanov 	u32 ctrl_status, cpu_status;
1529bc897723SStanimir Varbanov 
15303b96e82dSKonrad Dybcio 	if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core))
1531e396e75fSBryan O'Donoghue 		cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
1532e396e75fSBryan O'Donoghue 	else
1533ff2a7013SBryan O'Donoghue 		cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
1534ff2a7013SBryan O'Donoghue 	ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1535bc897723SStanimir Varbanov 
1536bc897723SStanimir Varbanov 	if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
1537bc897723SStanimir Varbanov 	    ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1538bc897723SStanimir Varbanov 		return true;
1539bc897723SStanimir Varbanov 
1540bc897723SStanimir Varbanov 	return false;
1541bc897723SStanimir Varbanov }
1542bc897723SStanimir Varbanov 
1543bc897723SStanimir Varbanov static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
1544bc897723SStanimir Varbanov {
1545ff2a7013SBryan O'Donoghue 	void __iomem *wrapper_base = hdev->core->wrapper_base;
1546e396e75fSBryan O'Donoghue 	void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base;
1547ff2a7013SBryan O'Donoghue 	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1548bc897723SStanimir Varbanov 	u32 ctrl_status, cpu_status;
1549bc897723SStanimir Varbanov 
1550365b4824SKonrad Dybcio 	if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core))
1551e396e75fSBryan O'Donoghue 		cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
1552e396e75fSBryan O'Donoghue 	else
1553ff2a7013SBryan O'Donoghue 		cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
1554ff2a7013SBryan O'Donoghue 	ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1555bc897723SStanimir Varbanov 
1556bc897723SStanimir Varbanov 	if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
1557bc897723SStanimir Varbanov 	    ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
1558bc897723SStanimir Varbanov 		return true;
1559bc897723SStanimir Varbanov 
1560bc897723SStanimir Varbanov 	return false;
1561bc897723SStanimir Varbanov }
1562bc897723SStanimir Varbanov 
1563d96d3f30SStanimir Varbanov static int venus_suspend_3xx(struct venus_core *core)
1564d96d3f30SStanimir Varbanov {
1565d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(core);
1566d96d3f30SStanimir Varbanov 	struct device *dev = core->dev;
1567ff2a7013SBryan O'Donoghue 	void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
15688dbebb2bSStanimir Varbanov 	u32 ctrl_status;
1569bc897723SStanimir Varbanov 	bool val;
1570d96d3f30SStanimir Varbanov 	int ret;
1571d96d3f30SStanimir Varbanov 
1572d96d3f30SStanimir Varbanov 	if (!hdev->power_enabled || hdev->suspended)
1573d96d3f30SStanimir Varbanov 		return 0;
1574d96d3f30SStanimir Varbanov 
1575d96d3f30SStanimir Varbanov 	mutex_lock(&hdev->lock);
1576d96d3f30SStanimir Varbanov 	ret = venus_is_valid_state(hdev);
1577d96d3f30SStanimir Varbanov 	mutex_unlock(&hdev->lock);
1578d96d3f30SStanimir Varbanov 
1579d96d3f30SStanimir Varbanov 	if (!ret) {
1580d96d3f30SStanimir Varbanov 		dev_err(dev, "bad state, cannot suspend\n");
1581d96d3f30SStanimir Varbanov 		return -EINVAL;
1582d96d3f30SStanimir Varbanov 	}
1583d96d3f30SStanimir Varbanov 
1584ff2a7013SBryan O'Donoghue 	ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
15858dbebb2bSStanimir Varbanov 	if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
15868dbebb2bSStanimir Varbanov 		goto power_off;
15878dbebb2bSStanimir Varbanov 
1588bc897723SStanimir Varbanov 	/*
1589bc897723SStanimir Varbanov 	 * Power collapse sequence for Venus 3xx and 4xx versions:
1590bc897723SStanimir Varbanov 	 * 1. Check for ARM9 and video core to be idle by checking WFI bit
1591bc897723SStanimir Varbanov 	 *    (bit 0) in CPU status register and by checking Idle (bit 30) in
1592bc897723SStanimir Varbanov 	 *    Control status register for video core.
1593bc897723SStanimir Varbanov 	 * 2. Send a command to prepare for power collapse.
1594bc897723SStanimir Varbanov 	 * 3. Check for WFI and PC_READY bits.
1595bc897723SStanimir Varbanov 	 */
1596bc897723SStanimir Varbanov 	ret = readx_poll_timeout(venus_cpu_and_video_core_idle, hdev, val, val,
1597bc897723SStanimir Varbanov 				 1500, 100 * 1500);
159890307ebeSLuca Weiss 	if (ret) {
159990307ebeSLuca Weiss 		dev_err(dev, "wait for cpu and video core idle fail (%d)\n", ret);
1600bc897723SStanimir Varbanov 		return ret;
160190307ebeSLuca Weiss 	}
1602d96d3f30SStanimir Varbanov 
1603d96d3f30SStanimir Varbanov 	ret = venus_prepare_power_collapse(hdev, false);
1604d96d3f30SStanimir Varbanov 	if (ret) {
1605bc897723SStanimir Varbanov 		dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
1606d96d3f30SStanimir Varbanov 		return ret;
1607d96d3f30SStanimir Varbanov 	}
1608d96d3f30SStanimir Varbanov 
1609bc897723SStanimir Varbanov 	ret = readx_poll_timeout(venus_cpu_idle_and_pc_ready, hdev, val, val,
1610bc897723SStanimir Varbanov 				 1500, 100 * 1500);
1611bc897723SStanimir Varbanov 	if (ret)
1612bc897723SStanimir Varbanov 		return ret;
1613d96d3f30SStanimir Varbanov 
16148dbebb2bSStanimir Varbanov power_off:
1615d96d3f30SStanimir Varbanov 	mutex_lock(&hdev->lock);
1616d96d3f30SStanimir Varbanov 
1617d96d3f30SStanimir Varbanov 	ret = venus_power_off(hdev);
1618d96d3f30SStanimir Varbanov 	if (ret) {
1619d96d3f30SStanimir Varbanov 		dev_err(dev, "venus_power_off (%d)\n", ret);
1620d96d3f30SStanimir Varbanov 		mutex_unlock(&hdev->lock);
1621d96d3f30SStanimir Varbanov 		return ret;
1622d96d3f30SStanimir Varbanov 	}
1623d96d3f30SStanimir Varbanov 
1624d96d3f30SStanimir Varbanov 	hdev->suspended = true;
1625d96d3f30SStanimir Varbanov 
1626d96d3f30SStanimir Varbanov 	mutex_unlock(&hdev->lock);
1627d96d3f30SStanimir Varbanov 
1628d96d3f30SStanimir Varbanov 	return 0;
1629d96d3f30SStanimir Varbanov }
1630d96d3f30SStanimir Varbanov 
1631d96d3f30SStanimir Varbanov static int venus_suspend(struct venus_core *core)
1632d96d3f30SStanimir Varbanov {
16337ed9e0b3SBryan O'Donoghue 	if (IS_V3(core) || IS_V4(core) || IS_V6(core))
1634d96d3f30SStanimir Varbanov 		return venus_suspend_3xx(core);
1635d96d3f30SStanimir Varbanov 
1636d96d3f30SStanimir Varbanov 	return venus_suspend_1xx(core);
1637d96d3f30SStanimir Varbanov }
1638d96d3f30SStanimir Varbanov 
1639d96d3f30SStanimir Varbanov static const struct hfi_ops venus_hfi_ops = {
1640d96d3f30SStanimir Varbanov 	.core_init			= venus_core_init,
1641d96d3f30SStanimir Varbanov 	.core_deinit			= venus_core_deinit,
1642d96d3f30SStanimir Varbanov 	.core_ping			= venus_core_ping,
1643d96d3f30SStanimir Varbanov 	.core_trigger_ssr		= venus_core_trigger_ssr,
1644d96d3f30SStanimir Varbanov 
1645d96d3f30SStanimir Varbanov 	.session_init			= venus_session_init,
1646d96d3f30SStanimir Varbanov 	.session_end			= venus_session_end,
1647d96d3f30SStanimir Varbanov 	.session_abort			= venus_session_abort,
1648d96d3f30SStanimir Varbanov 	.session_flush			= venus_session_flush,
1649d96d3f30SStanimir Varbanov 	.session_start			= venus_session_start,
1650d96d3f30SStanimir Varbanov 	.session_stop			= venus_session_stop,
1651d96d3f30SStanimir Varbanov 	.session_continue		= venus_session_continue,
1652d96d3f30SStanimir Varbanov 	.session_etb			= venus_session_etb,
1653d96d3f30SStanimir Varbanov 	.session_ftb			= venus_session_ftb,
1654d96d3f30SStanimir Varbanov 	.session_set_buffers		= venus_session_set_buffers,
1655d96d3f30SStanimir Varbanov 	.session_unset_buffers		= venus_session_unset_buffers,
1656d96d3f30SStanimir Varbanov 	.session_load_res		= venus_session_load_res,
1657d96d3f30SStanimir Varbanov 	.session_release_res		= venus_session_release_res,
1658d96d3f30SStanimir Varbanov 	.session_parse_seq_hdr		= venus_session_parse_seq_hdr,
1659d96d3f30SStanimir Varbanov 	.session_get_seq_hdr		= venus_session_get_seq_hdr,
1660d96d3f30SStanimir Varbanov 	.session_set_property		= venus_session_set_property,
1661d96d3f30SStanimir Varbanov 	.session_get_property		= venus_session_get_property,
1662d96d3f30SStanimir Varbanov 
1663d96d3f30SStanimir Varbanov 	.resume				= venus_resume,
1664d96d3f30SStanimir Varbanov 	.suspend			= venus_suspend,
1665d96d3f30SStanimir Varbanov 
1666d96d3f30SStanimir Varbanov 	.isr				= venus_isr,
1667d96d3f30SStanimir Varbanov 	.isr_thread			= venus_isr_thread,
1668d96d3f30SStanimir Varbanov };
1669d96d3f30SStanimir Varbanov 
1670d96d3f30SStanimir Varbanov void venus_hfi_destroy(struct venus_core *core)
1671d96d3f30SStanimir Varbanov {
1672d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(core);
1673d96d3f30SStanimir Varbanov 
1674686ee9b6SMauro Carvalho Chehab 	core->priv = NULL;
1675d96d3f30SStanimir Varbanov 	venus_interface_queues_release(hdev);
1676d96d3f30SStanimir Varbanov 	mutex_destroy(&hdev->lock);
1677d96d3f30SStanimir Varbanov 	kfree(hdev);
1678d96d3f30SStanimir Varbanov 	core->ops = NULL;
1679d96d3f30SStanimir Varbanov }
1680d96d3f30SStanimir Varbanov 
1681d96d3f30SStanimir Varbanov int venus_hfi_create(struct venus_core *core)
1682d96d3f30SStanimir Varbanov {
1683d96d3f30SStanimir Varbanov 	struct venus_hfi_device *hdev;
1684d96d3f30SStanimir Varbanov 	int ret;
1685d96d3f30SStanimir Varbanov 
1686d96d3f30SStanimir Varbanov 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
1687d96d3f30SStanimir Varbanov 	if (!hdev)
1688d96d3f30SStanimir Varbanov 		return -ENOMEM;
1689d96d3f30SStanimir Varbanov 
1690d96d3f30SStanimir Varbanov 	mutex_init(&hdev->lock);
1691d96d3f30SStanimir Varbanov 
1692d96d3f30SStanimir Varbanov 	hdev->core = core;
1693d96d3f30SStanimir Varbanov 	hdev->suspended = true;
1694d96d3f30SStanimir Varbanov 	core->priv = hdev;
1695d96d3f30SStanimir Varbanov 	core->ops = &venus_hfi_ops;
1696d96d3f30SStanimir Varbanov 
1697d96d3f30SStanimir Varbanov 	ret = venus_interface_queues_init(hdev);
1698d96d3f30SStanimir Varbanov 	if (ret)
1699d96d3f30SStanimir Varbanov 		goto err_kfree;
1700d96d3f30SStanimir Varbanov 
1701d96d3f30SStanimir Varbanov 	return 0;
1702d96d3f30SStanimir Varbanov 
1703d96d3f30SStanimir Varbanov err_kfree:
1704d96d3f30SStanimir Varbanov 	kfree(hdev);
1705d96d3f30SStanimir Varbanov 	core->priv = NULL;
1706d96d3f30SStanimir Varbanov 	core->ops = NULL;
1707d96d3f30SStanimir Varbanov 	return ret;
1708d96d3f30SStanimir Varbanov }
170973d513e4SStanimir Varbanov 
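/*
 * Re-arm the shared interface queues (likely after a firmware
 * restart): reset each queue header to its defaults, rebuild the
 * queue table header, disable rx_req on the debug queue and
 * reprogram the SFR buffer size, with a write barrier so the
 * structures are settled in memory.
 */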
171073d513e4SStanimir Varbanov void venus_hfi_queues_reinit(struct venus_core *core)
171173d513e4SStanimir Varbanov {
171273d513e4SStanimir Varbanov 	struct venus_hfi_device *hdev = to_hfi_priv(core);
171373d513e4SStanimir Varbanov 	struct hfi_queue_table_header *tbl_hdr;
171473d513e4SStanimir Varbanov 	struct iface_queue *queue;
171573d513e4SStanimir Varbanov 	struct hfi_sfr *sfr;
171673d513e4SStanimir Varbanov 	unsigned int i;
171773d513e4SStanimir Varbanov 
171873d513e4SStanimir Varbanov 	mutex_lock(&hdev->lock);
171973d513e4SStanimir Varbanov 
172073d513e4SStanimir Varbanov 	for (i = 0; i < IFACEQ_NUM; i++) {
172173d513e4SStanimir Varbanov 		queue = &hdev->queues[i];
172273d513e4SStanimir Varbanov 		queue->qhdr =
172373d513e4SStanimir Varbanov 			IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
172473d513e4SStanimir Varbanov 
172573d513e4SStanimir Varbanov 		venus_set_qhdr_defaults(queue->qhdr);
172673d513e4SStanimir Varbanov 
172773d513e4SStanimir Varbanov 		queue->qhdr->start_addr = queue->qmem.da;
172873d513e4SStanimir Varbanov 
172973d513e4SStanimir Varbanov 		if (i == IFACEQ_CMD_IDX)
173073d513e4SStanimir Varbanov 			queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
173173d513e4SStanimir Varbanov 		else if (i == IFACEQ_MSG_IDX)
173273d513e4SStanimir Varbanov 			queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
173373d513e4SStanimir Varbanov 		else if (i == IFACEQ_DBG_IDX)
173473d513e4SStanimir Varbanov 			queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
173573d513e4SStanimir Varbanov 	}
173673d513e4SStanimir Varbanov 
173773d513e4SStanimir Varbanov 	tbl_hdr = hdev->ifaceq_table.kva;
173873d513e4SStanimir Varbanov 	tbl_hdr->version = 0;
173973d513e4SStanimir Varbanov 	tbl_hdr->size = IFACEQ_TABLE_SIZE;
174073d513e4SStanimir Varbanov 	tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
174173d513e4SStanimir Varbanov 	tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
174273d513e4SStanimir Varbanov 	tbl_hdr->num_q = IFACEQ_NUM;
174373d513e4SStanimir Varbanov 	tbl_hdr->num_active_q = IFACEQ_NUM;
174473d513e4SStanimir Varbanov 
174573d513e4SStanimir Varbanov 	/*
174673d513e4SStanimir Varbanov 	 * Set the receive request to zero on the debug queue as there is no
174773d513e4SStanimir Varbanov 	 * need for an interrupt from the video hardware for debug messages
174873d513e4SStanimir Varbanov 	 */
174973d513e4SStanimir Varbanov 	queue = &hdev->queues[IFACEQ_DBG_IDX];
175073d513e4SStanimir Varbanov 	queue->qhdr->rx_req = 0;
175173d513e4SStanimir Varbanov 
175273d513e4SStanimir Varbanov 	sfr = hdev->sfr.kva;
175373d513e4SStanimir Varbanov 	sfr->buf_size = ALIGNED_SFR_SIZE;
175473d513e4SStanimir Varbanov 
175573d513e4SStanimir Varbanov 	/* ensure table and queue header structs are settled in memory */
175673d513e4SStanimir Varbanov 	wmb();
175773d513e4SStanimir Varbanov 
175873d513e4SStanimir Varbanov 	mutex_unlock(&hdev->lock);
175973d513e4SStanimir Varbanov }
1760