1 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License version 2 and
5  * only version 2 as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  */
12 
13 #include "dpu_hwio.h"
14 #include "dpu_hw_catalog.h"
15 #include "dpu_hw_vbif.h"
16 
/* VBIF register offsets, relative to the block base in the VBIF reg map */
#define VBIF_VERSION			0x0000
#define VBIF_CLK_FORCE_CTRL0		0x0008
#define VBIF_CLK_FORCE_CTRL1		0x000C
/* static QoS remap registers (four 2-entry remap pairs) */
#define VBIF_QOS_REMAP_00		0x0020
#define VBIF_QOS_REMAP_01		0x0024
#define VBIF_QOS_REMAP_10		0x0028
#define VBIF_QOS_REMAP_11		0x002C
#define VBIF_WRITE_GATHER_EN		0x00AC
/* per-client outstanding-transaction limits: four 8-bit fields per register */
#define VBIF_IN_RD_LIM_CONF0		0x00B0
#define VBIF_IN_RD_LIM_CONF1		0x00B4
#define VBIF_IN_RD_LIM_CONF2		0x00B8
#define VBIF_IN_WR_LIM_CONF0		0x00C0
#define VBIF_IN_WR_LIM_CONF1		0x00C4
#define VBIF_IN_WR_LIM_CONF2		0x00C8
#define VBIF_OUT_RD_LIM_CONF0		0x00D0
#define VBIF_OUT_WR_LIM_CONF0		0x00D4
/* AXI memory type per client: 4-bit fields, 8 clients per register */
#define VBIF_OUT_AXI_AMEMTYPE_CONF0	0x0160
#define VBIF_OUT_AXI_AMEMTYPE_CONF1	0x0164
/* error status (read) and acknowledge (write) registers */
#define VBIF_XIN_PND_ERR		0x0190
#define VBIF_XIN_SRC_ERR		0x0194
#define VBIF_XIN_CLR_ERR		0x019C
/* per-client halt request (CTRL0) and halt status (CTRL1) bitmasks */
#define VBIF_XIN_HALT_CTRL0		0x0200
#define VBIF_XIN_HALT_CTRL1		0x0204
/* dynamic QoS remap tables, indexed by client bank and priority level */
#define VBIF_XINL_QOS_RP_REMAP_000	0x0550
#define VBIF_XINL_QOS_LVL_REMAP_000	0x0590
42 
43 static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
44 		u32 *pnd_errors, u32 *src_errors)
45 {
46 	struct dpu_hw_blk_reg_map *c;
47 	u32 pnd, src;
48 
49 	if (!vbif)
50 		return;
51 	c = &vbif->hw;
52 	pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
53 	src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
54 
55 	if (pnd_errors)
56 		*pnd_errors = pnd;
57 	if (src_errors)
58 		*src_errors = src;
59 
60 	DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
61 }
62 
63 static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
64 		u32 xin_id, u32 value)
65 {
66 	struct dpu_hw_blk_reg_map *c;
67 	u32 reg_off;
68 	u32 bit_off;
69 	u32 reg_val;
70 
71 	/*
72 	 * Assume 4 bits per bit field, 8 fields per 32-bit register so
73 	 * 16 bit fields maximum across two registers
74 	 */
75 	if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
76 		return;
77 
78 	c = &vbif->hw;
79 
80 	if (xin_id >= 8) {
81 		xin_id -= 8;
82 		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
83 	} else {
84 		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
85 	}
86 	bit_off = (xin_id & 0x7) * 4;
87 	reg_val = DPU_REG_READ(c, reg_off);
88 	reg_val &= ~(0x7 << bit_off);
89 	reg_val |= (value & 0x7) << bit_off;
90 	DPU_REG_WRITE(c, reg_off, reg_val);
91 }
92 
93 static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
94 		u32 xin_id, bool rd, u32 limit)
95 {
96 	struct dpu_hw_blk_reg_map *c = &vbif->hw;
97 	u32 reg_val;
98 	u32 reg_off;
99 	u32 bit_off;
100 
101 	if (rd)
102 		reg_off = VBIF_IN_RD_LIM_CONF0;
103 	else
104 		reg_off = VBIF_IN_WR_LIM_CONF0;
105 
106 	reg_off += (xin_id / 4) * 4;
107 	bit_off = (xin_id % 4) * 8;
108 	reg_val = DPU_REG_READ(c, reg_off);
109 	reg_val &= ~(0xFF << bit_off);
110 	reg_val |= (limit) << bit_off;
111 	DPU_REG_WRITE(c, reg_off, reg_val);
112 }
113 
114 static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
115 		u32 xin_id, bool rd)
116 {
117 	struct dpu_hw_blk_reg_map *c = &vbif->hw;
118 	u32 reg_val;
119 	u32 reg_off;
120 	u32 bit_off;
121 	u32 limit;
122 
123 	if (rd)
124 		reg_off = VBIF_IN_RD_LIM_CONF0;
125 	else
126 		reg_off = VBIF_IN_WR_LIM_CONF0;
127 
128 	reg_off += (xin_id / 4) * 4;
129 	bit_off = (xin_id % 4) * 8;
130 	reg_val = DPU_REG_READ(c, reg_off);
131 	limit = (reg_val >> bit_off) & 0xFF;
132 
133 	return limit;
134 }
135 
136 static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
137 		u32 xin_id, bool enable)
138 {
139 	struct dpu_hw_blk_reg_map *c = &vbif->hw;
140 	u32 reg_val;
141 
142 	reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
143 
144 	if (enable)
145 		reg_val |= BIT(xin_id);
146 	else
147 		reg_val &= ~BIT(xin_id);
148 
149 	DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
150 }
151 
152 static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
153 		u32 xin_id)
154 {
155 	struct dpu_hw_blk_reg_map *c = &vbif->hw;
156 	u32 reg_val;
157 
158 	reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
159 
160 	return (reg_val & BIT(xin_id)) ? true : false;
161 }
162 
163 static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
164 		u32 xin_id, u32 level, u32 remap_level)
165 {
166 	struct dpu_hw_blk_reg_map *c;
167 	u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
168 
169 	if (!vbif)
170 		return;
171 
172 	c = &vbif->hw;
173 
174 	reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
175 	reg_shift = (xin_id & 0x7) * 4;
176 
177 	reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
178 	reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
179 
180 	mask = 0x7 << reg_shift;
181 
182 	reg_val &= ~mask;
183 	reg_val |= (remap_level << reg_shift) & mask;
184 
185 	reg_val_lvl &= ~mask;
186 	reg_val_lvl |= (remap_level << reg_shift) & mask;
187 
188 	DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
189 	DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
190 }
191 
192 static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
193 {
194 	struct dpu_hw_blk_reg_map *c;
195 	u32 reg_val;
196 
197 	if (!vbif || xin_id >= MAX_XIN_COUNT)
198 		return;
199 
200 	c = &vbif->hw;
201 
202 	reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
203 	reg_val |= BIT(xin_id);
204 	DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
205 }
206 
207 static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
208 		unsigned long cap)
209 {
210 	ops->set_limit_conf = dpu_hw_set_limit_conf;
211 	ops->get_limit_conf = dpu_hw_get_limit_conf;
212 	ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
213 	ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
214 	if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
215 		ops->set_qos_remap = dpu_hw_set_qos_remap;
216 	ops->set_mem_type = dpu_hw_set_mem_type;
217 	ops->clear_errors = dpu_hw_clear_errors;
218 	ops->set_write_gather_en = dpu_hw_set_write_gather_en;
219 }
220 
221 static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
222 		const struct dpu_mdss_cfg *m,
223 		void __iomem *addr,
224 		struct dpu_hw_blk_reg_map *b)
225 {
226 	int i;
227 
228 	for (i = 0; i < m->vbif_count; i++) {
229 		if (vbif == m->vbif[i].id) {
230 			b->base_off = addr;
231 			b->blk_off = m->vbif[i].base;
232 			b->length = m->vbif[i].len;
233 			b->hwversion = m->hwversion;
234 			b->log_mask = DPU_DBG_MASK_VBIF;
235 			return &m->vbif[i];
236 		}
237 	}
238 
239 	return ERR_PTR(-EINVAL);
240 }
241 
242 struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
243 		void __iomem *addr,
244 		const struct dpu_mdss_cfg *m)
245 {
246 	struct dpu_hw_vbif *c;
247 	const struct dpu_vbif_cfg *cfg;
248 
249 	c = kzalloc(sizeof(*c), GFP_KERNEL);
250 	if (!c)
251 		return ERR_PTR(-ENOMEM);
252 
253 	cfg = _top_offset(idx, m, addr, &c->hw);
254 	if (IS_ERR_OR_NULL(cfg)) {
255 		kfree(c);
256 		return ERR_PTR(-EINVAL);
257 	}
258 
259 	/*
260 	 * Assign ops
261 	 */
262 	c->idx = idx;
263 	c->cap = cfg;
264 	_setup_vbif_ops(&c->ops, c->cap->features);
265 
266 	/* no need to register sub-range in dpu dbg, dump entire vbif io base */
267 
268 	return c;
269 }
270 
/*
 * dpu_hw_vbif_destroy - free a context created by dpu_hw_vbif_init
 * @vbif: context to free; NULL is a safe no-op (kfree(NULL) is allowed)
 */
void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
{
	kfree(vbif);
}
275