1 /*
2  * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published by
8  * the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include <drm/drm_util.h>
20 
21 #include "mdp5_kms.h"
22 #include "mdp5_smp.h"
23 
24 
/*
 * SMP (Shared Memory Pool) allocator state.  The pool is split into MMBs
 * (MacroTile Memory Blocks) that are handed out to per-pipe fetch clients;
 * register values are cached here and flushed to hw at commit time.
 */
struct mdp5_smp {
	struct drm_device *dev;

	uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */

	int blk_cnt;	/* total number of MMBs in the pool */
	int blk_size;	/* size of one MMB, in bytes */

	/* register cache */
	u32 alloc_w[22];	/* SMP_ALLOC_W words; 3 client fields per word (see update_smp_state). 22 presumably covers max blk_cnt/3 — TODO confirm against hw catalog */
	u32 alloc_r[22];	/* SMP_ALLOC_R shadow, kept identical to alloc_w */
	u32 pipe_reqprio_fifo_wm0[SSPP_MAX];	/* per-pipe fifo watermark levels */
	u32 pipe_reqprio_fifo_wm1[SSPP_MAX];
	u32 pipe_reqprio_fifo_wm2[SSPP_MAX];
};
40 
41 static inline
42 struct mdp5_kms *get_kms(struct mdp5_smp *smp)
43 {
44 	struct msm_drm_private *priv = smp->dev->dev_private;
45 
46 	return to_mdp5_kms(to_mdp_kms(priv->kms));
47 }
48 
49 static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
50 {
51 #define CID_UNUSED	0
52 
53 	if (WARN_ON(plane >= pipe2nclients(pipe)))
54 		return CID_UNUSED;
55 
56 	/*
57 	 * Note on SMP clients:
58 	 * For ViG pipes, fetch Y/Cr/Cb-components clients are always
59 	 * consecutive, and in that order.
60 	 *
61 	 * e.g.:
62 	 * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
63 	 *	Y  plane's client ID is N
64 	 *	Cr plane's client ID is N + 1
65 	 *	Cb plane's client ID is N + 2
66 	 */
67 
68 	return mdp5_cfg->smp.clients[pipe] + plane;
69 }
70 
71 /* allocate blocks for the specified request: */
72 static int smp_request_block(struct mdp5_smp *smp,
73 		struct mdp5_smp_state *state,
74 		u32 cid, int nblks)
75 {
76 	void *cs = state->client_state[cid];
77 	int i, avail, cnt = smp->blk_cnt;
78 	uint8_t reserved;
79 
80 	/* we shouldn't be requesting blocks for an in-use client: */
81 	WARN_ON(bitmap_weight(cs, cnt) > 0);
82 
83 	reserved = smp->reserved[cid];
84 
85 	if (reserved) {
86 		nblks = max(0, nblks - reserved);
87 		DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
88 	}
89 
90 	avail = cnt - bitmap_weight(state->state, cnt);
91 	if (nblks > avail) {
92 		DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
93 				nblks, avail);
94 		return -ENOSPC;
95 	}
96 
97 	for (i = 0; i < nblks; i++) {
98 		int blk = find_first_zero_bit(state->state, cnt);
99 		set_bit(blk, cs);
100 		set_bit(blk, state->state);
101 	}
102 
103 	return 0;
104 }
105 
106 static void set_fifo_thresholds(struct mdp5_smp *smp,
107 		enum mdp5_pipe pipe, int nblks)
108 {
109 	u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
110 	u32 val;
111 
112 	/* 1/4 of SMP pool that is being fetched */
113 	val = (nblks * smp_entries_per_blk) / 4;
114 
115 	smp->pipe_reqprio_fifo_wm0[pipe] = val * 1;
116 	smp->pipe_reqprio_fifo_wm1[pipe] = val * 2;
117 	smp->pipe_reqprio_fifo_wm2[pipe] = val * 3;
118 }
119 
120 /*
121  * NOTE: looks like if horizontal decimation is used (if we supported that)
122  * then the width used to calculate SMP block requirements is the post-
123  * decimated width.  Ie. SMP buffering sits downstream of decimation (which
124  * presumably happens during the dma from scanout buffer).
125  */
126 uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
127 		const struct mdp_format *format,
128 		u32 width, bool hdecim)
129 {
130 	const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
131 	struct mdp5_kms *mdp5_kms = get_kms(smp);
132 	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
133 	int i, hsub, nplanes, nlines;
134 	u32 fmt = format->base.pixel_format;
135 	uint32_t blkcfg = 0;
136 
137 	nplanes = info->num_planes;
138 	hsub = info->hsub;
139 
140 	/* different if BWC (compressed framebuffer?) enabled: */
141 	nlines = 2;
142 
143 	/* Newer MDPs have split/packing logic, which fetches sub-sampled
144 	 * U and V components (splits them from Y if necessary) and packs
145 	 * them together, writes to SMP using a single client.
146 	 */
147 	if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
148 		fmt = DRM_FORMAT_NV24;
149 		nplanes = 2;
150 
151 		/* if decimation is enabled, HW decimates less on the
152 		 * sub sampled chroma components
153 		 */
154 		if (hdecim && (hsub > 1))
155 			hsub = 1;
156 	}
157 
158 	for (i = 0; i < nplanes; i++) {
159 		int n, fetch_stride, cpp;
160 
161 		cpp = info->cpp[i];
162 		fetch_stride = width * cpp / (i ? hsub : 1);
163 
164 		n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
165 
166 		/* for hw rev v1.00 */
167 		if (rev == 0)
168 			n = roundup_pow_of_two(n);
169 
170 		blkcfg |= (n << (8 * i));
171 	}
172 
173 	return blkcfg;
174 }
175 
176 int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
177 		enum mdp5_pipe pipe, uint32_t blkcfg)
178 {
179 	struct mdp5_kms *mdp5_kms = get_kms(smp);
180 	struct drm_device *dev = mdp5_kms->dev;
181 	int i, ret;
182 
183 	for (i = 0; i < pipe2nclients(pipe); i++) {
184 		u32 cid = pipe2client(pipe, i);
185 		int n = blkcfg & 0xff;
186 
187 		if (!n)
188 			continue;
189 
190 		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
191 		ret = smp_request_block(smp, state, cid, n);
192 		if (ret) {
193 			DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
194 					n, ret);
195 			return ret;
196 		}
197 
198 		blkcfg >>= 8;
199 	}
200 
201 	state->assigned |= (1 << pipe);
202 
203 	return 0;
204 }
205 
206 /* Release SMP blocks for all clients of the pipe */
207 void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
208 		enum mdp5_pipe pipe)
209 {
210 	int i;
211 	int cnt = smp->blk_cnt;
212 
213 	for (i = 0; i < pipe2nclients(pipe); i++) {
214 		u32 cid = pipe2client(pipe, i);
215 		void *cs = state->client_state[cid];
216 
217 		/* update global state: */
218 		bitmap_andnot(state->state, state->state, cs, cnt);
219 
220 		/* clear client's state */
221 		bitmap_zero(cs, cnt);
222 	}
223 
224 	state->released |= (1 << pipe);
225 }
226 
/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to
 * happen after scanout completes.
 */
/*
 * Stamp client 'cid' into the cached ALLOC_W/ALLOC_R words for every block
 * set in 'assigned'.  Each 32-bit ALLOC word packs three client fields, so
 * block B lands in word B/3, field B%3.  Returns the number of blocks
 * programmed (used by the caller for fifo watermark sizing).
 */
static unsigned update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned)
{
	int cnt = smp->blk_cnt;
	unsigned nblks = 0;
	u32 blk, val;

	for_each_set_bit(blk, *assigned, cnt) {
		int idx = blk / 3;	/* ALLOC register word index */
		int fld = blk % 3;	/* client field within the word */

		val = smp->alloc_w[idx];

		/* replace only this block's client field, preserve the rest */
		switch (fld) {
		case 0:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
			break;
		case 1:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
			break;
		case 2:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
			break;
		}

		/* read-side allocation mirrors the write side */
		smp->alloc_w[idx] = val;
		smp->alloc_r[idx] = val;

		nblks++;
	}

	return nblks;
}
266 
267 static void write_smp_alloc_regs(struct mdp5_smp *smp)
268 {
269 	struct mdp5_kms *mdp5_kms = get_kms(smp);
270 	int i, num_regs;
271 
272 	num_regs = smp->blk_cnt / 3 + 1;
273 
274 	for (i = 0; i < num_regs; i++) {
275 		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i),
276 			   smp->alloc_w[i]);
277 		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i),
278 			   smp->alloc_r[i]);
279 	}
280 }
281 
282 static void write_smp_fifo_regs(struct mdp5_smp *smp)
283 {
284 	struct mdp5_kms *mdp5_kms = get_kms(smp);
285 	int i;
286 
287 	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
288 		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
289 		enum mdp5_pipe pipe = hwpipe->pipe;
290 
291 		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe),
292 			   smp->pipe_reqprio_fifo_wm0[pipe]);
293 		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe),
294 			   smp->pipe_reqprio_fifo_wm1[pipe]);
295 		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe),
296 			   smp->pipe_reqprio_fifo_wm2[pipe]);
297 	}
298 }
299 
300 void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
301 {
302 	enum mdp5_pipe pipe;
303 
304 	for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) {
305 		unsigned i, nblks = 0;
306 
307 		for (i = 0; i < pipe2nclients(pipe); i++) {
308 			u32 cid = pipe2client(pipe, i);
309 			void *cs = state->client_state[cid];
310 
311 			nblks += update_smp_state(smp, cid, cs);
312 
313 			DBG("assign %s:%u, %u blks",
314 				pipe2name(pipe), i, nblks);
315 		}
316 
317 		set_fifo_thresholds(smp, pipe, nblks);
318 	}
319 
320 	write_smp_alloc_regs(smp);
321 	write_smp_fifo_regs(smp);
322 
323 	state->assigned = 0;
324 }
325 
326 void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
327 {
328 	enum mdp5_pipe pipe;
329 
330 	for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) {
331 		DBG("release %s", pipe2name(pipe));
332 		set_fifo_thresholds(smp, pipe, 0);
333 	}
334 
335 	write_smp_fifo_regs(smp);
336 
337 	state->released = 0;
338 }
339 
340 void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
341 {
342 	struct mdp5_kms *mdp5_kms = get_kms(smp);
343 	struct mdp5_hw_pipe_state *hwpstate;
344 	struct mdp5_smp_state *state;
345 	struct mdp5_global_state *global_state;
346 	int total = 0, i, j;
347 
348 	drm_printf(p, "name\tinuse\tplane\n");
349 	drm_printf(p, "----\t-----\t-----\n");
350 
351 	if (drm_can_sleep())
352 		drm_modeset_lock(&mdp5_kms->glob_state_lock, NULL);
353 
354 	global_state = mdp5_get_existing_global_state(mdp5_kms);
355 
356 	/* grab these *after* we hold the state_lock */
357 	hwpstate = &global_state->hwpipe;
358 	state = &global_state->smp;
359 
360 	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
361 		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
362 		struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx];
363 		enum mdp5_pipe pipe = hwpipe->pipe;
364 		for (j = 0; j < pipe2nclients(pipe); j++) {
365 			u32 cid = pipe2client(pipe, j);
366 			void *cs = state->client_state[cid];
367 			int inuse = bitmap_weight(cs, smp->blk_cnt);
368 
369 			drm_printf(p, "%s:%d\t%d\t%s\n",
370 				pipe2name(pipe), j, inuse,
371 				plane ? plane->name : NULL);
372 
373 			total += inuse;
374 		}
375 	}
376 
377 	drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
378 	drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
379 			bitmap_weight(state->state, smp->blk_cnt));
380 
381 	if (drm_can_sleep())
382 		drm_modeset_unlock(&mdp5_kms->glob_state_lock);
383 }
384 
/* Free the SMP allocator state allocated by mdp5_smp_init(). */
void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}
389 
390 struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
391 {
392 	struct mdp5_smp_state *state;
393 	struct mdp5_global_state *global_state;
394 	struct mdp5_smp *smp = NULL;
395 	int ret;
396 
397 	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
398 	if (unlikely(!smp)) {
399 		ret = -ENOMEM;
400 		goto fail;
401 	}
402 
403 	smp->dev = mdp5_kms->dev;
404 	smp->blk_cnt = cfg->mmb_count;
405 	smp->blk_size = cfg->mmb_size;
406 
407 	global_state = mdp5_get_existing_global_state(mdp5_kms);
408 	state = &global_state->smp;
409 
410 	/* statically tied MMBs cannot be re-allocated: */
411 	bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
412 	memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
413 
414 	return smp;
415 fail:
416 	if (smp)
417 		mdp5_smp_destroy(smp);
418 
419 	return ERR_PTR(ret);
420 }
421