// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "intel_gpu_commands.h"
#include "intel_gt_regs.h"

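/*
 * batch_advance() checks that a writer filled in exactly the number of
 * dwords it reserved with batch_alloc_items()/batch_alloc_bytes().
 */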
#define GT3_INLINE_DATA_DELAYS 0x1E00
#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))

struct cb_kernel {
	const void *data;
	u32 size;
};

#define CB_KERNEL(name) { .data = (name), .size = sizeof(name) }

#include "ivb_clear_kernel.c"
static const struct cb_kernel cb_kernel_ivb = CB_KERNEL(ivb_clear_kernel);

#include "hsw_clear_kernel.c"
static const struct cb_kernel cb_kernel_hsw = CB_KERNEL(hsw_clear_kernel);

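/*
 * A batch_chunk is a write cursor into one sub-range of the batch object;
 * the command stream and the pipeline state are built in separate chunks
 * of the same vma.
 */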
struct batch_chunk {
	struct i915_vma *vma;
	u32 offset;
	u32 *start;
	u32 *end;
	u32 max_items;
};

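/*
 * batch_vals describes the layout of the batch object: how many HW threads
 * to saturate, where the state and surface sections start, and the total
 * size of the allocation.
 */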
struct batch_vals {
	u32 max_threads;
	u32 state_start;
	u32 surface_start;
	u32 surface_height;
	u32 surface_width;
	u32 size;
};

static int num_primitives(const struct batch_vals *bv)
{
	/*
	 * We need to saturate the GPU with work in order to dispatch
	 * a shader on every HW thread, and clear the thread-local registers.
	 * In short, we have to dispatch work faster than the shaders can
	 * run in order to fill the EU and occupy each HW thread.
	 */
	return bv->max_threads;
}

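/*
 * Pick per-platform defaults: the maximum number of HW threads for the
 * GT SKU and a scratch surface used as the output target for the clear
 * kernel.
 */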
static void
batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
{
	if (IS_HASWELL(i915)) {
		switch (INTEL_INFO(i915)->gt) {
		default:
		case 1:
			bv->max_threads = 70;
			break;
		case 2:
			bv->max_threads = 140;
			break;
		case 3:
			bv->max_threads = 280;
			break;
		}
		bv->surface_height = 16 * 16;
		bv->surface_width = 32 * 2 * 16;
	} else {
		switch (INTEL_INFO(i915)->gt) {
		default:
		case 1: /* including vlv */
			bv->max_threads = 36;
			break;
		case 2:
			bv->max_threads = 128;
			break;
		}
		bv->surface_height = 16 * 8;
		bv->surface_width = 32 * 16;
	}
	bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
	bv->surface_start = bv->state_start + SZ_4K;
	bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
}

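/*
 * Initialise a chunk as a window of @max_bytes into @vma, starting @offset
 * bytes into the shared CPU mapping at @start.
 */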
static void batch_init(struct batch_chunk *bc,
		       struct i915_vma *vma,
		       u32 *start, u32 offset, u32 max_bytes)
{
	bc->vma = vma;
	bc->offset = offset;
	bc->start = start + bc->offset / sizeof(*bc->start);
	bc->end = bc->start;
	bc->max_items = max_bytes / sizeof(*bc->start);
}

static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
{
	return (cs - bc->start) * sizeof(*bc->start) + bc->offset;
}

static u32 batch_addr(const struct batch_chunk *bc)
{
	return i915_vma_offset(bc->vma);
}

static void batch_add(struct batch_chunk *bc, const u32 d)
{
	GEM_BUG_ON((bc->end - bc->start) >= bc->max_items);
	*bc->end++ = d;
}

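/*
 * Reserve @items dwords in the chunk, optionally padding up to an @align
 * byte boundary first. The padding is zeroed, which decodes as MI_NOOP in
 * the command chunk and as harmless zeroes in the state chunk.
 */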
static u32 *batch_alloc_items(struct batch_chunk *bc, u32 align, u32 items)
{
	u32 *map;

	if (align) {
		u32 *end = PTR_ALIGN(bc->end, align);

		memset32(bc->end, 0, end - bc->end);
		bc->end = end;
	}

	map = bc->end;
	bc->end += items;

	return map;
}

static u32 *batch_alloc_bytes(struct batch_chunk *bc, u32 align, u32 bytes)
{
	GEM_BUG_ON(!IS_ALIGNED(bytes, sizeof(*bc->start)));
	return batch_alloc_items(bc, align, bytes / sizeof(*bc->start));
}

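/*
 * Write a 32-byte aligned, 8-dword SURFACE_STATE describing a B8G8R8A8 2D
 * surface at @dst_offset within the batch; this is the target the clear
 * kernel renders into.
 */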
static u32
gen7_fill_surface_state(struct batch_chunk *state,
			const u32 dst_offset,
			const struct batch_vals *bv)
{
	u32 surface_h = bv->surface_height;
	u32 surface_w = bv->surface_width;
	u32 *cs = batch_alloc_items(state, 32, 8);
	u32 offset = batch_offset(state, cs);

#define SURFACE_2D 1
#define SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0
#define RENDER_CACHE_READ_WRITE 1

	*cs++ = SURFACE_2D << 29 |
		(SURFACEFORMAT_B8G8R8A8_UNORM << 18) |
		(RENDER_CACHE_READ_WRITE << 8);

	*cs++ = batch_addr(state) + dst_offset;

	*cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);
	*cs++ = surface_w;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
#define SHADER_CHANNELS(r, g, b, a) \
	(((r) << 25) | ((g) << 22) | ((b) << 19) | ((a) << 16))
	*cs++ = SHADER_CHANNELS(4, 5, 6, 7);
	batch_advance(state, cs);

	return offset;
}

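/*
 * Emit a binding table with a single entry pointing at the surface state;
 * the remaining seven slots are left empty.
 */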
static u32
gen7_fill_binding_table(struct batch_chunk *state,
			const struct batch_vals *bv)
{
	u32 surface_start =
		gen7_fill_surface_state(state, bv->surface_start, bv);
	u32 *cs = batch_alloc_items(state, 32, 8);
	u32 offset = batch_offset(state, cs);

	*cs++ = surface_start - state->offset;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	batch_advance(state, cs);

	return offset;
}

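/* Copy the clear kernel binary into the state chunk, 64-byte aligned. */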
static u32
gen7_fill_kernel_data(struct batch_chunk *state,
		      const u32 *data,
		      const u32 size)
{
	return batch_offset(state,
			    memcpy(batch_alloc_bytes(state, 64, size),
				   data, size));
}

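/*
 * Build the INTERFACE_DESCRIPTOR_DATA entries: the first descriptor points
 * at the clear kernel and its binding table, the remaining (count - 1)
 * descriptors are zeroed dummies.
 */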
static u32
gen7_fill_interface_descriptor(struct batch_chunk *state,
			       const struct batch_vals *bv,
			       const struct cb_kernel *kernel,
			       unsigned int count)
{
	u32 kernel_offset =
		gen7_fill_kernel_data(state, kernel->data, kernel->size);
	u32 binding_table = gen7_fill_binding_table(state, bv);
	u32 *cs = batch_alloc_items(state, 32, 8 * count);
	u32 offset = batch_offset(state, cs);

	*cs++ = kernel_offset;
	*cs++ = (1 << 7) | (1 << 13);
	*cs++ = 0;
	*cs++ = (binding_table - state->offset) | 1;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	/* descriptors 1..(count - 1) are dummy idds */
	memset32(cs, 0x00, (count - 1) * 8);
	batch_advance(state, cs + (count - 1) * 8);

	return offset;
}

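/*
 * Program STATE_BASE_ADDRESS: all bases point at the batch itself, with
 * the surface state base offset by @surface_state_base.
 */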
static void
gen7_emit_state_base_address(struct batch_chunk *batch,
			     u32 surface_state_base)
{
	u32 *cs = batch_alloc_items(batch, 0, 10);

	*cs++ = STATE_BASE_ADDRESS | (10 - 2);
	/* general */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	/* surface */
	*cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY;
	/* dynamic */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	/* indirect */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
	/* instruction */
	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;

	/* general/dynamic/indirect/instruction access upper bounds */
	*cs++ = 0;
	*cs++ = BASE_ADDRESS_MODIFY;
	*cs++ = 0;
	*cs++ = BASE_ADDRESS_MODIFY;
	batch_advance(batch, cs);
}

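/*
 * MEDIA_VFE_STATE: no scratch space, all HW threads enabled, URB and CURBE
 * allocation in 256-bit units.
 */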
static void
gen7_emit_vfe_state(struct batch_chunk *batch,
		    const struct batch_vals *bv,
		    u32 urb_size, u32 curbe_size,
		    u32 mode)
{
	u32 threads = bv->max_threads - 1;
	u32 *cs = batch_alloc_items(batch, 32, 8);

	*cs++ = MEDIA_VFE_STATE | (8 - 2);

	/* scratch buffer */
	*cs++ = 0;

	/* number of threads & urb entries for GPGPU vs Media Mode */
	*cs++ = threads << 16 | 1 << 8 | mode << 2;

	*cs++ = 0;

	/* urb entry size & curbe size in 256 bits unit */
	*cs++ = urb_size << 16 | curbe_size;

	/* scoreboard */
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	batch_advance(batch, cs);
}

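/* Point the media pipeline at the interface descriptors built above. */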
static void
gen7_emit_interface_descriptor_load(struct batch_chunk *batch,
				    const u32 interface_descriptor,
				    unsigned int count)
{
	u32 *cs = batch_alloc_items(batch, 8, 4);

	*cs++ = MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2);
	*cs++ = 0;
	*cs++ = count * 8 * sizeof(*cs);

	/*
	 * interface descriptor address - it is relative to the dynamic
	 * state base address
	 */
	*cs++ = interface_descriptor;
	batch_advance(batch, cs);
}

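/*
 * Each MEDIA_OBJECT dispatches one thread of the clear kernel; the inline
 * data carries the object's x/y offset and the GT3_INLINE_DATA_DELAYS value.
 */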
static void
gen7_emit_media_object(struct batch_chunk *batch,
		       unsigned int media_object_index)
{
	unsigned int x_offset = (media_object_index % 16) * 64;
	unsigned int y_offset = (media_object_index / 16) * 16;
	unsigned int pkt = 6 + 3;
	u32 *cs;

	cs = batch_alloc_items(batch, 8, pkt);

	*cs++ = MEDIA_OBJECT | (pkt - 2);

	/* interface descriptor offset */
	*cs++ = 0;

	/* without indirect data */
	*cs++ = 0;
	*cs++ = 0;

	/* scoreboard */
	*cs++ = 0;
	*cs++ = 0;

	/* inline */
	*cs++ = y_offset << 16 | x_offset;
	*cs++ = 0;
	*cs++ = GT3_INLINE_DATA_DELAYS;

	batch_advance(batch, cs);
}

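/*
 * PIPE_CONTROL helpers: a full flush of the render caches, and a state
 * cache invalidate preceded by the ivb stall workaround.
 */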
static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
{
	u32 *cs = batch_alloc_items(batch, 0, 4);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		PIPE_CONTROL_DC_FLUSH_ENABLE |
		PIPE_CONTROL_CS_STALL;
	*cs++ = 0;
	*cs++ = 0;

	batch_advance(batch, cs);
}

static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
{
	u32 *cs = batch_alloc_items(batch, 0, 10);

	/* ivb: Stall before STATE_CACHE_INVALIDATE */
	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
		PIPE_CONTROL_CS_STALL;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	batch_advance(batch, cs);
}

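/*
 * Build the full batch: reset the inherited context registers, switch to
 * the media pipeline, program the base addresses and VFE/interface state,
 * then dispatch one media object per HW thread before terminating with
 * MI_BATCH_BUFFER_END.
 */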
static void emit_batch(struct i915_vma * const vma,
		       u32 *start,
		       const struct batch_vals *bv)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	const unsigned int desc_count = 1;
	const unsigned int urb_size = 1;
	struct batch_chunk cmds, state;
	u32 descriptors;
	unsigned int i;

	batch_init(&cmds, vma, start, 0, bv->state_start);
	batch_init(&state, vma, start, bv->state_start, SZ_4K);

	descriptors = gen7_fill_interface_descriptor(&state, bv,
						     IS_HASWELL(i915) ?
						     &cb_kernel_hsw :
						     &cb_kernel_ivb,
						     desc_count);

	/* Reset inherited context registers */
	gen7_emit_pipeline_flush(&cmds);
	gen7_emit_pipeline_invalidate(&cmds);
	batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
	batch_add(&cmds, 0xffff0000 |
			((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
			 HIZ_RAW_STALL_OPT_DISABLE :
			 0));
	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
	batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
	gen7_emit_pipeline_invalidate(&cmds);
	gen7_emit_pipeline_flush(&cmds);

	/* Switch to the media pipeline and our base address */
	gen7_emit_pipeline_invalidate(&cmds);
	batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
	batch_add(&cmds, MI_NOOP);
	gen7_emit_pipeline_invalidate(&cmds);

	gen7_emit_pipeline_flush(&cmds);
	gen7_emit_state_base_address(&cmds, descriptors);
	gen7_emit_pipeline_invalidate(&cmds);

	/* Set the clear-residual kernel state */
	gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
	gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);

	/* Execute the kernel on all HW threads */
	for (i = 0; i < num_primitives(bv); i++)
		gen7_emit_media_object(&cmds, i);

	batch_add(&cmds, MI_BATCH_BUFFER_END);
}

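/*
 * Emit the clear-residuals batch into @vma. Calling with a NULL vma only
 * reports the required object size; a caller is expected to do roughly
 * (illustrative sketch, error handling omitted):
 *
 *	size = gen7_setup_clear_gpr_bb(engine, NULL);
 *	... create and pin a vma of at least that size ...
 *	err = gen7_setup_clear_gpr_bb(engine, vma);
 */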
int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
			    struct i915_vma * const vma)
{
	struct batch_vals bv;
	u32 *batch;

	batch_get_defaults(engine->i915, &bv);
	if (!vma)
		return bv.size;

	GEM_BUG_ON(vma->obj->base.size < bv.size);

	batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	emit_batch(vma, memset(batch, 0, bv.size), &bv);

	i915_gem_object_flush_map(vma->obj);
	__i915_gem_object_release_map(vma->obj);

	return 0;
}