// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gtt.h"
#include "intel_migrate.h"
#include "intel_ring.h"

struct insert_pte_data {
	u64 offset;
};

#define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */

#define GET_CCS_BYTES(i915, size)	(HAS_FLAT_CCS(i915) ? \
					 DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE) : 0)
static bool engine_supports_migration(struct intel_engine_cs *engine)
{
	if (!engine)
		return false;

	/*
	 * We need the ability to prevent arbitration (MI_ARB_ON_OFF),
	 * the ability to write PTEs using inline data (MI_STORE_DATA_IMM)
	 * and of course the ability to do the block transfer (blits).
	 */
	GEM_BUG_ON(engine->class != COPY_ENGINE_CLASS);

	return true;
}

static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
				struct i915_page_table *pt,
				void *data)
{
	struct insert_pte_data *d = data;

	/*
	 * Insert a dummy PTE into every PT that will map to LMEM to ensure
	 * we have a correctly setup PDE structure for later use.
	 */
	vm->insert_page(vm, 0, d->offset, I915_CACHE_NONE, PTE_LM);
	GEM_BUG_ON(!pt->is_compact);
	d->offset += SZ_2M;
}

static void xehpsdv_insert_pte(struct i915_address_space *vm,
			       struct i915_page_table *pt,
			       void *data)
{
	struct insert_pte_data *d = data;

	/*
	 * We are playing tricks here, since the actual pt, from the hw
	 * pov, is only 256bytes with 32 entries, or 4096bytes with 512
	 * entries, but we are still guaranteed that the physical
	 * alignment is 64K underneath for the pt, and we are careful
	 * not to access the space in the void.
	 */
	vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE, PTE_LM);
	d->offset += SZ_64K;
}

static void insert_pte(struct i915_address_space *vm,
		       struct i915_page_table *pt,
		       void *data)
{
	struct insert_pte_data *d = data;

	vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE,
			i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0);
	d->offset += PAGE_SIZE;
}

static struct i915_address_space *migrate_vm(struct intel_gt *gt)
{
	struct i915_vm_pt_stash stash = {};
	struct i915_ppgtt *vm;
	int err;
	int i;

	/*
	 * We construct a very special VM for use by all migration contexts,
	 * it is kept pinned so that it can be used at any time. As we need
	 * to pre-allocate the page directories for the migration VM, this
	 * limits us to only using a small number of prepared vma.
	 *
	 * To be able to pipeline and reschedule migration operations while
	 * avoiding unnecessary contention on the vm itself, the PTE updates
	 * are inline with the blits. All the blits use the same fixed
	 * addresses, with the backing store redirection being updated on the
	 * fly. Only 2 implicit vma are used for all migration operations.
	 *
	 * We lay the ppGTT out as:
	 *
	 *	[0, CHUNK_SZ) -> first object
	 *	[CHUNK_SZ, 2 * CHUNK_SZ) -> second object
	 *	[2 * CHUNK_SZ, 2 * CHUNK_SZ + 2 * CHUNK_SZ >> 9] -> PTE
	 *
	 * By exposing the dma addresses of the page directories themselves
	 * within the ppGTT, we are then able to rewrite the PTE prior to use.
	 * But the PTE update and subsequent migration operation must be atomic,
	 * i.e. within the same non-preemptible window so that we do not switch
	 * to another migration context that overwrites the PTE.
	 *
	 * This changes quite a bit on platforms with HAS_64K_PAGES support,
	 * where we instead have three windows, each CHUNK_SZ in size. The
	 * first is reserved for mapping system-memory, and that just uses the
	 * 512 entry layout using 4K GTT pages. The other two windows just map
	 * lmem pages and must use the new compact 32 entry layout using 64K GTT
	 * pages, which ensures we can address any lmem object that the user
	 * throws at us. We then also use xehpsdv_toggle_pdes as a way of
	 * just toggling the PDE bit (GEN12_PDE_64K) for us, to enable the
	 * compact layout for each of these page tables that fall within the
	 * [CHUNK_SZ, 3 * CHUNK_SZ) range.
	 *
	 * We lay the ppGTT out as:
	 *
	 * [0, CHUNK_SZ) -> first window/object, maps smem
	 * [CHUNK_SZ, 2 * CHUNK_SZ) -> second window/object, maps lmem src
	 * [2 * CHUNK_SZ, 3 * CHUNK_SZ) -> third window/object, maps lmem dst
	 *
	 * For the PTE window it's also quite different, since each PTE must
	 * point to some 64K page, one for each PT (since it's in lmem), and yet
	 * each is only <= 4096bytes, but since the unused space within that PTE
	 * range is never touched, this should be fine.
	 *
	 * So basically each PT now needs 64K of virtual memory, instead of 4K,
	 * which looks like:
	 *
	 * [3 * CHUNK_SZ, 3 * CHUNK_SZ + ((3 * CHUNK_SZ / SZ_2M) * SZ_64K)] -> PTE
	 */
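
	/*
	 * A worked example with CHUNK_SZ = 8M, following only the layout
	 * described above: without 64K pages the two windows span 16M and
	 * the PTE window needs 16M >> 9 = 32K; with 64K pages the three
	 * windows span 24M and the PTE window needs (24M / 2M) * 64K = 768K
	 * for the compact page tables.
	 */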

	vm = i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY);
	if (IS_ERR(vm))
		return ERR_CAST(vm);

	if (!vm->vm.allocate_va_range || !vm->vm.foreach) {
		err = -ENODEV;
		goto err_vm;
	}

	if (HAS_64K_PAGES(gt->i915))
		stash.pt_sz = I915_GTT_PAGE_SIZE_64K;

	/*
	 * Each engine instance is assigned its own chunk in the VM, so
	 * that we can run multiple instances concurrently
	 */
	for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
		struct intel_engine_cs *engine;
		u64 base = (u64)i << 32;
		struct insert_pte_data d = {};
		struct i915_gem_ww_ctx ww;
		u64 sz;

		engine = gt->engine_class[COPY_ENGINE_CLASS][i];
		if (!engine_supports_migration(engine))
			continue;

		/*
		 * We copy in 8MiB chunks. Each PDE covers 2MiB, so we need
		 * 4x2 page directories for source/destination.
		 */
		if (HAS_64K_PAGES(gt->i915))
			sz = 3 * CHUNK_SZ;
		else
			sz = 2 * CHUNK_SZ;
		d.offset = base + sz;

		/*
		 * We need another page directory setup so that we can write
		 * the 8x512 PTE in each chunk.
		 */
		if (HAS_64K_PAGES(gt->i915))
			sz += (sz / SZ_2M) * SZ_64K;
		else
			sz += (sz >> 12) * sizeof(u64);

		err = i915_vm_alloc_pt_stash(&vm->vm, &stash, sz);
		if (err)
			goto err_vm;

		for_i915_gem_ww(&ww, err, true) {
			err = i915_vm_lock_objects(&vm->vm, &ww);
			if (err)
				continue;
			err = i915_vm_map_pt_stash(&vm->vm, &stash);
			if (err)
				continue;

			vm->vm.allocate_va_range(&vm->vm, &stash, base, sz);
		}
		i915_vm_free_pt_stash(&vm->vm, &stash);
		if (err)
			goto err_vm;

		/* Now allow the GPU to rewrite the PTE via its own ppGTT */
		if (HAS_64K_PAGES(gt->i915)) {
			vm->vm.foreach(&vm->vm, base, d.offset - base,
				       xehpsdv_insert_pte, &d);
			d.offset = base + CHUNK_SZ;
			vm->vm.foreach(&vm->vm,
				       d.offset,
				       2 * CHUNK_SZ,
				       xehpsdv_toggle_pdes, &d);
		} else {
			vm->vm.foreach(&vm->vm, base, d.offset - base,
				       insert_pte, &d);
		}
	}

	return &vm->vm;

err_vm:
	i915_vm_put(&vm->vm);
	return ERR_PTR(err);
}

static struct intel_engine_cs *first_copy_engine(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	int i;

	for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
		engine = gt->engine_class[COPY_ENGINE_CLASS][i];
		if (engine_supports_migration(engine))
			return engine;
	}

	return NULL;
}

static struct intel_context *pinned_context(struct intel_gt *gt)
{
	static struct lock_class_key key;
	struct intel_engine_cs *engine;
	struct i915_address_space *vm;
	struct intel_context *ce;

	engine = first_copy_engine(gt);
	if (!engine)
		return ERR_PTR(-ENODEV);

	vm = migrate_vm(gt);
	if (IS_ERR(vm))
		return ERR_CAST(vm);

	ce = intel_engine_create_pinned_context(engine, vm, SZ_512K,
						I915_GEM_HWS_MIGRATE,
						&key, "migrate");
	i915_vm_put(vm);
	return ce;
}

int intel_migrate_init(struct intel_migrate *m, struct intel_gt *gt)
{
	struct intel_context *ce;

	memset(m, 0, sizeof(*m));

	ce = pinned_context(gt);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	m->context = ce;
	return 0;
}

static int random_index(unsigned int max)
{
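	/* Pick an index in [0, max) by scaling a random u32: (rand * max) >> 32. */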
	return upper_32_bits(mul_u32_u32(get_random_u32(), max));
}

static struct intel_context *__migrate_engines(struct intel_gt *gt)
{
	struct intel_engine_cs *engines[MAX_ENGINE_INSTANCE];
	struct intel_engine_cs *engine;
	unsigned int count, i;

	count = 0;
	for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
		engine = gt->engine_class[COPY_ENGINE_CLASS][i];
		if (engine_supports_migration(engine))
			engines[count++] = engine;
	}

	return intel_context_create(engines[random_index(count)]);
}

struct intel_context *intel_migrate_create_context(struct intel_migrate *m)
{
	struct intel_context *ce;

	/*
	 * We randomly distribute contexts across the engines upon construction,
	 * as they all share the same pinned vm, and so in order to allow
	 * multiple blits to run in parallel, we must construct each blit
	 * to use a different range of the vm for its GTT. This has to be
	 * known at construction, so we cannot use the late greedy load
	 * balancing of the virtual-engine.
	 */
	ce = __migrate_engines(m->context->engine->gt);
	if (IS_ERR(ce))
		return ce;

	ce->ring = NULL;
	ce->ring_size = SZ_256K;

	i915_vm_put(ce->vm);
	ce->vm = i915_vm_get(m->context->vm);

	return ce;
}

static inline struct sgt_dma sg_sgt(struct scatterlist *sg)
{
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}

static int emit_no_arbitration(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Explicitly disable preemption for this request. */
	*cs++ = MI_ARB_ON_OFF;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

static int emit_pte(struct i915_request *rq,
		    struct sgt_dma *it,
		    enum i915_cache_level cache_level,
		    bool is_lmem,
		    u64 offset,
		    int length)
{
	bool has_64K_pages = HAS_64K_PAGES(rq->engine->i915);
	const u64 encode = rq->context->vm->pte_encode(0, cache_level,
						       is_lmem ? PTE_LM : 0);
	struct intel_ring *ring = rq->ring;
	int pkt, dword_length;
	u32 total = 0;
	u32 page_size;
	u32 *hdr, *cs;

	GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8);

	page_size = I915_GTT_PAGE_SIZE;
	dword_length = 0x400;
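	/*
	 * A note on the numbers (derived from the values used here, not from
	 * any additional documentation): 0x400 dwords bounds each packet to
	 * roughly 512 qword PTE updates, about 2M worth of 4K pages, while
	 * the 0x40 used below for 64K lmem pages bounds it to roughly 32
	 * PTE updates, again about 2M worth of pages.
	 */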

	/* Compute the page directory offset for the target address range */
	if (has_64K_pages) {
		GEM_BUG_ON(!IS_ALIGNED(offset, SZ_2M));

		offset /= SZ_2M;
		offset *= SZ_64K;
		offset += 3 * CHUNK_SZ;

		if (is_lmem) {
			page_size = I915_GTT_PAGE_SIZE_64K;
			dword_length = 0x40;
		}
	} else {
		offset >>= 12;
		offset *= sizeof(u64);
		offset += 2 * CHUNK_SZ;
	}

	offset += (u64)rq->engine->instance << 32;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Pack as many PTE updates as possible into a single MI command */
	pkt = min_t(int, dword_length, ring->space / sizeof(u32) + 5);
	pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);

	hdr = cs;
	*cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	do {
		if (cs - hdr >= pkt) {
			int dword_rem;

			*hdr += cs - hdr - 2;
			*cs++ = MI_NOOP;

			ring->emit = (void *)cs - ring->vaddr;
			intel_ring_advance(rq, cs);
			intel_ring_update_space(ring);

			cs = intel_ring_begin(rq, 6);
			if (IS_ERR(cs))
				return PTR_ERR(cs);

			dword_rem = dword_length;
			if (has_64K_pages) {
				if (IS_ALIGNED(total, SZ_2M)) {
					offset = round_up(offset, SZ_64K);
				} else {
					dword_rem = SZ_2M - (total & (SZ_2M - 1));
					dword_rem /= page_size;
					dword_rem *= 2;
				}
			}

			pkt = min_t(int, dword_rem, ring->space / sizeof(u32) + 5);
			pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);

			hdr = cs;
			*cs++ = MI_STORE_DATA_IMM | REG_BIT(21);
			*cs++ = lower_32_bits(offset);
			*cs++ = upper_32_bits(offset);
		}

		GEM_BUG_ON(!IS_ALIGNED(it->dma, page_size));

		*cs++ = lower_32_bits(encode | it->dma);
		*cs++ = upper_32_bits(encode | it->dma);

		offset += 8;
		total += page_size;

		it->dma += page_size;
		if (it->dma >= it->max) {
			it->sg = __sg_next(it->sg);
			if (!it->sg || sg_dma_len(it->sg) == 0)
				break;

			it->dma = sg_dma_address(it->sg);
			it->max = it->dma + sg_dma_len(it->sg);
		}
	} while (total < length);

	*hdr += cs - hdr - 2;
	*cs++ = MI_NOOP;

	ring->emit = (void *)cs - ring->vaddr;
	intel_ring_advance(rq, cs);
	intel_ring_update_space(ring);

	return total;
}

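/*
 * Wa_1209644611: for certain copy sizes on ver 11 the fast copy blit is
 * avoided; when this check matches, emit_copy() below falls back to the
 * legacy XY_SRC_COPY path (description inferred from the caller, not from
 * the workaround database).
 */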
static bool wa_1209644611_applies(int ver, u32 size)
{
	u32 height = size >> PAGE_SHIFT;

	if (ver != 11)
		return false;

	return height % 4 == 3 && height <= 8;
}

/**
 * DOC: Flat-CCS - Memory compression for Local memory
 *
 * On Xe-HP and later devices, we use dedicated compression control state (CCS)
 * stored in local memory for each surface, to support the 3D and media
 * compression formats.
 *
 * The memory required for the CCS of the entire local memory is 1/256 of the
 * local memory size. So before the kernel boots, the required memory is
 * reserved for the CCS data and a secure register is programmed with the CCS
 * base address.
 *
 * Flat-CCS data needs to be cleared when an lmem object is allocated, and CCS
 * data can be copied in and out of the CCS region through
 * XY_CTRL_SURF_COPY_BLT. The CPU can't access the CCS data directly.
 *
 * i915 supports Flat-CCS only on lmem-only objects. When an object has smem in
 * its preference list, then on memory pressure i915 needs to migrate the lmem
 * content into smem. If the lmem object is Flat-CCS compressed by userspace,
 * then i915 would need to decompress it, but it lacks the required information
 * to do so. Hence i915 supports Flat-CCS only on lmem-only objects.
 *
 * When we exhaust the lmem, the lmem backing memory of Flat-CCS capable
 * objects can be temporarily evicted to smem, along with the auxiliary CCS
 * state, where it can potentially be swapped out at a later point, if
 * required. If userspace later touches the evicted pages, then we always move
 * the backing memory back to lmem, which includes restoring the saved CCS
 * state and potentially performing any required swap-in.
 *
 * For migration, lmem objects with smem in their placement list, such as
 * {lmem, smem}, are treated as non Flat-CCS capable objects.
 */

static inline u32 *i915_flush_dw(u32 *cmd, u32 flags)
{
	*cmd++ = MI_FLUSH_DW | flags;
	*cmd++ = 0;
	*cmd++ = 0;

	return cmd;
}

static int emit_copy_ccs(struct i915_request *rq,
			 u32 dst_offset, u8 dst_access,
			 u32 src_offset, u8 src_access, int size)
{
	struct drm_i915_private *i915 = rq->engine->i915;
	int mocs = rq->engine->gt->mocs.uc_index << 1;
	u32 num_ccs_blks;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	num_ccs_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
				    NUM_CCS_BYTES_PER_BLOCK);
	GEM_BUG_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER);
	cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);

	/*
	 * The XY_CTRL_SURF_COPY_BLT instruction is used to copy the CCS
	 * data in and out of the CCS region.
	 *
	 * We can copy at most 1024 blocks of 256 bytes using one
	 * XY_CTRL_SURF_COPY_BLT instruction.
	 *
	 * In case we need to copy more than 1024 blocks, we need to add
	 * another instruction to the same batch buffer.
	 *
	 * 1024 blocks of 256 bytes of CCS represent a total 256KB of CCS.
	 *
	 * 256 KB of CCS represents 256 * 256 KB = 64 MB of LMEM.
	 */
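
	/*
	 * For example, a full CHUNK_SZ (8M) chunk of lmem needs
	 * GET_CCS_BYTES() = 32K of CCS, i.e. 128 blocks of 256 bytes, well
	 * within the single-blit limit above (assuming the usual 1/256
	 * main-memory to CCS ratio).
	 */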
	*cs++ = XY_CTRL_SURF_COPY_BLT |
		src_access << SRC_ACCESS_TYPE_SHIFT |
		dst_access << DST_ACCESS_TYPE_SHIFT |
		((num_ccs_blks - 1) & CCS_SIZE_MASK) << CCS_SIZE_SHIFT;
	*cs++ = src_offset;
	*cs++ = rq->engine->instance |
		FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
	*cs++ = dst_offset;
	*cs++ = rq->engine->instance |
		FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);

	cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static int emit_copy(struct i915_request *rq,
		     u32 dst_offset, u32 src_offset, int size)
{
	const int ver = GRAPHICS_VER(rq->engine->i915);
	u32 instance = rq->engine->instance;
	u32 *cs;

	cs = intel_ring_begin(rq, ver >= 8 ? 10 : 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (ver >= 9 && !wa_1209644611_applies(ver, size)) {
		*cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
		*cs++ = BLT_DEPTH_32 | PAGE_SIZE;
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = dst_offset;
		*cs++ = instance;
		*cs++ = 0;
		*cs++ = PAGE_SIZE;
		*cs++ = src_offset;
		*cs++ = instance;
	} else if (ver >= 8) {
		*cs++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = dst_offset;
		*cs++ = instance;
		*cs++ = 0;
		*cs++ = PAGE_SIZE;
		*cs++ = src_offset;
		*cs++ = instance;
	} else {
		GEM_BUG_ON(instance);
		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
		*cs++ = dst_offset;
		*cs++ = PAGE_SIZE;
		*cs++ = src_offset;
	}

	intel_ring_advance(rq, cs);
	return 0;
}

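/*
 * Sum the dma lengths of a dma-mapped sg list, stopping at the first entry
 * with a zero dma length.
 */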
static u64 scatter_list_length(struct scatterlist *sg)
{
	u64 len = 0;

	while (sg && sg_dma_len(sg)) {
		len += sg_dma_len(sg);
		sg = sg_next(sg);
	}

	return len;
}

static int
calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
		   u64 bytes_to_cpy, u64 ccs_bytes_to_cpy)
{
	if (ccs_bytes_to_cpy && !src_is_lmem)
		/*
		 * When CHUNK_SZ is passed, all the pages up to CHUNK_SZ
		 * will be taken for the blt. On Flat-CCS capable platforms
		 * the smem object will have more pages than required for
		 * the main memory contents, so limit it to the size needed
		 * for main memory.
		 */
		return min_t(u64, bytes_to_cpy, CHUNK_SZ);
	else
		return CHUNK_SZ;
}

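/*
 * Advance the smem iterator past the first bytes_to_cpy bytes (the main
 * memory contents) so that it points at the extra pages holding the CCS
 * data.
 */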
static void get_ccs_sg_sgt(struct sgt_dma *it, u64 bytes_to_cpy)
{
	u64 len;

	do {
		GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
		len = it->max - it->dma;
		if (len > bytes_to_cpy) {
			it->dma += bytes_to_cpy;
			break;
		}

		bytes_to_cpy -= len;

		it->sg = __sg_next(it->sg);
		it->dma = sg_dma_address(it->sg);
		it->max = it->dma + sg_dma_len(it->sg);
	} while (bytes_to_cpy);
}

int
intel_context_migrate_copy(struct intel_context *ce,
			   const struct i915_deps *deps,
			   struct scatterlist *src,
			   enum i915_cache_level src_cache_level,
			   bool src_is_lmem,
			   struct scatterlist *dst,
			   enum i915_cache_level dst_cache_level,
			   bool dst_is_lmem,
			   struct i915_request **out)
{
	struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
	struct drm_i915_private *i915 = ce->engine->i915;
	u64 ccs_bytes_to_cpy = 0, bytes_to_cpy;
	enum i915_cache_level ccs_cache_level;
	u32 src_offset, dst_offset;
	u8 src_access, dst_access;
	struct i915_request *rq;
	u64 src_sz, dst_sz;
	bool ccs_is_src, overwrite_ccs;
	int err;

	GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
	GEM_BUG_ON(IS_DGFX(ce->engine->i915) && (!src_is_lmem && !dst_is_lmem));
	*out = NULL;

	GEM_BUG_ON(ce->ring->size < SZ_64K);

	src_sz = scatter_list_length(src);
	bytes_to_cpy = src_sz;

	if (HAS_FLAT_CCS(i915) && src_is_lmem ^ dst_is_lmem) {
		src_access = !src_is_lmem && dst_is_lmem;
		dst_access = !src_access;

		dst_sz = scatter_list_length(dst);
		if (src_is_lmem) {
			it_ccs = it_dst;
			ccs_cache_level = dst_cache_level;
			ccs_is_src = false;
		} else if (dst_is_lmem) {
			bytes_to_cpy = dst_sz;
			it_ccs = it_src;
			ccs_cache_level = src_cache_level;
			ccs_is_src = true;
		}

		/*
		 * When an eviction of the CCS data is needed, smem will have
		 * the extra pages to hold the CCS data.
		 *
		 * TO-DO: Want to move the size mismatch check to a WARN_ON,
		 * but we still get some smem->lmem requests with the same
		 * size. Need to fix it.
		 */
		ccs_bytes_to_cpy = src_sz != dst_sz ? GET_CCS_BYTES(i915, bytes_to_cpy) : 0;
		if (ccs_bytes_to_cpy)
			get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
	}

	overwrite_ccs = HAS_FLAT_CCS(i915) && !ccs_bytes_to_cpy && dst_is_lmem;

	src_offset = 0;
	dst_offset = CHUNK_SZ;
	if (HAS_64K_PAGES(ce->engine->i915)) {
		src_offset = 0;
		dst_offset = 0;
		if (src_is_lmem)
			src_offset = CHUNK_SZ;
		if (dst_is_lmem)
			dst_offset = 2 * CHUNK_SZ;
	}
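
	/*
	 * For example, on a HAS_64K_PAGES platform an smem -> lmem copy maps
	 * the source into the smem window at 0 and the destination into the
	 * lmem dst window at 2 * CHUNK_SZ, matching the layout described in
	 * migrate_vm().
	 */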

	do {
		int len;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_ce;
		}

		if (deps) {
			err = i915_request_await_deps(rq, deps);
			if (err)
				goto out_rq;

			if (rq->engine->emit_init_breadcrumb) {
				err = rq->engine->emit_init_breadcrumb(rq);
				if (err)
					goto out_rq;
			}

			deps = NULL;
		}

		/* The PTE updates + copy must not be interrupted. */
		err = emit_no_arbitration(rq);
		if (err)
			goto out_rq;

		src_sz = calculate_chunk_sz(i915, src_is_lmem,
					    bytes_to_cpy, ccs_bytes_to_cpy);

		len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
			       src_offset, src_sz);
		if (!len) {
			err = -EINVAL;
			goto out_rq;
		}
		if (len < 0) {
			err = len;
			goto out_rq;
		}

		err = emit_pte(rq, &it_dst, dst_cache_level, dst_is_lmem,
			       dst_offset, len);
		if (err < 0)
			goto out_rq;
		if (err < len) {
			err = -EINVAL;
			goto out_rq;
		}

		err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
		if (err)
			goto out_rq;

		err = emit_copy(rq, dst_offset, src_offset, len);
		if (err)
			goto out_rq;

		bytes_to_cpy -= len;

		if (ccs_bytes_to_cpy) {
			int ccs_sz;

			err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
			if (err)
				goto out_rq;

			ccs_sz = GET_CCS_BYTES(i915, len);
			err = emit_pte(rq, &it_ccs, ccs_cache_level, false,
				       ccs_is_src ? src_offset : dst_offset,
				       ccs_sz);
			if (err < 0)
				goto out_rq;
			if (err < ccs_sz) {
				err = -EINVAL;
				goto out_rq;
			}

			err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
			if (err)
				goto out_rq;

			err = emit_copy_ccs(rq, dst_offset, dst_access,
					    src_offset, src_access, len);
			if (err)
				goto out_rq;

			err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
			if (err)
				goto out_rq;
			ccs_bytes_to_cpy -= ccs_sz;
		} else if (overwrite_ccs) {
			err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
			if (err)
				goto out_rq;

			/*
			 * While we can't always restore/manage the CCS state,
			 * we still need to ensure we don't leak the CCS state
			 * from the previous user, so make sure we overwrite it
			 * with something.
			 */
			err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
					    dst_offset, DIRECT_ACCESS, len);
			if (err)
				goto out_rq;

			err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
			if (err)
				goto out_rq;
		}

		/* Arbitration is re-enabled between requests. */
out_rq:
		if (*out)
			i915_request_put(*out);
		*out = i915_request_get(rq);
		i915_request_add(rq);

		if (err)
			break;

		if (!bytes_to_cpy && !ccs_bytes_to_cpy) {
			if (src_is_lmem)
				WARN_ON(it_src.sg && sg_dma_len(it_src.sg));
			else
				WARN_ON(it_dst.sg && sg_dma_len(it_dst.sg));
			break;
		}

		if (WARN_ON(!it_src.sg || !sg_dma_len(it_src.sg) ||
			    !it_dst.sg || !sg_dma_len(it_dst.sg) ||
			    (ccs_bytes_to_cpy && (!it_ccs.sg ||
						  !sg_dma_len(it_ccs.sg))))) {
			err = -EINVAL;
			break;
		}

		cond_resched();
	} while (1);

out_ce:
	return err;
}

static int emit_clear(struct i915_request *rq, u32 offset, int size,
		      u32 value, bool is_lmem)
{
	struct drm_i915_private *i915 = rq->engine->i915;
	int mocs = rq->engine->gt->mocs.uc_index << 1;
	const int ver = GRAPHICS_VER(i915);
	int ring_sz;
	u32 *cs;

	GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);

	if (HAS_FLAT_CCS(i915) && ver >= 12)
		ring_sz = XY_FAST_COLOR_BLT_DW;
	else if (ver >= 8)
		ring_sz = 8;
	else
		ring_sz = 6;

	cs = intel_ring_begin(rq, ring_sz);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (HAS_FLAT_CCS(i915) && ver >= 12) {
		*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
			(XY_FAST_COLOR_BLT_DW - 2);
		*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |
			(PAGE_SIZE - 1);
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = offset;
		*cs++ = rq->engine->instance;
		*cs++ = !is_lmem << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
		/* BG7 */
		*cs++ = value;
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
		/* BG11 */
		*cs++ = 0;
		*cs++ = 0;
		/* BG13 */
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
	} else if (ver >= 8) {
		*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = offset;
		*cs++ = rq->engine->instance;
		*cs++ = value;
		*cs++ = MI_NOOP;
	} else {
		*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
		*cs++ = 0;
		*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
		*cs++ = offset;
		*cs++ = value;
	}

	intel_ring_advance(rq, cs);
	return 0;
}

int
intel_context_migrate_clear(struct intel_context *ce,
			    const struct i915_deps *deps,
			    struct scatterlist *sg,
			    enum i915_cache_level cache_level,
			    bool is_lmem,
			    u32 value,
			    struct i915_request **out)
{
	struct drm_i915_private *i915 = ce->engine->i915;
	struct sgt_dma it = sg_sgt(sg);
	struct i915_request *rq;
	u32 offset;
	int err;

	GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
	*out = NULL;

	GEM_BUG_ON(ce->ring->size < SZ_64K);

	offset = 0;
	if (HAS_64K_PAGES(i915) && is_lmem)
		offset = CHUNK_SZ;
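
	/*
	 * The CHUNK_SZ offset selects the second window, which migrate_vm()
	 * sets up to map lmem using the compact 64K page-table layout.
	 */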

	do {
		int len;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_ce;
		}

		if (deps) {
			err = i915_request_await_deps(rq, deps);
			if (err)
				goto out_rq;

			if (rq->engine->emit_init_breadcrumb) {
				err = rq->engine->emit_init_breadcrumb(rq);
				if (err)
					goto out_rq;
			}

			deps = NULL;
		}

		/* The PTE updates + clear must not be interrupted. */
		err = emit_no_arbitration(rq);
		if (err)
			goto out_rq;

		len = emit_pte(rq, &it, cache_level, is_lmem, offset, CHUNK_SZ);
		if (len <= 0) {
			err = len;
			goto out_rq;
		}

		err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
		if (err)
			goto out_rq;

		err = emit_clear(rq, offset, len, value, is_lmem);
		if (err)
			goto out_rq;

		if (HAS_FLAT_CCS(i915) && is_lmem && !value) {
			/*
			 * copy the content of memory into corresponding
			 * ccs surface
			 */
			err = emit_copy_ccs(rq, offset, INDIRECT_ACCESS, offset,
					    DIRECT_ACCESS, len);
			if (err)
				goto out_rq;
		}

		err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);

		/* Arbitration is re-enabled between requests. */
out_rq:
		if (*out)
			i915_request_put(*out);
		*out = i915_request_get(rq);
		i915_request_add(rq);
		if (err || !it.sg || !sg_dma_len(it.sg))
			break;

		cond_resched();
	} while (1);

out_ce:
	return err;
}

int intel_migrate_copy(struct intel_migrate *m,
		       struct i915_gem_ww_ctx *ww,
		       const struct i915_deps *deps,
		       struct scatterlist *src,
		       enum i915_cache_level src_cache_level,
		       bool src_is_lmem,
		       struct scatterlist *dst,
		       enum i915_cache_level dst_cache_level,
		       bool dst_is_lmem,
		       struct i915_request **out)
{
	struct intel_context *ce;
	int err;

	*out = NULL;
	if (!m->context)
		return -ENODEV;

	ce = intel_migrate_create_context(m);
	if (IS_ERR(ce))
		ce = intel_context_get(m->context);
	GEM_BUG_ON(IS_ERR(ce));

	err = intel_context_pin_ww(ce, ww);
	if (err)
		goto out;

	err = intel_context_migrate_copy(ce, deps,
					 src, src_cache_level, src_is_lmem,
					 dst, dst_cache_level, dst_is_lmem,
					 out);

	intel_context_unpin(ce);
out:
	intel_context_put(ce);
	return err;
}

int
intel_migrate_clear(struct intel_migrate *m,
		    struct i915_gem_ww_ctx *ww,
		    const struct i915_deps *deps,
		    struct scatterlist *sg,
		    enum i915_cache_level cache_level,
		    bool is_lmem,
		    u32 value,
		    struct i915_request **out)
{
	struct intel_context *ce;
	int err;

	*out = NULL;
	if (!m->context)
		return -ENODEV;

	ce = intel_migrate_create_context(m);
	if (IS_ERR(ce))
		ce = intel_context_get(m->context);
	GEM_BUG_ON(IS_ERR(ce));

	err = intel_context_pin_ww(ce, ww);
	if (err)
		goto out;

	err = intel_context_migrate_clear(ce, deps, sg, cache_level,
					  is_lmem, value, out);

	intel_context_unpin(ce);
out:
	intel_context_put(ce);
	return err;
}

void intel_migrate_fini(struct intel_migrate *m)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&m->context);
	if (!ce)
		return;

	intel_engine_destroy_pinned_context(ce);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_migrate.c"
#endif