/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_vma.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

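/*
 * Return true if a GPU write may leave data in the CPU cache that later
 * needs to be clflushed: only cacheable (LLC/snooped) objects on
 * integrated parts qualify; discrete devices and uncached or
 * write-through objects never do.
 */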
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	if (IS_DGFX(i915))
		return false;

	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

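/*
 * A CPU write needs a clflush when the object is not coherent for CPU
 * writes, unless its cachelines are already tracked as dirty and the
 * flush is deferred. Objects that are coherent only need flushing while
 * the display engine (which does not snoop) may be scanning them out,
 * and never on discrete devices.
 */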
bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	if (IS_DGFX(i915))
		return false;

	/* Currently in use by HW (display engine)? Keep flushed. */
	return i915_gem_object_is_framebuffer(obj);
}

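/*
 * Flush any writes pending in @flush_domains before the object leaves
 * that write domain: drain GGTT write barriers (and the frontbuffer) for
 * GTT writes, issue a write memory barrier for WC writes, clflush for
 * CPU writes, and mark the cachelines dirty for GPU (render) writes that
 * will need a clflush before any non-coherent access.
 */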
static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
	struct i915_vma *vma;

	assert_object_held(obj);

	if (!(obj->write_domain & flush_domains))
		return;

	switch (obj->write_domain) {
	case I915_GEM_DOMAIN_GTT:
		spin_lock(&obj->vma.lock);
		for_each_ggtt_vma(vma, obj) {
			if (i915_vma_unset_ggtt_write(vma))
				intel_gt_flush_ggtt_writes(vma->vm->gt);
		}
		spin_unlock(&obj->vma.lock);

		i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
		break;

	case I915_GEM_DOMAIN_WC:
		wmb();
		break;

	case I915_GEM_DOMAIN_CPU:
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		break;

	case I915_GEM_DOMAIN_RENDER:
		if (gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
		break;
	}

	obj->write_domain = 0;
}

static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
	/*
	 * We manually flush the CPU domain so that we can override and
	 * force the flush for the display, and perform it asynchronously.
	 */
	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
	if (obj->cache_dirty)
		i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
	obj->write_domain = 0;
}

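/*
 * Flush the object for scanout if it is (or has been) used as a
 * framebuffer; i915_gem_object_flush_if_display() takes the object lock
 * itself, while the _locked variant expects the caller to hold it.
 */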
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_is_framebuffer(obj))
		return;

	i915_gem_object_lock(obj, NULL);
	__i915_gem_object_flush_for_display(obj);
	i915_gem_object_unlock(obj);
}

void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_framebuffer(obj))
		__i915_gem_object_flush_for_display(obj);
}

/**
 * i915_gem_object_set_to_wc_domain - Moves a single object to the WC read,
 *                                    and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	assert_object_held(obj);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		return ret;

	if (obj->write_domain == I915_GEM_DOMAIN_WC)
		return 0;

	/*
	 * Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);

	/*
	 * Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * WC domain upon first access.
	 */
	if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
		mb();

	/*
	 * It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_WC;
	if (write) {
		obj->read_domains = I915_GEM_DOMAIN_WC;
		obj->write_domain = I915_GEM_DOMAIN_WC;
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);
	return 0;
}

/**
 * i915_gem_object_set_to_gtt_domain - Moves a single object to the GTT read,
 *                                     and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	assert_object_held(obj);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		return ret;

	if (obj->write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	/*
	 * Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);

	/*
	 * Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	/*
	 * It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		struct i915_vma *vma;

		obj->read_domains = I915_GEM_DOMAIN_GTT;
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj->mm.dirty = true;

		spin_lock(&obj->vma.lock);
		for_each_ggtt_vma(vma, obj)
			if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
				i915_vma_set_ggtt_write(vma);
		spin_unlock(&obj->vma.lock);
	}

	i915_gem_object_unpin_pages(obj);
	return 0;
}

/**
 * i915_gem_object_set_cache_level - Changes the cache-level of an object
 *                                   across all VMA.
 * @obj: object to act on
 * @cache_level: new cache level to set for the object
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is, if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	int ret;

	if (obj->cache_level == cache_level)
		return 0;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		return ret;

	/* Always invalidate stale cachelines */
	if (obj->cache_level != cache_level) {
		i915_gem_object_set_cache_coherency(obj, cache_level);
		obj->cache_dirty = true;
	}

	/* The cache-level will be applied when each vma is rebound. */
	return i915_gem_object_unbind(obj,
				      I915_GEM_OBJECT_UNBIND_ACTIVE |
				      I915_GEM_OBJECT_UNBIND_BARRIER);
}

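/*
 * GETCACHING ioctl: report the object's current cache level back to
 * userspace as one of the I915_CACHING_* values. Not supported on
 * discrete devices.
 */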
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	int err = 0;

	if (IS_DGFX(to_i915(dev)))
		return -ENODEV;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj) {
		err = -ENOENT;
		goto out;
	}

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}
out:
	rcu_read_unlock();
	return err;
}

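/*
 * SETCACHING ioctl: translate the requested I915_CACHING_* mode into a
 * cache level and apply it to the object. Proxy objects reject the
 * change (userptr objects asking for I915_CACHING_CACHED are silently
 * accepted), and the ioctl is not supported on discrete devices.
 */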
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret = 0;

	if (IS_DGFX(i915))
		return -ENODEV;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
		if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
			return -ENODEV;

		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * The caching mode of a proxy object is handled by its generator, and
	 * is not allowed to be changed by userspace.
	 */
	if (i915_gem_object_is_proxy(obj)) {
		/*
		 * Silently allow cached for userptr; the Vulkan driver
		 * sets all objects to cached.
		 */
		if (!i915_gem_object_is_userptr(obj) ||
		    args->caching != I915_CACHING_CACHED)
			ret = -ENXIO;

		goto out;
	}

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		goto out;

	ret = i915_gem_object_set_cache_level(obj, level);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_put(obj);
	return ret;
}

/*
 * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
 * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
 * (for pageflips). We only flush the caches while preparing the buffer for
 * display; the callers are responsible for the frontbuffer flush.
 */
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_vma *vma;
	int ret;

	/* Frame buffer must be in LMEM */
	if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj))
		return ERR_PTR(-EINVAL);

	/*
	 * The display engine is not coherent with the LLC cache on gen6. As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is the lowest common denominator for
	 * all chipsets.
	 *
	 * However, for gen6+ we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(i915) ?
					      I915_CACHE_WT : I915_CACHE_NONE);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers. However,
	 * it may simply be too big to fit into mappable, in which case
	 * put it anyway and hope that userspace can cope (but always first
	 * try to preserve the existing ABI).
	 */
	vma = ERR_PTR(-ENOSPC);
	if ((flags & PIN_MAPPABLE) == 0 &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL))
		vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0, alignment,
						  flags | PIN_MAPPABLE |
						  PIN_NONBLOCK);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK))
		vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0,
						  alignment, flags);
	if (IS_ERR(vma))
		return vma;

	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
	i915_vma_mark_scanout(vma);

	i915_gem_object_flush_if_display_locked(obj);

	return vma;
}

/**
 * i915_gem_object_set_to_cpu_domain - Moves a single object to the CPU read,
 *                                     and possibly write domain.
 * @obj: object to act on
 * @write: requesting write or read-only access
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	assert_object_held(obj);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		return ret;

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/*
	 * It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);

	/*
	 * If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write)
		__start_cpu_write(obj);

	return 0;
}

/**
 * i915_gem_set_domain_ioctl - Called when user space prepares to use an
 *                             object with the CPU, either through the mmap
 *                             ioctl's mapping or a GTT mapping.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	u32 read_domains = args->read_domains;
	u32 write_domain = args->write_domain;
	int err;

	if (IS_DGFX(to_i915(dev)))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/*
	 * Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain && read_domains != write_domain)
		return -EINVAL;

	if (!read_domains)
		return 0;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_PRIORITY |
				   (write_domain ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto out;

	if (i915_gem_object_is_userptr(obj)) {
		/*
		 * Try to grab the userptr pages; iris uses set_domain to
		 * check userptr validity.
		 */
		err = i915_gem_object_userptr_validate(obj);
		if (!err)
			err = i915_gem_object_wait(obj,
						   I915_WAIT_INTERRUPTIBLE |
						   I915_WAIT_PRIORITY |
						   (write_domain ? I915_WAIT_ALL : 0),
						   MAX_SCHEDULE_TIMEOUT);
		goto out;
	}

	/*
	 * Proxy objects do not control access to the backing storage, ergo
	 * they cannot be used as a means to manipulate the cache domain
	 * tracking for that backing storage. The proxy object is always
	 * considered to be outside of any cache domain.
	 */
	if (i915_gem_object_is_proxy(obj)) {
		err = -ENXIO;
		goto out;
	}

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	/*
	 * Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_unlock;

	/*
	 * Already in the desired write domain? Nothing for us to do!
	 *
	 * We apply a little bit of cunning here to catch a broader set of
	 * no-ops. If obj->write_domain is set, we must be in the same
	 * obj->read_domains, and only that domain. Therefore, if that
	 * obj->write_domain matches the request read_domains, we are
	 * already in the same read/write domain and can skip the operation,
	 * without having to further check the requested write_domain.
	 */
	if (READ_ONCE(obj->write_domain) == read_domains)
		goto out_unpin;

	if (read_domains & I915_GEM_DOMAIN_WC)
		err = i915_gem_object_set_to_wc_domain(obj, write_domain);
	else if (read_domains & I915_GEM_DOMAIN_GTT)
		err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
	else
		err = i915_gem_object_set_to_cpu_domain(obj, write_domain);

out_unpin:
	i915_gem_object_unpin_pages(obj);

out_unlock:
	i915_gem_object_unlock(obj);

	if (!err && write_domain)
		i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

out:
	i915_gem_object_put(obj);
	return err;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	assert_object_held(obj);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/*
	 * If we're not in the CPU read domain, set ourselves into the GTT
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the GPU will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!obj->cache_dirty &&
	    !(obj->read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = CLFLUSH_BEFORE;

out:
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

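/*
 * Pins the specified object's pages, synchronizes the object with GPU
 * accesses (waiting for all readers and writers) and flushes any pending
 * non-CPU write domain. Sets needs_clflush to the CLFLUSH_BEFORE and/or
 * CLFLUSH_AFTER flags the caller should honour around its CPU writes.
 */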
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	assert_object_held(obj);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/*
	 * If we're not in the CPU write domain, set ourselves into the
	 * GTT write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the GPU will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (!obj->cache_dirty) {
		*needs_clflush |= CLFLUSH_AFTER;

		/*
		 * Same trick applies to invalidate partially written
		 * cachelines read before writing.
		 */
		if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
			*needs_clflush |= CLFLUSH_BEFORE;
	}

out:
	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
	obj->mm.dirty = true;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}