/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "display/intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

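/*
 * A clflush wraps the object to be flushed in a dma_fence_work, so that
 * the cache flush can run asynchronously once all of the object's prior
 * fences have signaled.
 */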
struct clflush {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};

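/*
 * Flush the CPU cachelines backing the object, then notify frontbuffer
 * tracking that the contents have changed from the CPU's point of view.
 */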
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}

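/* Work callback, run once the fence's dependencies have signaled. */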
static void clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	__do_clflush(clflush->obj);
}

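/*
 * Release callback: drop the page pin and the object reference taken in
 * clflush_work_create(), breaking the obj <-> clflush cycle.
 */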
static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	i915_gem_object_unpin_pages(clflush->obj);
	i915_gem_object_put(clflush->obj);
}

static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};

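/*
 * Allocate the asynchronous flush work, pinning the object's pages and
 * taking an object reference for the duration of the flush. Returns NULL
 * on failure, in which case the caller falls back to a synchronous flush.
 */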
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	if (__i915_gem_object_get_pages(obj) < 0) {
		kfree(clflush);
		return NULL;
	}

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

	return clflush;
}

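/**
 * i915_gem_clflush_object - flush the CPU cachelines backing an object
 * @obj: the object to flush, which must be locked by the caller
 * @flags: I915_CLFLUSH_FORCE to flush even CPU-cache-coherent objects,
 *	   I915_CLFLUSH_SYNC to perform the flush synchronously rather
 *	   than via the async worker
 *
 * Returns: true if a flush was performed or queued, false if the object
 * was already coherent and no flush was needed.
 */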
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct clflush *clflush;

	assert_object_held(obj);

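	/*
	 * An object on discrete should never be CPU-cache dirty, so no
	 * clflush is ever required; the WARN below catches any violation.
	 */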
	if (IS_DGFX(i915)) {
		WARN_ON_ONCE(obj->cache_dirty);
		return false;
	}

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and in no need of clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/*
	 * If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC))
		clflush = clflush_work_create(obj);
	if (clflush) {
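		/*
		 * Order the flush behind all prior work on the object, and
		 * install it as the exclusive fence so that subsequent
		 * users of the object wait for the flush to complete.
		 */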
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, NULL, true,
						i915_fence_timeout(i915),
						I915_FENCE_GFP);
		dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
		dma_fence_work_commit(&clflush->base);
		/*
		 * We must have successfully populated the pages (since we
		 * are holding a pin on them via the flush worker) to reach
		 * this point, which means the required flush-on-acquire has
		 * already been done; resetting cache_dirty here is therefore
		 * safe.
		 */
		obj->cache_dirty = false;
	} else if (obj->mm.pages) {
		__do_clflush(obj);
		obj->cache_dirty = false;
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	return true;
}