/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "display/intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

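/*
 * A clflush is a small piece of deferred work that flushes an object's
 * pages out of the CPU caches. Embedding a dma_fence_work lets it wait
 * on the fences already tracking the object and be published as a fence
 * of its own (see i915_gem_clflush_object()).
 */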
struct clflush {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};

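/*
 * Flush the object's backing pages from the CPU caches and notify
 * frontbuffer tracking that any CPU writes are now visible.
 */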
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);
	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
}

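/* Worker callback, run once all the fences we awaited have signaled. */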
static int clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);
	/*
	 * Take ownership of the object reference: zeroing clflush->obj
	 * tells clflush_release() not to drop it a second time.
	 */
	struct drm_i915_gem_object *obj = fetch_and_zero(&clflush->obj);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto put;

	__do_clflush(obj);
	i915_gem_object_unpin_pages(obj);

put:
	i915_gem_object_put(obj);
	return err;
}

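/*
 * Called when the fence work is destroyed; if the work never ran (and so
 * never consumed clflush->obj), drop the object reference here instead.
 */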
static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	if (clflush->obj)
		i915_gem_object_put(clflush->obj);
}

static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};

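/*
 * Allocate the deferred flush; on allocation failure the caller falls
 * back to flushing synchronously.
 */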
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

	return clflush;
}

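/*
 * i915_gem_clflush_object - flush dirty cachelines for an object's pages
 * @obj: the object to flush (the caller must hold its lock, see
 *	 assert_object_held() below)
 * @flags: I915_CLFLUSH_FORCE flushes even objects coherent for reads;
 *	   I915_CLFLUSH_SYNC flushes inline instead of queueing work
 *
 * Returns true if a flush was performed or queued, false if the object
 * did not need flushing.
 *
 * Illustrative sketch only (call sites and flags vary across the driver):
 *
 *	i915_gem_object_lock(obj);
 *	if (obj->cache_dirty)
 *		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
 *	i915_gem_object_unlock(obj);
 */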
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct clflush *clflush;

	assert_object_held(obj);

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and not to need clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/*
	 * If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC))
		clflush = clflush_work_create(obj);
	if (clflush) {
		/*
		 * Queue the flush behind all fences currently tracking the
		 * object, then publish it as the new exclusive fence so
		 * that later users wait for the flush to complete.
		 */
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, NULL, true,
						I915_FENCE_TIMEOUT,
						I915_FENCE_GFP);
		dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
		dma_fence_work_commit(&clflush->base);
	} else if (obj->mm.pages) {
		/* No async worker (or none wanted), flush inline. */
		__do_clflush(obj);
	} else {
		/* No pages yet, so nothing outside the CPU domain to flush. */
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	obj->cache_dirty = false;
	return true;
}