xref: /openbmc/linux/drivers/gpu/drm/drm_cache.c (revision dc90f084)
/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#include <linux/cc_platform.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/ioport.h>
#include <linux/iosys-map.h>
#include <xen/xen.h>

#include <drm/drm_cache.h>

/* A small bounce buffer that fits on the stack. */
#define MEMCPY_BOUNCE_SIZE 128

#if defined(CONFIG_X86)
#include <asm/smp.h>

/*
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues.  For drm_clflush_page this fencing happens
 * in the caller.
 */
static void
drm_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;
	const int size = boot_cpu_data.x86_clflush_size;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page);
	for (i = 0; i < PAGE_SIZE; i += size)
		clflushopt(page_virtual + i);
	kunmap_atomic(page_virtual);
}

static void drm_cache_flush_clflush(struct page *pages[],
				    unsigned long num_pages)
{
	unsigned long i;

	mb(); /* Full memory barrier used before so that CLFLUSH is ordered. */
	for (i = 0; i < num_pages; i++)
		drm_clflush_page(*pages++);
	mb(); /* Also used after CLFLUSH so that all cache is flushed. */
}
#endif

/**
 * drm_clflush_pages - Flush dcache lines of a set of pages.
 * @pages: List of pages to be flushed.
 * @num_pages: Number of pages in the array.
 *
 * Flush every data cache line entry that points to an address belonging
 * to a page in the array.
 */
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		drm_cache_flush_clflush(pages, num_pages);
		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");

#elif defined(__powerpc__)
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		struct page *page = pages[i];
		void *page_virtual;

		if (unlikely(page == NULL))
			continue;

		page_virtual = kmap_atomic(page);
		flush_dcache_range((unsigned long)page_virtual,
				   (unsigned long)page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
#else
	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);

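/*
 * Illustrative usage sketch, not part of the original file: a driver that has
 * just written to a buffer object's backing pages with the CPU and is about
 * to let a non-coherent device read them. The helper name and the
 * "pages"/"num_pages" parameters are hypothetical.
 */
static void __maybe_unused example_flush_bo_pages(struct page **pages,
						  unsigned long num_pages)
{
	/*
	 * Write back every cacheline backing the pages so the device sees
	 * the CPU's writes.
	 */
	drm_clflush_pages(pages, num_pages);
}
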
/**
 * drm_clflush_sg - Flush dcache lines pointing to a scatter-gather.
 * @st: struct sg_table.
 *
 * Flush every data cache line entry that points to an address in the
 * sg.
 */
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		struct sg_page_iter sg_iter;

		mb(); /* CLFLUSH is ordered only by using memory barriers. */
		for_each_sgtable_page(st, &sg_iter, 0)
			drm_clflush_page(sg_page_iter_page(&sg_iter));
		mb(); /* Make sure that every cache line entry is flushed. */

		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);

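/*
 * Illustrative sketch, not part of the original file: flushing an object's
 * backing storage through its scatter-gather table before mapping it for
 * DMA. "sgt" is a hypothetical driver-side name.
 */
static void __maybe_unused example_flush_backing_store(struct sg_table *sgt)
{
	/* Flushes the cachelines of every page referenced by the table. */
	drm_clflush_sg(sgt);
}
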
/**
 * drm_clflush_virt_range - Flush dcache lines of a region
 * @addr: Initial kernel memory address.
 * @length: Region size.
 *
 * Flush every data cache line entry that points to an address in the
 * region requested.
 */
void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		const int size = boot_cpu_data.x86_clflush_size;
		void *end = addr + length;

		addr = (void *)(((unsigned long)addr) & -size);
		mb(); /* CLFLUSH is only ordered with a full memory barrier. */
		for (; addr < end; addr += size)
			clflushopt(addr);
		clflushopt(end - 1); /* force serialisation */
		mb(); /* Ensure that every data cache line entry is flushed. */
		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);

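/*
 * Illustrative sketch, not part of the original file: after the CPU fills a
 * command buffer through a cached kernel mapping, flush only the bytes that
 * were actually written before the device fetches them. "cmds" and
 * "bytes_written" are hypothetical.
 */
static void __maybe_unused example_flush_written_commands(void *cmds,
							   unsigned long bytes_written)
{
	drm_clflush_virt_range(cmds, bytes_written);
}
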
bool drm_need_swiotlb(int dma_bits)
{
	struct resource *tmp;
	resource_size_t max_iomem = 0;

	/*
	 * Xen paravirtual hosts require swiotlb regardless of requested dma
	 * transfer size.
	 *
	 * NOTE: Really, what it requires is use of the dma_alloc_coherent
	 *       allocator used in ttm_dma_populate() instead of
	 *       ttm_populate_and_map_pages(), which bounce buffers so much in
	 *       Xen it leads to swiotlb buffer exhaustion.
	 */
	if (xen_pv_domain())
		return true;

	/*
	 * Enforce dma_alloc_coherent when memory encryption is active as well
	 * for the same reasons as for Xen paravirtual hosts.
	 */
	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return true;

	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
		max_iomem = max(max_iomem, tmp->end);

	return max_iomem > ((u64)1 << dma_bits);
}
EXPORT_SYMBOL(drm_need_swiotlb);

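/*
 * Illustrative sketch, not part of the original file: a driver whose device
 * can only address 40 bits of DMA could use drm_need_swiotlb() to decide
 * whether allocations would end up being bounce buffered. The helper name
 * and the 40-bit limit are hypothetical.
 */
static bool __maybe_unused example_needs_bounce_buffering(void)
{
	return drm_need_swiotlb(40);
}
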
static void memcpy_fallback(struct iosys_map *dst,
			    const struct iosys_map *src,
			    unsigned long len)
{
	if (!dst->is_iomem && !src->is_iomem) {
		memcpy(dst->vaddr, src->vaddr, len);
	} else if (!src->is_iomem) {
		iosys_map_memcpy_to(dst, 0, src->vaddr, len);
	} else if (!dst->is_iomem) {
		memcpy_fromio(dst->vaddr, src->vaddr_iomem, len);
	} else {
		/*
		 * Bounce size is not performance tuned, but using a
		 * bounce buffer like this is significantly faster than
		 * resorting to ioreadxx() + iowritexx().
		 */
		char bounce[MEMCPY_BOUNCE_SIZE];
		void __iomem *_src = src->vaddr_iomem;
		void __iomem *_dst = dst->vaddr_iomem;

		while (len >= MEMCPY_BOUNCE_SIZE) {
			memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE);
			memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE);
			_src += MEMCPY_BOUNCE_SIZE;
			_dst += MEMCPY_BOUNCE_SIZE;
			len -= MEMCPY_BOUNCE_SIZE;
		}
		if (len) {
			/* Copy only the remaining tail, not a full bounce chunk. */
			memcpy_fromio(bounce, _src, len);
			memcpy_toio(_dst, bounce, len);
		}
	}
}

#ifdef CONFIG_X86

static DEFINE_STATIC_KEY_FALSE(has_movntdqa);

static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
{
	/* Here @len counts 16-byte chunks, not bytes (see __drm_memcpy_from_wc). */
	kernel_fpu_begin();

	while (len >= 4) {
		asm("movntdqa	(%0), %%xmm0\n"
		    "movntdqa 16(%0), %%xmm1\n"
		    "movntdqa 32(%0), %%xmm2\n"
		    "movntdqa 48(%0), %%xmm3\n"
		    "movaps %%xmm0,   (%1)\n"
		    "movaps %%xmm1, 16(%1)\n"
		    "movaps %%xmm2, 32(%1)\n"
		    "movaps %%xmm3, 48(%1)\n"
		    :: "r" (src), "r" (dst) : "memory");
		src += 64;
		dst += 64;
		len -= 4;
	}
	while (len--) {
		asm("movntdqa (%0), %%xmm0\n"
		    "movaps %%xmm0, (%1)\n"
		    :: "r" (src), "r" (dst) : "memory");
		src += 16;
		dst += 16;
	}

	kernel_fpu_end();
}

/*
 * __drm_memcpy_from_wc copies @len bytes from @src to @dst using
 * non-temporal instructions where available. Note that all arguments
 * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
 * of 16.
 */
static void __drm_memcpy_from_wc(void *dst, const void *src, unsigned long len)
{
	if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
		memcpy(dst, src, len);
	else if (likely(len))
		__memcpy_ntdqa(dst, src, len >> 4);
}

/**
 * drm_memcpy_from_wc - Perform the fastest available memcpy from a source
 * that may be WC.
 * @dst: The destination pointer
 * @src: The source pointer
 * @len: The size of the area to transfer in bytes
 *
 * Tries an arch optimized memcpy for prefetching reading out of a WC region,
 * and if no such beast is available, falls back to a normal memcpy.
 */
void drm_memcpy_from_wc(struct iosys_map *dst,
			const struct iosys_map *src,
			unsigned long len)
{
	if (WARN_ON(in_interrupt())) {
		memcpy_fallback(dst, src, len);
		return;
	}

	if (static_branch_likely(&has_movntdqa)) {
		__drm_memcpy_from_wc(dst->is_iomem ?
				     (void __force *)dst->vaddr_iomem :
				     dst->vaddr,
				     src->is_iomem ?
				     (void const __force *)src->vaddr_iomem :
				     src->vaddr,
				     len);
		return;
	}

	memcpy_fallback(dst, src, len);
}
EXPORT_SYMBOL(drm_memcpy_from_wc);

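/*
 * Illustrative sketch, not part of the original file: reading back from a
 * write-combined VRAM mapping into cached system memory via iosys_map
 * descriptors. "vram_iomem", "sysmem" and "size" are hypothetical.
 */
static void __maybe_unused example_readback_from_vram(void __iomem *vram_iomem,
						      void *sysmem,
						      unsigned long size)
{
	struct iosys_map src, dst;

	iosys_map_set_vaddr_iomem(&src, vram_iomem);
	iosys_map_set_vaddr(&dst, sysmem);

	/* Uses MOVNTDQA streaming loads where available, memcpy otherwise. */
	drm_memcpy_from_wc(&dst, &src, size);
}
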
/*
 * drm_memcpy_init_early - One-time initialization of the WC memcpy code
 */
void drm_memcpy_init_early(void)
{
	/*
	 * Some hypervisors (e.g. KVM) don't support emulation of VEX-prefixed
	 * instructions, so don't enable movntdqa in hypervisor guests.
	 */
	if (static_cpu_has(X86_FEATURE_XMM4_1) &&
	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_enable(&has_movntdqa);
}
#else
void drm_memcpy_from_wc(struct iosys_map *dst,
			const struct iosys_map *src,
			unsigned long len)
{
	WARN_ON(in_interrupt());

	memcpy_fallback(dst, src, len);
}
EXPORT_SYMBOL(drm_memcpy_from_wc);

void drm_memcpy_init_early(void)
{
}
#endif /* CONFIG_X86 */
357b7e32befSThomas Hellström