// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 */

#include <linux/uaccess.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>

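/*
 * Copy @cnt bytes from @src to @dst, then clean the written destination
 * cache lines to the Point of Persistence.
 */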
void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	/*
	 * We assume this should not be called with @dst pointing to
	 * non-cacheable memory, such that we don't need an explicit
	 * barrier to order the cache maintenance against the memcpy.
	 */
	memcpy(dst, src, cnt);
	__clean_dcache_area_pop(dst, cnt);
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);

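/*
 * Copy @len bytes from @page, starting at @offset, to @to, cleaning the
 * destination cache lines as in memcpy_flushcache().
 */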
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			    size_t len)
{
	memcpy_flushcache(to, page_address(page) + offset, len);
}

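/*
 * Copy @n bytes from user space @from to @to, cleaning only the bytes that
 * were actually copied. Returns the number of bytes left uncopied, as the
 * other copy_from_user() routines do.
 */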
unsigned long __copy_user_flushcache(void *to, const void __user *from,
				     unsigned long n)
{
	unsigned long rc;

	uaccess_enable_not_uao();
	rc = __arch_copy_from_user(to, from, n);
	uaccess_disable_not_uao();

	/* As above, no barrier needed; clean only the bytes actually copied */
	__clean_dcache_area_pop(to, n - rc);
	return rc;
}