1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
221cfa0e9SRobin Murphy /*
321cfa0e9SRobin Murphy  * Copyright (C) 2017 ARM Ltd.
421cfa0e9SRobin Murphy  */
521cfa0e9SRobin Murphy 
621cfa0e9SRobin Murphy #include <linux/uaccess.h>
721cfa0e9SRobin Murphy #include <asm/barrier.h>
821cfa0e9SRobin Murphy #include <asm/cacheflush.h>
921cfa0e9SRobin Murphy 
/**
 * memcpy_flushcache - memcpy() followed by cache clean of the destination
 * @dst: destination buffer; assumed to be in cacheable memory
 * @src: source buffer
 * @cnt: number of bytes to copy
 *
 * After the copy, the written range is cleaned with dcache_clean_pop()
 * (clean to the Point of Persistence, per the helper's name — confirm
 * against arch cacheflush docs). No explicit barrier is needed between
 * the copy and the maintenance because @dst is assumed cacheable, so
 * the cache operation naturally orders against the stores.
 */
void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	unsigned long start = (unsigned long)dst;

	memcpy(dst, src, cnt);
	dcache_clean_pop(start, start + cnt);
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);
2121cfa0e9SRobin Murphy 
/*
 * Copy @len bytes starting at @offset within @page into @to, cleaning
 * the written range afterwards via memcpy_flushcache().
 */
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			    size_t len)
{
	const char *from = (const char *)page_address(page) + offset;

	memcpy_flushcache(to, from, len);
}
2721cfa0e9SRobin Murphy 
2821cfa0e9SRobin Murphy unsigned long __copy_user_flushcache(void *to, const void __user *from,
2921cfa0e9SRobin Murphy 				     unsigned long n)
3021cfa0e9SRobin Murphy {
31e50be648SPavel Tatashin 	unsigned long rc;
32e50be648SPavel Tatashin 
339e94fdadSMark Rutland 	rc = raw_copy_from_user(to, from, n);
3421cfa0e9SRobin Murphy 
3521cfa0e9SRobin Murphy 	/* See above */
36*fade9c2cSFuad Tabba 	dcache_clean_pop((unsigned long)to, (unsigned long)to + n - rc);
3721cfa0e9SRobin Murphy 	return rc;
3821cfa0e9SRobin Murphy }
39