xref: /openbmc/linux/include/linux/prefetch.h (revision bef7a78d)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  Generic cache management functions. Everything is arch-specific,
4  *  but this header exists to make sure the defines/functions can be
5  *  used in a generic way.
6  *
7  *  2000-11-13  Arjan van de Ven   <arjan@fenrus.demon.nl>
8  *
9  */
10 
11 #ifndef _LINUX_PREFETCH_H
12 #define _LINUX_PREFETCH_H
13 
14 #include <linux/types.h>
15 #include <asm/processor.h>
16 #include <asm/cache.h>
17 
18 struct page;
19 /*
20 	prefetch(x) attempts to pre-emptively get the memory pointed to
21 	by address "x" into the CPU L1 cache.
22 	prefetch(x) should not cause any kind of exception, prefetch(0) is
23 	specifically ok.
24 
25 	prefetch() should be defined by the architecture, if not, the
26 	#define below provides a no-op define.
27 
28 	There are 3 prefetch() macros:
29 
30 	prefetch(x)  	- prefetches the cacheline at "x" for read
31 	prefetchw(x)	- prefetches the cacheline at "x" for write
32 	spin_lock_prefetch(x) - prefetches the spinlock *x for taking
33 
34 	there is also PREFETCH_STRIDE which is the architecture-preferred
35 	"lookahead" size for prefetching streamed operations.
36 
37 */
38 
39 #ifndef ARCH_HAS_PREFETCH
40 #define prefetch(x) __builtin_prefetch(x)
41 #endif
42 
43 #ifndef ARCH_HAS_PREFETCHW
44 #define prefetchw(x) __builtin_prefetch(x,1)
45 #endif
46 
47 #ifndef ARCH_HAS_SPINLOCK_PREFETCH
48 #define spin_lock_prefetch(x) prefetchw(x)
49 #endif
50 
51 #ifndef PREFETCH_STRIDE
52 #define PREFETCH_STRIDE (4*L1_CACHE_BYTES)
53 #endif
54 
/*
 * prefetch_range - prefetch a region of memory for reading.
 * @addr: start of the region
 * @len:  length of the region in bytes
 *
 * Issues a prefetch() every PREFETCH_STRIDE bytes across [addr, addr + len).
 * Compiles to a no-op unless the architecture provides a real prefetch()
 * (ARCH_HAS_PREFETCH), since blindly emitting the generic builtin for a
 * whole range is not worthwhile on arches without a useful prefetch insn.
 */
static inline void prefetch_range(void *addr, size_t len)
{
#ifdef ARCH_HAS_PREFETCH
	char *p = addr;
	char *limit = p + len;

	while (p < limit) {
		prefetch(p);
		p += PREFETCH_STRIDE;
	}
#endif
}
65 
/*
 * prefetch_page_address - prefetch the struct page itself.
 * @page: page whose descriptor to pull into cache
 *
 * Only compiled in when WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL is
 * defined; otherwise this is a no-op.  NOTE(review): presumably those
 * configs mean the page's virtual address is looked up via the struct
 * page (making a subsequent page_address() touch it), so warming the
 * descriptor's cacheline helps — confirm against the definitions of
 * those macros.
 */
static inline void prefetch_page_address(struct page *page)
{
#if defined(WANT_PAGE_VIRTUAL) || defined(HASHED_PAGE_VIRTUAL)
	prefetch(page);
#endif
}
72 
73 #endif
74