/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef __I915_REG_DEFS__
#define __I915_REG_DEFS__

#include <linux/bitfield.h>
#include <linux/bits.h>

/**
 * REG_BIT() - Prepare a u32 bit value
 * @__n: 0-based bit number
 *
 * Local wrapper for BIT() to force u32, with compile time checks.
 *
 * @return: Value with bit @__n set.
 */
#define REG_BIT(__n)							\
	((u32)(BIT(__n) +						\
	       BUILD_BUG_ON_ZERO(__is_constexpr(__n) &&		\
				 ((__n) < 0 || (__n) > 31))))
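
/*
 * Illustrative usage sketch; FOO_CTL and its offset are hypothetical,
 * not real register definitions:
 *
 * #define FOO_CTL			_MMIO(0xf000)
 * #define   FOO_CTL_ENABLE		REG_BIT(31)
 *
 * REG_BIT(31) evaluates to 0x80000000 as a u32, while a constant
 * out-of-range bit number such as REG_BIT(32) fails at compile time
 * via BUILD_BUG_ON_ZERO().
 */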

/**
 * REG_GENMASK() - Prepare a continuous u32 bitmask
 * @__high: 0-based high bit
 * @__low: 0-based low bit
 *
 * Local wrapper for GENMASK() to force u32, with compile time checks.
 *
 * @return: Continuous bitmask from @__high to @__low, inclusive.
 */
#define REG_GENMASK(__high, __low)					\
	((u32)(GENMASK(__high, __low) +					\
	       BUILD_BUG_ON_ZERO(__is_constexpr(__high) &&	\
				 __is_constexpr(__low) &&		\
				 ((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
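
/*
 * Illustrative usage sketch (hypothetical field): REG_GENMASK(7, 4)
 * evaluates to 0x000000f0 as a u32 and is typically paired with
 * REG_FIELD_PREP()/REG_FIELD_GET() below:
 *
 * #define   FOO_CTL_MODE_MASK		REG_GENMASK(7, 4)
 */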

/**
 * REG_GENMASK64() - Prepare a continuous u64 bitmask
 * @__high: 0-based high bit
 * @__low: 0-based low bit
 *
 * Local wrapper for GENMASK_ULL() to force u64, with compile time checks.
 *
 * @return: Continuous bitmask from @__high to @__low, inclusive.
 */
#define REG_GENMASK64(__high, __low)					\
	((u64)(GENMASK_ULL(__high, __low) +				\
	       BUILD_BUG_ON_ZERO(__is_constexpr(__high) &&		\
				 __is_constexpr(__low) &&		\
				 ((__low) < 0 || (__high) > 63 || (__low) > (__high)))))
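
/*
 * Same idea as REG_GENMASK(), only 64 bit wide: for example,
 * REG_GENMASK64(39, 32) evaluates to 0x000000ff00000000 as a u64.
 */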

/*
 * Local integer constant expression version of is_power_of_2().
 */
#define IS_POWER_OF_2(__x)		((__x) && (((__x) & ((__x) - 1)) == 0))
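
/*
 * For example, IS_POWER_OF_2(0x10) is true while IS_POWER_OF_2(0) and
 * IS_POWER_OF_2(0x18) are false. Unlike is_power_of_2(), this remains
 * an integer constant expression and can therefore be used inside
 * BUILD_BUG_ON_ZERO(), as REG_FIELD_PREP() below does.
 */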

/**
 * REG_FIELD_PREP() - Prepare a u32 bitfield value
 * @__mask: shifted mask defining the field's length and position
 * @__val: value to put in the field
 *
 * Local copy of FIELD_PREP() to generate an integer constant expression, force
 * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK().
 *
 * @return: @__val masked and shifted into the field defined by @__mask.
 */
#define REG_FIELD_PREP(__mask, __val)						\
	((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) +	\
	       BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) +		\
	       BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) +		\
	       BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
	       BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
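
/*
 * Illustrative usage sketch, reusing the hypothetical FOO_CTL_MODE_MASK
 * (REG_GENMASK(7, 4)) from above:
 *
 * #define   FOO_CTL_MODE_BAR		REG_FIELD_PREP(FOO_CTL_MODE_MASK, 2)
 *
 * This evaluates to 0x20 and stays an integer constant expression, so
 * it can be used in further register #defines. A constant @__val that
 * does not fit the mask, e.g. REG_FIELD_PREP(FOO_CTL_MODE_MASK, 0x1f),
 * fails at compile time.
 */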

/**
 * REG_FIELD_GET() - Extract a u32 bitfield value
 * @__mask: shifted mask defining the field's length and position
 * @__val: value to extract the bitfield value from
 *
 * Local wrapper for FIELD_GET() to force u32 and for consistency with
 * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
 *
 * @return: Masked and shifted value of the field defined by @__mask in @__val.
 */
#define REG_FIELD_GET(__mask, __val)	((u32)FIELD_GET(__mask, __val))
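
/*
 * For example, REG_FIELD_GET(REG_GENMASK(7, 4), 0x1234) evaluates to
 * 0x3: the field bits are masked out and shifted down to bit 0.
 */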

/**
 * REG_FIELD_GET64() - Extract a u64 bitfield value
 * @__mask: shifted mask defining the field's length and position
 * @__val: value to extract the bitfield value from
 *
 * Local wrapper for FIELD_GET() to force u64 and for consistency with
 * REG_GENMASK64().
 *
 * @return: Masked and shifted value of the field defined by @__mask in @__val.
 */
#define REG_FIELD_GET64(__mask, __val)	((u64)FIELD_GET(__mask, __val))
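
/*
 * For example, REG_FIELD_GET64(REG_GENMASK64(39, 32), 0xab00000000ull)
 * evaluates to 0xab.
 */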

#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
#define _MASKED_FIELD(mask, value) ({					   \
	if (__builtin_constant_p(mask))					   \
		BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
	if (__builtin_constant_p(value))				   \
		BUILD_BUG_ON_MSG((value) & 0xffff0000, "Incorrect value"); \
	if (__builtin_constant_p(mask) && __builtin_constant_p(value))	   \
		BUILD_BUG_ON_MSG((value) & ~(mask),			   \
				 "Incorrect value for mask");		   \
	__MASKED_FIELD(mask, value); })
#define _MASKED_BIT_ENABLE(a)	({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
#define _MASKED_BIT_DISABLE(a)	(_MASKED_FIELD((a), 0))
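
/*
 * Illustrative values: masked registers carry a write-enable mask in
 * the upper 16 bits and the value in the lower 16 bits, so
 * _MASKED_FIELD(0xf0, 0x30) evaluates to 0x00f00030,
 * _MASKED_BIT_ENABLE(BIT(0)) to 0x00010001 and
 * _MASKED_BIT_DISABLE(BIT(0)) to 0x00010000. Constant arguments that
 * overflow 16 bits, or a value outside its mask, trip BUILD_BUG_ON_MSG().
 */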

/*
 * Given the first two numbers __a and __b of arbitrarily many evenly spaced
 * numbers, pick the 0-based __index'th value.
 *
 * Always prefer this over _PICK() if the numbers are evenly spaced.
 */
#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
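
/*
 * For example, with the hypothetical offsets _FOO_A (0xf000) and _FOO_B
 * (0xf004) used in the comment below, _PICK_EVEN(2, _FOO_A, _FOO_B)
 * evaluates to 0xf008: index 0 yields _FOO_A, index 1 yields _FOO_B,
 * and the stride (_FOO_B - _FOO_A) continues for higher indexes.
 */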

/*
 * Like _PICK_EVEN(), but supports 2 ranges of evenly spaced address offsets.
 * @__c_index corresponds to the index in which the second range starts to be
 * used. Using math interval notation, the first range is used for indexes [ 0,
 * @__c_index), while the second range is used for [ @__c_index, ... ). Example:
 *
 * #define _FOO_A			0xf000
 * #define _FOO_B			0xf004
 * #define _FOO_C			0xf008
 * #define _SUPER_FOO_A			0xa000
 * #define _SUPER_FOO_B			0xa100
 * #define FOO(x)			_MMIO(_PICK_EVEN_2RANGES(x, 3,		\
 *					      _FOO_A, _FOO_B,			\
 *					      _SUPER_FOO_A, _SUPER_FOO_B))
 *
 * This expands to:
 *	0: 0xf000,
 *	1: 0xf004,
 *	2: 0xf008,
 *	3: 0xa000,
 *	4: 0xa100,
 *	5: 0xa200,
 *	...
 */
#define _PICK_EVEN_2RANGES(__index, __c_index, __a, __b, __c, __d)		\
	(BUILD_BUG_ON_ZERO(!__is_constexpr(__c_index)) +			\
	 ((__index) < (__c_index) ? _PICK_EVEN(__index, __a, __b) :		\
				   _PICK_EVEN((__index) - (__c_index), __c, __d)))

/*
 * Given the arbitrary numbers in varargs, pick the 0-based __index'th number.
 *
 * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced.
 */
#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
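
/*
 * For example, _PICK(1, 0xf000, 0xf008, 0xf020) evaluates to 0xf008.
 * Since this indexes a compound literal array, the result is not an
 * integer constant expression, unlike _PICK_EVEN().
 */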

typedef struct {
	u32 reg;
} i915_reg_t;

#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

typedef struct {
	u32 reg;
} i915_mcr_reg_t;

#define MCR_REG(offset)	((const i915_mcr_reg_t){ .reg = (offset) })

#define INVALID_MMIO_REG _MMIO(0)
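
/*
 * Illustrative usage (hypothetical offset): _MMIO(0xf000) produces an
 * i915_reg_t and MCR_REG(0xf000) an i915_mcr_reg_t wrapping the same
 * offset. Using distinct struct types rather than plain u32 offsets
 * lets the compiler reject accidental mixups, e.g. passing an MCR
 * register to a helper that expects an i915_reg_t.
 */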

/*
 * These macros can be used on either i915_reg_t or i915_mcr_reg_t since they're
 * simply operations on the register's offset and don't care about the MCR vs
 * non-MCR nature of the register.
 */
#define i915_mmio_reg_offset(r) \
	_Generic((r), i915_reg_t: (r).reg, i915_mcr_reg_t: (r).reg)
#define i915_mmio_reg_equal(a, b) (i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b))
#define i915_mmio_reg_valid(r) (!i915_mmio_reg_equal(r, INVALID_MMIO_REG))
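
/*
 * For example, i915_mmio_reg_offset(_MMIO(0xf000)) and
 * i915_mmio_reg_offset(MCR_REG(0xf000)) both evaluate to 0xf000 via the
 * _Generic() selection, i915_mmio_reg_equal() compares those offsets,
 * and i915_mmio_reg_valid() is false only for a zero offset
 * (INVALID_MMIO_REG).
 */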

#endif /* __I915_REG_DEFS__ */