/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef __CXL_H__
#define __CXL_H__

#include <linux/libnvdimm.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/io.h>

/**
 * DOC: cxl objects
 *
 * The CXL core objects like ports, decoders, and regions are shared
 * between the subsystem drivers cxl_acpi, cxl_pci, and core drivers
 * (port-driver, region-driver, nvdimm object-drivers, etc.).
 */

/* CXL 2.0 8.2.4 CXL Component Register Layout and Definition */
#define CXL_COMPONENT_REG_BLOCK_SIZE SZ_64K

/* CXL 2.0 8.2.5 CXL.cache and CXL.mem Registers */
#define CXL_CM_OFFSET 0x1000
#define CXL_CM_CAP_HDR_OFFSET 0x0
#define   CXL_CM_CAP_HDR_ID_MASK GENMASK(15, 0)
#define     CM_CAP_HDR_CAP_ID 1
#define   CXL_CM_CAP_HDR_VERSION_MASK GENMASK(19, 16)
#define     CM_CAP_HDR_CAP_VERSION 1
#define   CXL_CM_CAP_HDR_CACHE_MEM_VERSION_MASK GENMASK(23, 20)
#define     CM_CAP_HDR_CACHE_MEM_VERSION 1
#define   CXL_CM_CAP_HDR_ARRAY_SIZE_MASK GENMASK(31, 24)
#define CXL_CM_CAP_PTR_MASK GENMASK(31, 20)

#define   CXL_CM_CAP_CAP_ID_HDM 0x5
#define   CXL_CM_CAP_CAP_HDM_VERSION 1

/* HDM decoders CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure */
#define CXL_HDM_DECODER_CAP_OFFSET 0x0
#define   CXL_HDM_DECODER_COUNT_MASK GENMASK(3, 0)
#define   CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4)
#define   CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8)
#define   CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9)
#define CXL_HDM_DECODER_CTRL_OFFSET 0x4
#define   CXL_HDM_DECODER_ENABLE BIT(1)
#define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10)
#define CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i) (0x20 * (i) + 0x14)
#define CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i) (0x20 * (i) + 0x18)
#define CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i) (0x20 * (i) + 0x1c)
#define CXL_HDM_DECODER0_CTRL_OFFSET(i) (0x20 * (i) + 0x20)
#define   CXL_HDM_DECODER0_CTRL_IG_MASK GENMASK(3, 0)
#define   CXL_HDM_DECODER0_CTRL_IW_MASK GENMASK(7, 4)
#define   CXL_HDM_DECODER0_CTRL_LOCK BIT(8)
#define   CXL_HDM_DECODER0_CTRL_COMMIT BIT(9)
#define   CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10)
#define   CXL_HDM_DECODER0_CTRL_TYPE BIT(12)
#define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24)
#define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28)

static inline int cxl_hdm_decoder_count(u32 cap_hdr)
{
	int val = FIELD_GET(CXL_HDM_DECODER_COUNT_MASK, cap_hdr);

	return val ? val * 2 : 1;
}
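
/*
 * Illustrative sketch, not part of the upstream header: walk the HDM
 * decoders described by the definitions above and count how many are
 * committed. The @hdm argument is a hypothetical __iomem mapping of the
 * HDM decoder capability structure. Note the capability encoding that
 * cxl_hdm_decoder_count() handles: a raw count of 0 means 1 decoder,
 * any other value encodes 2x that many decoders.
 */
static inline int cxl_example_count_committed_decoders(void __iomem *hdm)
{
	u32 cap_hdr = readl(hdm + CXL_HDM_DECODER_CAP_OFFSET);
	int i, committed = 0;

	for (i = 0; i < cxl_hdm_decoder_count(cap_hdr); i++) {
		u32 ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));

		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	return committed;
}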

/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
#define CXLDEV_CAP_ARRAY_OFFSET 0x0
#define   CXLDEV_CAP_ARRAY_CAP_ID 0
#define   CXLDEV_CAP_ARRAY_ID_MASK GENMASK_ULL(15, 0)
#define   CXLDEV_CAP_ARRAY_COUNT_MASK GENMASK_ULL(47, 32)
/* CXL 2.0 8.2.8.2 CXL Device Capability Header Register */
#define CXLDEV_CAP_HDR_CAP_ID_MASK GENMASK(15, 0)
/* CXL 2.0 8.2.8.2.1 CXL Device Capabilities */
#define CXLDEV_CAP_CAP_ID_DEVICE_STATUS 0x1
#define CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX 0x2
#define CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX 0x3
#define CXLDEV_CAP_CAP_ID_MEMDEV 0x4000
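
/*
 * Illustrative sketch, not part of the upstream header: scan the device
 * capability array for a given capability ID using the definitions above.
 * Assumes the CXL 2.0 8.2.8.2 layout where each capability header is 16
 * bytes, starting 16 bytes into the block, and assumes readq() is
 * available (64-bit build). @base is a hypothetical __iomem mapping of
 * the device register block.
 */
static inline bool cxl_example_dev_cap_present(void __iomem *base, u16 cap_id)
{
	u64 cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
	int cap, cap_count;

	if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
	    CXLDEV_CAP_ARRAY_CAP_ID)
		return false;

	cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);
	for (cap = 1; cap <= cap_count; cap++) {
		u32 hdr = readl(base + cap * 0x10);

		if (FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK, hdr) == cap_id)
			return true;
	}

	return false;
}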

/* CXL 2.0 8.2.8.4 Mailbox Registers */
#define CXLDEV_MBOX_CAPS_OFFSET 0x00
#define   CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
#define CXLDEV_MBOX_CTRL_OFFSET 0x04
#define   CXLDEV_MBOX_CTRL_DOORBELL BIT(0)
#define CXLDEV_MBOX_CMD_OFFSET 0x08
#define   CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
#define   CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16)
#define CXLDEV_MBOX_STATUS_OFFSET 0x10
#define   CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32)
#define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
#define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20
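
/*
 * Illustrative sketch, not part of the upstream header: the CXL 2.0
 * 8.2.8.4 mailbox flow using the definitions above. Payload copying,
 * timeouts, locking and background-command handling are deliberately
 * omitted; writeq()/readq() availability (64-bit build) is assumed.
 * @mbox is a hypothetical __iomem mapping of the mailbox registers.
 */
static inline int cxl_example_mbox_send(void __iomem *mbox, u16 opcode,
					size_t len_in)
{
	u64 cmd = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK, opcode) |
		  FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, len_in);

	/* input payload, if any, would be copied to CXLDEV_MBOX_PAYLOAD_OFFSET */
	writeq(cmd, mbox + CXLDEV_MBOX_CMD_OFFSET);
	writel(CXLDEV_MBOX_CTRL_DOORBELL, mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* a real implementation bounds this poll with a timeout */
	while (readl(mbox + CXLDEV_MBOX_CTRL_OFFSET) & CXLDEV_MBOX_CTRL_DOORBELL)
		;

	return FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK,
			 readq(mbox + CXLDEV_MBOX_STATUS_OFFSET));
}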

/*
 * Using struct_group() allows for per-register-block-type helper routines,
 * without requiring block-type agnostic code to include the prefix.
 */
struct cxl_regs {
	/*
	 * Common set of CXL Component register block base pointers
	 * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
	 */
	struct_group_tagged(cxl_component_regs, component,
		void __iomem *hdm_decoder;
	);
	/*
	 * Common set of CXL Device register block base pointers
	 * @status: CXL 2.0 8.2.8.3 Device Status Registers
	 * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers
	 * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers
	 */
	struct_group_tagged(cxl_device_regs, device_regs,
		void __iomem *status, *mbox, *memdev;
	);
};
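
/*
 * Illustrative sketch, not part of the upstream header: because
 * struct_group_tagged() emits real struct types (cxl_component_regs and
 * cxl_device_regs), helpers can operate on just one register-block slice
 * while callers pass &cxl_regs.component or &cxl_regs.device_regs. The
 * helper below is hypothetical.
 */
static inline bool cxl_example_device_regs_mapped(const struct cxl_device_regs *regs)
{
	/* same storage as the flat cxl_regs.status / .mbox / .memdev members */
	return regs->status && regs->mbox && regs->memdev;
}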

struct cxl_reg_map {
	bool valid;
	unsigned long offset;
	unsigned long size;
};

struct cxl_component_reg_map {
	struct cxl_reg_map hdm_decoder;
};

struct cxl_device_reg_map {
	struct cxl_reg_map status;
	struct cxl_reg_map mbox;
	struct cxl_reg_map memdev;
};

/**
 * struct cxl_register_map - DVSEC harvested register block mapping parameters
 * @base: virtual base of the register-block-BAR + @block_offset
 * @block_offset: offset to start of register block in @barno
 * @reg_type: see enum cxl_regloc_type
 * @barno: PCI BAR number containing the register block
 * @component_map: cxl_reg_map for component registers
 * @device_map: cxl_reg_maps for device registers
 */
struct cxl_register_map {
	void __iomem *base;
	u64 block_offset;
	u8 reg_type;
	u8 barno;
	union {
		struct cxl_component_reg_map component_map;
		struct cxl_device_reg_map device_map;
	};
};
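
/*
 * Illustrative note (sketch only, depends on linux/pci.h which this header
 * does not include): the physical location of the block described by a
 * struct cxl_register_map is typically recovered as
 *
 *	phys = pci_resource_start(pdev, map->barno) + map->block_offset;
 *
 * and then mapped with devm_cxl_iomap_block() and decoded with the
 * cxl_probe_component_regs() / cxl_map_component_regs() style helpers
 * declared below.
 */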

void cxl_probe_component_regs(struct device *dev, void __iomem *base,
			      struct cxl_component_reg_map *map);
void cxl_probe_device_regs(struct device *dev, void __iomem *base,
			   struct cxl_device_reg_map *map);
int cxl_map_component_regs(struct pci_dev *pdev,
			   struct cxl_component_regs *regs,
			   struct cxl_register_map *map);
int cxl_map_device_regs(struct pci_dev *pdev,
			struct cxl_device_regs *regs,
			struct cxl_register_map *map);

enum cxl_regloc_type;
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
		      struct cxl_register_map *map);
void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
				   resource_size_t length);

#define CXL_RESOURCE_NONE ((resource_size_t) -1)
#define CXL_TARGET_STRLEN 20

/*
 * cxl_decoder flags that define the type of memory / devices this
 * decoder supports as well as configuration lock status. See
 * "CXL 2.0 8.2.5.12.7 CXL HDM Decoder 0 Control Register" for details.
 */
#define CXL_DECODER_F_RAM   BIT(0)
#define CXL_DECODER_F_PMEM  BIT(1)
#define CXL_DECODER_F_TYPE2 BIT(2)
#define CXL_DECODER_F_TYPE3 BIT(3)
#define CXL_DECODER_F_LOCK  BIT(4)
#define CXL_DECODER_F_ENABLE    BIT(5)
#define CXL_DECODER_F_MASK  GENMASK(5, 0)

enum cxl_decoder_type {
	CXL_DECODER_ACCELERATOR = 2,
	CXL_DECODER_EXPANDER = 3,
};

/*
 * The current specification goes up to 8; double that seems a reasonable
 * software maximum for the foreseeable future.
 */
#define CXL_DECODER_MAX_INTERLEAVE 16

/**
 * struct cxl_decoder - CXL address range decode configuration
 * @dev: this decoder's device
 * @id: kernel device name id
 * @platform_res: address space resources considered by root decoder
 * @decoder_range: address space resources considered by midlevel decoder
 * @interleave_ways: number of cxl_dports in this decode
 * @interleave_granularity: data stride per dport
 * @target_type: accelerator vs expander (type2 vs type3) selector
 * @flags: memory type capabilities and locking
 * @target_lock: coordinate coherent reads of the target list
 * @nr_targets: number of elements in @target
 * @target: active ordered target list in current decoder configuration
 */
struct cxl_decoder {
	struct device dev;
	int id;
	union {
		struct resource platform_res;
		struct range decoder_range;
	};
	int interleave_ways;
	int interleave_granularity;
	enum cxl_decoder_type target_type;
	unsigned long flags;
	seqlock_t target_lock;
	int nr_targets;
	struct cxl_dport *target[];
};
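
/*
 * Illustrative sketch, not part of the upstream header: decoder flags are
 * a bitmask, so capability checks combine them directly. Note that the
 * address union above is interpreted per decoder level: root decoders use
 * @platform_res while all other decoders use @decoder_range (see
 * is_root_decoder() below). The helper is hypothetical.
 */
static inline bool cxl_example_decoder_pmem_ready(const struct cxl_decoder *cxld)
{
	unsigned long need = CXL_DECODER_F_ENABLE | CXL_DECODER_F_PMEM;

	return (cxld->flags & need) == need;
}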

/**
 * enum cxl_nvdimm_brige_state - state machine for managing bus rescans
 * @CXL_NVB_NEW: Set at bridge create and after cxl_pmem_wq is destroyed
 * @CXL_NVB_DEAD: Set at bridge unregistration to preclude async probing
 * @CXL_NVB_ONLINE: Target state after successful ->probe()
 * @CXL_NVB_OFFLINE: Target state after ->remove() or failed ->probe()
 */
enum cxl_nvdimm_brige_state {
	CXL_NVB_NEW,
	CXL_NVB_DEAD,
	CXL_NVB_ONLINE,
	CXL_NVB_OFFLINE,
};

struct cxl_nvdimm_bridge {
	int id;
	struct device dev;
	struct cxl_port *port;
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_bus_descriptor nd_desc;
	struct work_struct state_work;
	enum cxl_nvdimm_brige_state state;
};

struct cxl_nvdimm {
	struct device dev;
	struct cxl_memdev *cxlmd;
	struct nvdimm *nvdimm;
};

/**
 * struct cxl_port - logical collection of upstream port devices and
 *		     downstream port devices to construct a CXL memory
 *		     decode hierarchy.
 * @dev: this port's device
 * @uport: PCI or platform device implementing the upstream port capability
 * @id: id for port device-name
 * @dports: cxl_dport instances referenced by decoders
 * @endpoints: cxl_ep instances, endpoints that are a descendant of this port
 * @decoder_ida: allocator for decoder ids
 * @component_reg_phys: component register capability base address (optional)
 * @dead: last ep has been removed, force port re-creation
 * @depth: How deep this port is relative to the root. depth 0 is the root.
 */
struct cxl_port {
	struct device dev;
	struct device *uport;
	int id;
	struct list_head dports;
	struct list_head endpoints;
	struct ida decoder_ida;
	resource_size_t component_reg_phys;
	bool dead;
	unsigned int depth;
};

/**
 * struct cxl_dport - CXL downstream port
 * @dport: PCI bridge or firmware device representing the downstream link
 * @port_id: unique hardware identifier for dport in decoder target list
 * @component_reg_phys: downstream port component registers
 * @port: reference to cxl_port that contains this downstream port
 * @list: node for a cxl_port's list of cxl_dport instances
 */
struct cxl_dport {
	struct device *dport;
	int port_id;
	resource_size_t component_reg_phys;
	struct cxl_port *port;
	struct list_head list;
};

/**
 * struct cxl_ep - track an endpoint's interest in a port
 * @ep: device that hosts a generic CXL endpoint (expander or accelerator)
 * @list: node on port->endpoints list
 */
struct cxl_ep {
	struct device *ep;
	struct list_head list;
};

/*
 * The platform firmware device hosting the root is also the top of the
 * CXL port topology. All other CXL ports have another CXL port as their
 * parent and their ->uport / host device is out-of-line of the port
 * ancestry.
 */
static inline bool is_cxl_root(struct cxl_port *port)
{
	return port->uport == port->dev.parent;
}

bool is_cxl_port(struct device *dev);
struct cxl_port *to_cxl_port(struct device *dev);
struct pci_bus;
int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
			      struct pci_bus *bus);
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
				   resource_size_t component_reg_phys,
				   struct cxl_port *parent_port);
struct cxl_port *find_cxl_root(struct device *dev);
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
int cxl_bus_rescan(void);
struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd);
bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd);

struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport, int port_id,
				     resource_size_t component_reg_phys);
struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
					const struct device *dev);
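
/*
 * Illustrative sketch only: a platform driver (cxl_acpi, for example)
 * typically builds the topology by adding a port for each upstream device
 * and then one dport per downstream link, roughly:
 *
 *	port = devm_cxl_add_port(host, uport_dev, component_reg_phys,
 *				 parent_port);
 *	if (IS_ERR(port))
 *		return PTR_ERR(port);
 *	dport = devm_cxl_add_dport(port, dport_dev, port_id, creg_phys);
 *
 * The variable names above are hypothetical; error handling is elided.
 */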

struct cxl_decoder *to_cxl_decoder(struct device *dev);
bool is_root_decoder(struct device *dev);
bool is_endpoint_decoder(struct device *dev);
bool is_cxl_decoder(struct device *dev);
struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
					   unsigned int nr_targets);
struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
					     unsigned int nr_targets);
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint);
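
/*
 * Illustrative sketch only: the usual decoder construction flow is
 * allocate, fill in the decode parameters, then add and arrange for
 * automatic teardown, roughly:
 *
 *	cxld = cxl_root_decoder_alloc(port, nr_targets);
 *	... set interleave_ways / interleave_granularity, flags, target_type ...
 *	rc = cxl_decoder_add(cxld, target_map);
 *	if (rc)
 *		put_device(&cxld->dev);
 *	else
 *		rc = cxl_decoder_autoremove(host, cxld);
 *
 * Names above are hypothetical locals; see the cxl_acpi and cxl_port
 * drivers for the real call sites.
 */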

struct cxl_hdm;
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port);
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
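
/*
 * Illustrative sketch only: a port driver typically probes decoders via
 * the component registers, falling back to a passthrough decoder when no
 * HDM decoder capability is available, roughly:
 *
 *	cxlhdm = devm_cxl_setup_hdm(port);
 *	if (!IS_ERR(cxlhdm))
 *		rc = devm_cxl_enumerate_decoders(cxlhdm);
 *	else
 *		rc = devm_cxl_add_passthrough_decoder(port);
 *
 * Whether the fallback applies to every error from devm_cxl_setup_hdm()
 * is an assumption here; see the cxl_port driver for the authoritative
 * flow.
 */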

extern struct bus_type cxl_bus_type;

struct cxl_driver {
	const char *name;
	int (*probe)(struct device *dev);
	void (*remove)(struct device *dev);
	struct device_driver drv;
	int id;
};

static inline struct cxl_driver *to_cxl_drv(struct device_driver *drv)
{
	return container_of(drv, struct cxl_driver, drv);
}

int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname);
#define cxl_driver_register(x) __cxl_driver_register(x, THIS_MODULE, KBUILD_MODNAME)
void cxl_driver_unregister(struct cxl_driver *cxl_drv);

#define module_cxl_driver(__cxl_driver) \
	module_driver(__cxl_driver, cxl_driver_register, cxl_driver_unregister)

#define CXL_DEVICE_NVDIMM_BRIDGE	1
#define CXL_DEVICE_NVDIMM		2
#define CXL_DEVICE_PORT			3
#define CXL_DEVICE_ROOT			4
#define CXL_DEVICE_MEMORY_EXPANDER	5

#define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*")
#define CXL_MODALIAS_FMT "cxl:t%d"
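
/*
 * Illustrative sketch only, modeled on how an endpoint/port driver hooks
 * into the bus (the names below are hypothetical):
 *
 *	static struct cxl_driver cxl_foo_driver = {
 *		.name = "cxl_foo",
 *		.probe = cxl_foo_probe,
 *		.id = CXL_DEVICE_PORT,
 *	};
 *	module_cxl_driver(cxl_foo_driver);
 *	MODULE_ALIAS_CXL(CXL_DEVICE_PORT);
 *
 * The .id value must match one of the CXL_DEVICE_* types above so that the
 * modalias "cxl:t<id>" generated via CXL_MODALIAS_FMT autoloads the module.
 */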

struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev);
struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
						     struct cxl_port *port);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd);

/*
 * Unit test builds override this to __weak; find the 'strong' version
 * of these symbols in tools/testing/cxl/.
 */
#ifndef __mock
#define __mock static
#endif

#endif /* __CXL_H__ */