xref: /openbmc/linux/drivers/cxl/core/hdm.c (revision 1423885c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2022 Intel Corporation. All rights reserved. */
3 #include <linux/seq_file.h>
4 #include <linux/device.h>
5 #include <linux/delay.h>
6 
7 #include "cxlmem.h"
8 #include "core.h"
9 
10 /**
11  * DOC: cxl core hdm
12  *
13  * Compute Express Link Host Managed Device Memory, starting with the
14  * CXL 2.0 specification, is managed by an array of HDM Decoder register
15  * instances per CXL port and per CXL endpoint. Define common helpers
16  * for enumerating these registers and capabilities.
17  */
18 
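/*
 * A port driver typically pairs the two main entry points below; a minimal
 * sketch (error handling elided, assuming the usual devm-managed probe flow):
 *
 *	cxlhdm = devm_cxl_setup_hdm(port, info);
 *	if (IS_ERR(cxlhdm))
 *		return PTR_ERR(cxlhdm);
 *	return devm_cxl_enumerate_decoders(cxlhdm, info);
 */
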
19 DECLARE_RWSEM(cxl_dpa_rwsem);
20 
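/*
 * Register a newly allocated decoder with the core and tie its lifetime to
 * @port via cxl_decoder_autoremove(). If registration fails the decoder
 * reference is dropped here, so callers need not clean up.
 */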
21 static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
22 			   int *target_map)
23 {
24 	int rc;
25 
26 	rc = cxl_decoder_add_locked(cxld, target_map);
27 	if (rc) {
28 		put_device(&cxld->dev);
29 		dev_err(&port->dev, "Failed to add decoder\n");
30 		return rc;
31 	}
32 
33 	rc = cxl_decoder_autoremove(&port->dev, cxld);
34 	if (rc)
35 		return rc;
36 
37 	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
38 
39 	return 0;
40 }
41 
42 /*
43  * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
44  * single-ported host bridges need not publish a decoder capability when a
45  * passthrough decode can be assumed, i.e. all transactions that the uport sees
46  * are claimed and passed to the single dport. Disable the range until the first
47  * CXL region is enumerated / activated.
48  */
49 int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
50 {
51 	struct cxl_switch_decoder *cxlsd;
52 	struct cxl_dport *dport = NULL;
53 	int single_port_map[1];
54 	unsigned long index;
55 
56 	cxlsd = cxl_switch_decoder_alloc(port, 1);
57 	if (IS_ERR(cxlsd))
58 		return PTR_ERR(cxlsd);
59 
60 	device_lock_assert(&port->dev);
61 
62 	xa_for_each(&port->dports, index, dport)
63 		break;
64 	single_port_map[0] = dport->port_id;
65 
66 	return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
67 }
68 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);
69 
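/*
 * Cache the decoder count, per-decoder target count, and the additional
 * interleave address bits (11:8, 14:12) advertised by the HDM Decoder
 * Capability register.
 */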
70 static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
71 {
72 	u32 hdm_cap;
73 
74 	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
75 	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
76 	cxlhdm->target_count =
77 		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
78 	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
79 		cxlhdm->interleave_mask |= GENMASK(11, 8);
80 	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
81 		cxlhdm->interleave_mask |= GENMASK(14, 12);
82 }
83 
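/*
 * Probe the component register block and, if an HDM Decoder Capability is
 * advertised, map just that capability for this port.
 */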
84 static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
85 				struct cxl_component_regs *regs)
86 {
87 	struct cxl_register_map map = {
88 		.resource = port->component_reg_phys,
89 		.base = crb,
90 		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
91 	};
92 
93 	cxl_probe_component_regs(&port->dev, crb, &map.component_map);
94 	if (!map.component_map.hdm_decoder.valid) {
95 		dev_err(&port->dev, "HDM decoder registers invalid\n");
96 		return -ENXIO;
97 	}
98 
99 	return cxl_map_component_regs(&port->dev, regs, &map,
100 				      BIT(CXL_CM_CAP_CAP_ID_HDM));
101 }
102 
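/*
 * Decide whether DVSEC range registers, rather than HDM decoders, describe
 * the endpoint's active ranges: emulate when no decoder registers are
 * mapped, or when Mem_Enable is already set but no decoder has been
 * committed.
 */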
103 static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
104 {
105 	struct cxl_hdm *cxlhdm;
106 	void __iomem *hdm;
107 	u32 ctrl;
108 	int i;
109 
110 	if (!info)
111 		return false;
112 
113 	cxlhdm = dev_get_drvdata(&info->port->dev);
114 	hdm = cxlhdm->regs.hdm_decoder;
115 
116 	if (!hdm)
117 		return true;
118 
119 	/*
120 	 * If HDM decoders are present and the driver is in control of
121 	 * Mem_Enable, skip DVSEC-based emulation.
122 	 */
123 	if (!info->mem_enabled)
124 		return false;
125 
126 	/*
127 	 * If any decoders are committed already, there should not be any
128 	 * emulated DVSEC decoders.
129 	 */
130 	for (i = 0; i < cxlhdm->decoder_count; i++) {
131 		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
132 		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
133 			return false;
134 	}
135 
136 	return true;
137 }
138 
139 /**
140  * devm_cxl_setup_hdm - map HDM decoder component registers
141  * @port: cxl_port to map
142  * @info: cached DVSEC range register info
143  */
144 struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
145 				   struct cxl_endpoint_dvsec_info *info)
146 {
147 	struct device *dev = &port->dev;
148 	struct cxl_hdm *cxlhdm;
149 	void __iomem *crb;
150 	int rc;
151 
152 	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
153 	if (!cxlhdm)
154 		return ERR_PTR(-ENOMEM);
155 	cxlhdm->port = port;
156 	dev_set_drvdata(dev, cxlhdm);
157 
158 	crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
159 	if (!crb && info && info->mem_enabled) {
160 		cxlhdm->decoder_count = info->ranges;
161 		return cxlhdm;
162 	} else if (!crb) {
163 		dev_err(dev, "No component registers mapped\n");
164 		return ERR_PTR(-ENXIO);
165 	}
166 
167 	rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
168 	iounmap(crb);
169 	if (rc)
170 		return ERR_PTR(rc);
171 
172 	parse_hdm_decoder_caps(cxlhdm);
173 	if (cxlhdm->decoder_count == 0) {
174 		dev_err(dev, "Spec violation. Caps invalid\n");
175 		return ERR_PTR(-ENXIO);
176 	}
177 
178 	/*
179 	 * Now that the hdm capability is parsed, decide if range
180 	 * register emulation is needed and fixup cxlhdm accordingly.
181 	 */
182 	if (should_emulate_decoders(info)) {
183 		dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
184 			info->ranges > 1 ? "s" : "");
185 		cxlhdm->decoder_count = info->ranges;
186 	}
187 
188 	return cxlhdm;
189 }
190 EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
191 
192 static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
193 {
194 	unsigned long long start = r->start, end = r->end;
195 
196 	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
197 		   r->name);
198 }
199 
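/* Emit the DPA partition resources and their allocations to a seq_file */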
200 void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
201 {
202 	struct resource *p1, *p2;
203 
204 	down_read(&cxl_dpa_rwsem);
205 	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
206 		__cxl_dpa_debug(file, p1, 0);
207 		for (p2 = p1->child; p2; p2 = p2->sibling)
208 			__cxl_dpa_debug(file, p2, 1);
209 	}
210 	up_read(&cxl_dpa_rwsem);
211 }
212 EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
213 
214 /*
215  * Must be called in a context that synchronizes against this decoder's
216  * port ->remove() callback (like an endpoint decoder sysfs attribute)
217  */
218 static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
219 {
220 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
221 	struct cxl_port *port = cxled_to_port(cxled);
222 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
223 	struct resource *res = cxled->dpa_res;
224 	resource_size_t skip_start;
225 
226 	lockdep_assert_held_write(&cxl_dpa_rwsem);
227 
228 	/* save @skip_start, before @res is released */
229 	skip_start = res->start - cxled->skip;
230 	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
231 	if (cxled->skip)
232 		__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
233 	cxled->skip = 0;
234 	cxled->dpa_res = NULL;
235 	put_device(&cxled->cxld.dev);
236 	port->hdm_end--;
237 }
238 
239 static void cxl_dpa_release(void *cxled)
240 {
241 	down_write(&cxl_dpa_rwsem);
242 	__cxl_dpa_release(cxled);
243 	up_write(&cxl_dpa_rwsem);
244 }
245 
246 /*
247  * Must be called from context that will not race port device
248  * unregistration, like decoder sysfs attribute methods
249  */
250 static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
251 {
252 	struct cxl_port *port = cxled_to_port(cxled);
253 
254 	lockdep_assert_held_write(&cxl_dpa_rwsem);
255 	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
256 	__cxl_dpa_release(cxled);
257 }
258 
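/*
 * Reserve a DPA span (and any preceding skipped capacity) for @cxled and
 * derive the decoder mode from whichever partition (ram vs pmem) contains
 * the allocation. Reservations must be made in decoder instance order.
 */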
259 static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
260 			     resource_size_t base, resource_size_t len,
261 			     resource_size_t skipped)
262 {
263 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
264 	struct cxl_port *port = cxled_to_port(cxled);
265 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
266 	struct device *dev = &port->dev;
267 	struct resource *res;
268 
269 	lockdep_assert_held_write(&cxl_dpa_rwsem);
270 
271 	if (!len) {
272 		dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
273 			 port->id, cxled->cxld.id);
274 		return -EINVAL;
275 	}
276 
277 	if (cxled->dpa_res) {
278 		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
279 			port->id, cxled->cxld.id, cxled->dpa_res);
280 		return -EBUSY;
281 	}
282 
283 	if (port->hdm_end + 1 != cxled->cxld.id) {
284 		/*
285 		 * Assumes alloc and commit order always follows hardware instance
286 		 * order, per expectations from 8.2.5.12.20 Committing Decoder
287 		 * Programming, which requires decoder[m] to be committed before
288 		 * decoder[m+1] commit starts.
289 		 */
290 		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
291 			cxled->cxld.id, port->id, port->hdm_end + 1);
292 		return -EBUSY;
293 	}
294 
295 	if (skipped) {
296 		res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
297 				       dev_name(&cxled->cxld.dev), 0);
298 		if (!res) {
299 			dev_dbg(dev,
300 				"decoder%d.%d: failed to reserve skipped space\n",
301 				port->id, cxled->cxld.id);
302 			return -EBUSY;
303 		}
304 	}
305 	res = __request_region(&cxlds->dpa_res, base, len,
306 			       dev_name(&cxled->cxld.dev), 0);
307 	if (!res) {
308 		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
309 			port->id, cxled->cxld.id);
310 		if (skipped)
311 			__release_region(&cxlds->dpa_res, base - skipped,
312 					 skipped);
313 		return -EBUSY;
314 	}
315 	cxled->dpa_res = res;
316 	cxled->skip = skipped;
317 
318 	if (resource_contains(&cxlds->pmem_res, res))
319 		cxled->mode = CXL_DECODER_PMEM;
320 	else if (resource_contains(&cxlds->ram_res, res))
321 		cxled->mode = CXL_DECODER_RAM;
322 	else {
323 		dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
324 			cxled->cxld.id, cxled->dpa_res);
325 		cxled->mode = CXL_DECODER_MIXED;
326 	}
327 
328 	port->hdm_end++;
329 	get_device(&cxled->cxld.dev);
330 	return 0;
331 }
332 
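/**
 * devm_cxl_dpa_reserve - reserve a DPA span on behalf of an endpoint decoder
 * @cxled: endpoint decoder that will own the allocation
 * @base: starting device physical address of the span
 * @len: size of the span
 * @skipped: size of the DPA hole, preceding @base, that must also be claimed
 *
 * The reservation is released automatically when the decoder's port is
 * unregistered.
 */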
333 int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
334 				resource_size_t base, resource_size_t len,
335 				resource_size_t skipped)
336 {
337 	struct cxl_port *port = cxled_to_port(cxled);
338 	int rc;
339 
340 	down_write(&cxl_dpa_rwsem);
341 	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
342 	up_write(&cxl_dpa_rwsem);
343 
344 	if (rc)
345 		return rc;
346 
347 	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
348 }
349 EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);
350 
351 resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
352 {
353 	resource_size_t size = 0;
354 
355 	down_read(&cxl_dpa_rwsem);
356 	if (cxled->dpa_res)
357 		size = resource_size(cxled->dpa_res);
358 	up_read(&cxl_dpa_rwsem);
359 
360 	return size;
361 }
362 
363 resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
364 {
365 	resource_size_t base = -1;
366 
367 	down_read(&cxl_dpa_rwsem);
368 	if (cxled->dpa_res)
369 		base = cxled->dpa_res->start;
370 	up_read(&cxl_dpa_rwsem);
371 
372 	return base;
373 }
374 
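/*
 * Free a decoder's DPA allocation from sysfs context. Fails if the decoder
 * is attached to a region, currently enabled, or is not the most recently
 * allocated decoder for the port (allocations unwind in reverse order).
 */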
375 int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
376 {
377 	struct cxl_port *port = cxled_to_port(cxled);
378 	struct device *dev = &cxled->cxld.dev;
379 	int rc;
380 
381 	down_write(&cxl_dpa_rwsem);
382 	if (!cxled->dpa_res) {
383 		rc = 0;
384 		goto out;
385 	}
386 	if (cxled->cxld.region) {
387 		dev_dbg(dev, "decoder assigned to: %s\n",
388 			dev_name(&cxled->cxld.region->dev));
389 		rc = -EBUSY;
390 		goto out;
391 	}
392 	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
393 		dev_dbg(dev, "decoder enabled\n");
394 		rc = -EBUSY;
395 		goto out;
396 	}
397 	if (cxled->cxld.id != port->hdm_end) {
398 		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
399 			port->hdm_end);
400 		rc = -EBUSY;
401 		goto out;
402 	}
403 	devm_cxl_dpa_release(cxled);
404 	rc = 0;
405 out:
406 	up_write(&cxl_dpa_rwsem);
407 	return rc;
408 }
409 
410 int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
411 		     enum cxl_decoder_mode mode)
412 {
413 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
414 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
415 	struct device *dev = &cxled->cxld.dev;
416 	int rc;
417 
418 	switch (mode) {
419 	case CXL_DECODER_RAM:
420 	case CXL_DECODER_PMEM:
421 		break;
422 	default:
423 		dev_dbg(dev, "unsupported mode: %d\n", mode);
424 		return -EINVAL;
425 	}
426 
427 	down_write(&cxl_dpa_rwsem);
428 	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
429 		rc = -EBUSY;
430 		goto out;
431 	}
432 
433 	/*
434 	 * Only allow modes that are supported by the current partition
435 	 * configuration
436 	 */
437 	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
438 		dev_dbg(dev, "no available pmem capacity\n");
439 		rc = -ENXIO;
440 		goto out;
441 	}
442 	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
443 		dev_dbg(dev, "no available ram capacity\n");
444 		rc = -ENXIO;
445 		goto out;
446 	}
447 
448 	cxled->mode = mode;
449 	rc = 0;
450 out:
451 	up_write(&cxl_dpa_rwsem);
452 
453 	return rc;
454 }
455 
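/*
 * Allocate @size bytes of free DPA to @cxled from the partition that
 * matches its mode. Free space always starts after the last existing
 * allocation in a partition. A pmem allocation may additionally need to
 * "skip" any unallocated ram capacity that precedes it, since ram DPA is
 * ordered before pmem DPA on the device.
 */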
456 int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
457 {
458 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
459 	resource_size_t free_ram_start, free_pmem_start;
460 	struct cxl_port *port = cxled_to_port(cxled);
461 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
462 	struct device *dev = &cxled->cxld.dev;
463 	resource_size_t start, avail, skip;
464 	struct resource *p, *last;
465 	int rc;
466 
467 	down_write(&cxl_dpa_rwsem);
468 	if (cxled->cxld.region) {
469 		dev_dbg(dev, "decoder attached to %s\n",
470 			dev_name(&cxled->cxld.region->dev));
471 		rc = -EBUSY;
472 		goto out;
473 	}
474 
475 	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
476 		dev_dbg(dev, "decoder enabled\n");
477 		rc = -EBUSY;
478 		goto out;
479 	}
480 
481 	for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
482 		last = p;
483 	if (last)
484 		free_ram_start = last->end + 1;
485 	else
486 		free_ram_start = cxlds->ram_res.start;
487 
488 	for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
489 		last = p;
490 	if (last)
491 		free_pmem_start = last->end + 1;
492 	else
493 		free_pmem_start = cxlds->pmem_res.start;
494 
495 	if (cxled->mode == CXL_DECODER_RAM) {
496 		start = free_ram_start;
497 		avail = cxlds->ram_res.end - start + 1;
498 		skip = 0;
499 	} else if (cxled->mode == CXL_DECODER_PMEM) {
500 		resource_size_t skip_start, skip_end;
501 
502 		start = free_pmem_start;
503 		avail = cxlds->pmem_res.end - start + 1;
504 		skip_start = free_ram_start;
505 
506 		/*
507 		 * If some pmem is already allocated, then that allocation
508 		 * already handled the skip.
509 		 */
510 		if (cxlds->pmem_res.child &&
511 		    skip_start == cxlds->pmem_res.child->start)
512 			skip_end = skip_start - 1;
513 		else
514 			skip_end = start - 1;
515 		skip = skip_end - skip_start + 1;
516 	} else {
517 		dev_dbg(dev, "mode not set\n");
518 		rc = -EINVAL;
519 		goto out;
520 	}
521 
522 	if (size > avail) {
523 		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
524 			cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
525 			&avail);
526 		rc = -ENOSPC;
527 		goto out;
528 	}
529 
530 	rc = __cxl_dpa_reserve(cxled, start, size, skip);
531 out:
532 	up_write(&cxl_dpa_rwsem);
533 
534 	if (rc)
535 		return rc;
536 
537 	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
538 }
539 
540 static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
541 {
542 	u16 eig;
543 	u8 eiw;
544 
545 	/*
546 	 * Input validation ensures these WARNs never fire, but otherwise
547 	 * suppress uninitialized variable usage warnings.
548 	 */
549 	if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
550 		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
551 		return;
552 	if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
553 		      "invalid interleave_granularity: %d\n",
554 		      cxld->interleave_granularity))
555 		return;
556 
557 	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
558 	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
559 	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
560 }
561 
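/*
 * Program the Target Device Type bit: set for a CXL.mem expander
 * (target_type 3), clear otherwise, mirroring the decode in
 * init_hdm_decoder().
 */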
562 static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
563 {
564 	u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
565 			  CXL_HDM_DECODER0_CTRL_TYPE);
566 }
567 
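/*
 * Pack up to 8 downstream port ids into the Target List register layout,
 * one byte per interleave way with way 0 in the least significant byte.
 */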
568 static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
569 {
570 	struct cxl_dport **t = &cxlsd->target[0];
571 	int ways = cxlsd->cxld.interleave_ways;
572 
573 	if (dev_WARN_ONCE(&cxlsd->cxld.dev,
574 			  ways > 8 || ways > cxlsd->nr_targets,
575 			  "ways: %d overflows targets: %d\n", ways,
576 			  cxlsd->nr_targets))
577 		return -ENXIO;
578 
579 	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
580 	if (ways > 1)
581 		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
582 	if (ways > 2)
583 		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
584 	if (ways > 3)
585 		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
586 	if (ways > 4)
587 		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
588 	if (ways > 5)
589 		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
590 	if (ways > 6)
591 		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
592 	if (ways > 7)
593 		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
594 
595 	return 0;
596 }
597 
598 /*
599  * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
600  * committed or error status within 10ms. Allow a generous 20ms to account
601  * for clock skew and other marginal behavior.
602  */
603 #define COMMIT_TIMEOUT_MS 20
604 static int cxld_await_commit(void __iomem *hdm, int id)
605 {
606 	u32 ctrl;
607 	int i;
608 
609 	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
610 		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
611 		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
612 			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
613 			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
614 			return -EIO;
615 		}
616 		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
617 			return 0;
618 		fsleep(1000);
619 	}
620 
621 	return -ETIMEDOUT;
622 }
623 
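/*
 * Program and commit a decoder: write the interleave settings, target type,
 * and HPA base/size, plus either the target list (switch decoders) or the
 * DPA skip (endpoint decoders), then set Commit and wait for the hardware
 * to report Committed. Commits must occur in decoder instance order.
 */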
624 static int cxl_decoder_commit(struct cxl_decoder *cxld)
625 {
626 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
627 	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
628 	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
629 	int id = cxld->id, rc;
630 	u64 base, size;
631 	u32 ctrl;
632 
633 	if (cxld->flags & CXL_DECODER_F_ENABLE)
634 		return 0;
635 
636 	if (port->commit_end + 1 != id) {
637 		dev_dbg(&port->dev,
638 			"%s: out of order commit, expected decoder%d.%d\n",
639 			dev_name(&cxld->dev), port->id, port->commit_end + 1);
640 		return -EBUSY;
641 	}
642 
643 	down_read(&cxl_dpa_rwsem);
644 	/* common decoder settings */
645 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
646 	cxld_set_interleave(cxld, &ctrl);
647 	cxld_set_type(cxld, &ctrl);
648 	base = cxld->hpa_range.start;
649 	size = range_len(&cxld->hpa_range);
650 
651 	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
652 	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
653 	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
654 	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
655 
656 	if (is_switch_decoder(&cxld->dev)) {
657 		struct cxl_switch_decoder *cxlsd =
658 			to_cxl_switch_decoder(&cxld->dev);
659 		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
660 		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
661 		u64 targets;
662 
663 		rc = cxlsd_set_targets(cxlsd, &targets);
664 		if (rc) {
665 			dev_dbg(&port->dev, "%s: target configuration error\n",
666 				dev_name(&cxld->dev));
667 			goto err;
668 		}
669 
670 		writel(upper_32_bits(targets), tl_hi);
671 		writel(lower_32_bits(targets), tl_lo);
672 	} else {
673 		struct cxl_endpoint_decoder *cxled =
674 			to_cxl_endpoint_decoder(&cxld->dev);
675 		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
676 		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);
677 
678 		writel(upper_32_bits(cxled->skip), sk_hi);
679 		writel(lower_32_bits(cxled->skip), sk_lo);
680 	}
681 
682 	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
683 	up_read(&cxl_dpa_rwsem);
684 
685 	port->commit_end++;
686 	rc = cxld_await_commit(hdm, cxld->id);
687 err:
688 	if (rc) {
689 		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
690 			dev_name(&cxld->dev), rc);
691 		cxld->reset(cxld);
692 		return rc;
693 	}
694 	cxld->flags |= CXL_DECODER_F_ENABLE;
695 
696 	return 0;
697 }
698 
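/*
 * Tear down a committed decoder: clear Commit, zero the HPA base and size
 * registers, and, for endpoint decoders, hand configuration responsibility
 * back to userspace.
 */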
699 static int cxl_decoder_reset(struct cxl_decoder *cxld)
700 {
701 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
702 	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
703 	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
704 	int id = cxld->id;
705 	u32 ctrl;
706 
707 	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
708 		return 0;
709 
710 	if (port->commit_end != id) {
711 		dev_dbg(&port->dev,
712 			"%s: out of order reset, expected decoder%d.%d\n",
713 			dev_name(&cxld->dev), port->id, port->commit_end);
714 		return -EBUSY;
715 	}
716 
717 	down_read(&cxl_dpa_rwsem);
718 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
719 	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
720 	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
721 
722 	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
723 	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
724 	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
725 	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
726 	up_read(&cxl_dpa_rwsem);
727 
728 	port->commit_end--;
729 	cxld->flags &= ~CXL_DECODER_F_ENABLE;
730 
731 	/* Userspace is now responsible for reconfiguring this decoder */
732 	if (is_endpoint_decoder(&cxld->dev)) {
733 		struct cxl_endpoint_decoder *cxled;
734 
735 		cxled = to_cxl_endpoint_decoder(&cxld->dev);
736 		cxled->state = CXL_DECODER_STATE_MANUAL;
737 	}
738 
739 	return 0;
740 }
741 
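/*
 * Emulate an endpoint decoder from a DVSEC range register: adopt the
 * established range as a locked, enabled decoder and reserve the matching
 * DPA so the rest of the driver sees a consistent allocation state.
 */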
742 static int cxl_setup_hdm_decoder_from_dvsec(
743 	struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
744 	int which, struct cxl_endpoint_dvsec_info *info)
745 {
746 	struct cxl_endpoint_decoder *cxled;
747 	u64 len;
748 	int rc;
749 
750 	if (!is_cxl_endpoint(port))
751 		return -EOPNOTSUPP;
752 
753 	cxled = to_cxl_endpoint_decoder(&cxld->dev);
754 	len = range_len(&info->dvsec_range[which]);
755 	if (!len)
756 		return -ENOENT;
757 
758 	cxld->target_type = CXL_DECODER_EXPANDER;
759 	cxld->commit = NULL;
760 	cxld->reset = NULL;
761 	cxld->hpa_range = info->dvsec_range[which];
762 
763 	/*
764 	 * Set the emulated decoder as locked pending additional support to
765 	 * change the range registers at run time.
766 	 */
767 	cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
768 	port->commit_end = cxld->id;
769 
770 	rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
771 	if (rc) {
772 		dev_err(&port->dev,
773 			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
774 			port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
775 		return rc;
776 	}
777 	*dpa_base += len;
778 	cxled->state = CXL_DECODER_STATE_AUTO;
779 
780 	return 0;
781 }
782 
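/*
 * Initialize a decoder object from hardware state (or from DVSEC info when
 * emulating): recover the HPA range and interleave settings, the target map
 * for switch decoders, and the DPA reservation for committed endpoint
 * decoders.
 */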
783 static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
784 			    int *target_map, void __iomem *hdm, int which,
785 			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
786 {
787 	u64 size, base, skip, dpa_size, lo, hi;
788 	struct cxl_endpoint_decoder *cxled;
789 	bool committed;
790 	u32 remainder;
791 	int i, rc;
792 	u32 ctrl;
793 	union {
794 		u64 value;
795 		unsigned char target_id[8];
796 	} target_list;
797 
798 	if (should_emulate_decoders(info))
799 		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
800 							which, info);
801 
802 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
803 	lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
804 	hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
805 	base = (hi << 32) + lo;
806 	lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
807 	hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
808 	size = (hi << 32) + lo;
809 	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
810 	cxld->commit = cxl_decoder_commit;
811 	cxld->reset = cxl_decoder_reset;
812 
813 	if (!committed)
814 		size = 0;
815 	if (base == U64_MAX || size == U64_MAX) {
816 		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
817 			 port->id, cxld->id);
818 		return -ENXIO;
819 	}
820 
821 	cxld->hpa_range = (struct range) {
822 		.start = base,
823 		.end = base + size - 1,
824 	};
825 
826 	/* decoders are enabled if committed */
827 	if (committed) {
828 		cxld->flags |= CXL_DECODER_F_ENABLE;
829 		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
830 			cxld->flags |= CXL_DECODER_F_LOCK;
831 		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
832 			cxld->target_type = CXL_DECODER_EXPANDER;
833 		else
834 			cxld->target_type = CXL_DECODER_ACCELERATOR;
835 		if (cxld->id != port->commit_end + 1) {
836 			dev_warn(&port->dev,
837 				 "decoder%d.%d: Committed out of order\n",
838 				 port->id, cxld->id);
839 			return -ENXIO;
840 		}
841 
842 		if (size == 0) {
843 			dev_warn(&port->dev,
844 				 "decoder%d.%d: Committed with zero size\n",
845 				 port->id, cxld->id);
846 			return -ENXIO;
847 		}
848 		port->commit_end = cxld->id;
849 	} else {
850 		/* unless / until type-2 drivers arrive, assume type-3 */
851 		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
852 			ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
853 			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
854 		}
855 		cxld->target_type = CXL_DECODER_EXPANDER;
856 	}
857 	rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
858 			  &cxld->interleave_ways);
859 	if (rc) {
860 		dev_warn(&port->dev,
861 			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
862 			 port->id, cxld->id, ctrl);
863 		return rc;
864 	}
865 	rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
866 				 &cxld->interleave_granularity);
867 	if (rc)
868 		return rc;
869 
870 	if (!info) {
871 		lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
872 		hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
873 		target_list.value = (hi << 32) + lo;
874 		for (i = 0; i < cxld->interleave_ways; i++)
875 			target_map[i] = target_list.target_id[i];
876 
877 		return 0;
878 	}
879 
880 	if (!committed)
881 		return 0;
882 
883 	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
884 	if (remainder) {
885 		dev_err(&port->dev,
886 			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
887 			port->id, cxld->id, size, cxld->interleave_ways);
888 		return -ENXIO;
889 	}
890 	lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
891 	hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
892 	skip = (hi << 32) + lo;
893 	cxled = to_cxl_endpoint_decoder(&cxld->dev);
894 	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
895 	if (rc) {
896 		dev_err(&port->dev,
897 			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
898 			port->id, cxld->id, *dpa_base,
899 			*dpa_base + dpa_size + skip - 1, rc);
900 		return rc;
901 	}
902 	*dpa_base += dpa_size + skip;
903 
904 	cxled->state = CXL_DECODER_STATE_AUTO;
905 
906 	return 0;
907 }
908 
909 static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
910 {
911 	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
912 	int committed, i;
913 	u32 ctrl;
914 
915 	if (!hdm)
916 		return;
917 
918 	/*
919 	 * Since the register resource was recently claimed via request_region(),
920 	 * be careful about trusting the "not-committed" status until the commit
921 	 * timeout has elapsed.  The commit timeout is 10ms (CXL 2.0
922 	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
923 	 * host and target.
924 	 */
925 	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
926 		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
927 		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
928 			committed++;
929 	}
930 
931 	/* ensure that future checks of committed can be trusted */
932 	if (committed != cxlhdm->decoder_count)
933 		msleep(20);
934 }
935 
936 /**
937  * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
938  * @cxlhdm: HDM decoder capability state established by devm_cxl_setup_hdm()
939  * @info: cached DVSEC range register info
940  */
941 int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
942 				struct cxl_endpoint_dvsec_info *info)
943 {
944 	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
945 	struct cxl_port *port = cxlhdm->port;
946 	int i;
947 	u64 dpa_base = 0;
948 
949 	cxl_settle_decoders(cxlhdm);
950 
951 	for (i = 0; i < cxlhdm->decoder_count; i++) {
952 		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
953 		int rc, target_count = cxlhdm->target_count;
954 		struct cxl_decoder *cxld;
955 
956 		if (is_cxl_endpoint(port)) {
957 			struct cxl_endpoint_decoder *cxled;
958 
959 			cxled = cxl_endpoint_decoder_alloc(port);
960 			if (IS_ERR(cxled)) {
961 				dev_warn(&port->dev,
962 					 "Failed to allocate decoder%d.%d\n",
963 					 port->id, i);
964 				return PTR_ERR(cxled);
965 			}
966 			cxld = &cxled->cxld;
967 		} else {
968 			struct cxl_switch_decoder *cxlsd;
969 
970 			cxlsd = cxl_switch_decoder_alloc(port, target_count);
971 			if (IS_ERR(cxlsd)) {
972 				dev_warn(&port->dev,
973 					 "Failed to allocate decoder%d.%d\n",
974 					 port->id, i);
975 				return PTR_ERR(cxlsd);
976 			}
977 			cxld = &cxlsd->cxld;
978 		}
979 
980 		rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
981 				      &dpa_base, info);
982 		if (rc) {
983 			dev_warn(&port->dev,
984 				 "Failed to initialize decoder%d.%d\n",
985 				 port->id, i);
986 			put_device(&cxld->dev);
987 			return rc;
988 		}
989 		rc = add_hdm_decoder(port, cxld, target_map);
990 		if (rc) {
991 			dev_warn(&port->dev,
992 				 "Failed to add decoder%d.%d\n", port->id, i);
993 			return rc;
994 		}
995 	}
996 
997 	return 0;
998 }
999 EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);
1000