xref: /openbmc/linux/drivers/iommu/apple-dart.c (revision 0251d010)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Apple DART (Device Address Resolution Table) IOMMU driver
4  *
5  * Copyright (C) 2021 The Asahi Linux Contributors
6  *
7  * Based on arm/arm-smmu/arm-ssmu.c and arm/arm-smmu-v3/arm-smmu-v3.c
8  *  Copyright (C) 2013 ARM Limited
9  *  Copyright (C) 2015 ARM Limited
10  * and on exynos-iommu.c
11  *  Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
12  */
13 
14 #include <linux/atomic.h>
15 #include <linux/bitfield.h>
16 #include <linux/clk.h>
17 #include <linux/dev_printk.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/interrupt.h>
21 #include <linux/io-pgtable.h>
22 #include <linux/iommu.h>
23 #include <linux/iopoll.h>
24 #include <linux/module.h>
25 #include <linux/of.h>
26 #include <linux/of_address.h>
27 #include <linux/of_iommu.h>
28 #include <linux/of_platform.h>
29 #include <linux/pci.h>
30 #include <linux/platform_device.h>
31 #include <linux/slab.h>
32 #include <linux/swab.h>
33 #include <linux/types.h>
34 
35 #include "dma-iommu.h"
36 
/* Hardware limits of this DART generation. */
#define DART_MAX_STREAMS 16
#define DART_MAX_TTBR 4
#define MAX_DARTS_PER_DEVICE 2

/* Bitmask covering all DART_MAX_STREAMS streams of one DART. */
#define DART_STREAM_ALL 0xffff

/* MMIO register offsets and field masks. */
#define DART_PARAMS1 0x00
#define DART_PARAMS_PAGE_SHIFT GENMASK(27, 24)

#define DART_PARAMS2 0x04
#define DART_PARAMS_BYPASS_SUPPORT BIT(0)

/* Stream commands operate on the streams selected in DART_STREAM_SELECT. */
#define DART_STREAM_COMMAND 0x20
#define DART_STREAM_COMMAND_BUSY BIT(2)
#define DART_STREAM_COMMAND_INVALIDATE BIT(20)

#define DART_STREAM_SELECT 0x34

/* Fault status register; writing the value back clears the error. */
#define DART_ERROR 0x40
#define DART_ERROR_STREAM GENMASK(27, 24)
#define DART_ERROR_CODE GENMASK(11, 0)
#define DART_ERROR_FLAG BIT(31)

#define DART_ERROR_READ_FAULT BIT(4)
#define DART_ERROR_WRITE_FAULT BIT(3)
#define DART_ERROR_NO_PTE BIT(2)
#define DART_ERROR_NO_PMD BIT(1)
#define DART_ERROR_NO_TTBR BIT(0)

#define DART_CONFIG 0x60
#define DART_CONFIG_LOCK BIT(15)

/* Timeout in microseconds for the stream command BUSY bit to clear. */
#define DART_STREAM_COMMAND_BUSY_TIMEOUT 100

#define DART_ERROR_ADDR_HI 0x54
#define DART_ERROR_ADDR_LO 0x50

#define DART_STREAMS_ENABLE 0xfc

/* Per-stream translation configuration register. */
#define DART_TCR(sid) (0x100 + 4 * (sid))
#define DART_TCR_TRANSLATE_ENABLE BIT(7)
#define DART_TCR_BYPASS0_ENABLE BIT(8)
#define DART_TCR_BYPASS1_ENABLE BIT(12)

/* Per-stream translation table base registers, DART_MAX_TTBR per stream. */
#define DART_TTBR(sid, idx) (0x200 + 16 * (sid) + 4 * (idx))
#define DART_TTBR_VALID BIT(31)
#define DART_TTBR_SHIFT 12
84 
/*
 * Per-SoC-generation parameters, selected via the OF match table.
 *
 * @oas: output address size in bits for the io-pgtable configuration
 * @fmt: io-pgtable page table format used by this DART generation
 */
struct apple_dart_hw {
	u32 oas;
	enum io_pgtable_fmt fmt;
};
89 
90 /*
91  * Private structure associated with each DART device.
92  *
93  * @dev: device struct
94  * @hw: SoC-specific hardware data
95  * @regs: mapped MMIO region
96  * @irq: interrupt number, can be shared with other DARTs
97  * @clks: clocks associated with this DART
98  * @num_clks: number of @clks
99  * @lock: lock for hardware operations involving this dart
100  * @pgsize: pagesize supported by this DART
101  * @supports_bypass: indicates if this DART supports bypass mode
102  * @force_bypass: force bypass mode due to pagesize mismatch?
103  * @sid2group: maps stream ids to iommu_groups
104  * @iommu: iommu core device
105  */
struct apple_dart {
	struct device *dev;
	const struct apple_dart_hw *hw;

	void __iomem *regs;

	int irq;
	struct clk_bulk_data *clks;
	int num_clks;

	spinlock_t lock;

	u32 pgsize;
	/* set if DART_PARAMS2 advertises bypass support (see probe) */
	u32 supports_bypass : 1;
	/* set if pgsize > PAGE_SIZE, which makes translation unusable */
	u32 force_bypass : 1;

	struct iommu_group *sid2group[DART_MAX_STREAMS];
	struct iommu_device iommu;
};
125 
126 /*
127  * Convenience struct to identify streams.
128  *
129  * The normal variant is used inside apple_dart_master_cfg which isn't written
130  * to concurrently.
131  * The atomic variant is used inside apple_dart_domain where we have to guard
132  * against races from potential parallel calls to attach/detach_device.
133  * Note that even inside the atomic variant the apple_dart pointer is not
134  * protected: This pointer is initialized once under the domain init mutex
135  * and never changed again afterwards. Devices with different dart pointers
136  * cannot be attached to the same domain.
137  *
138  * @dart dart pointer
139  * @sid stream id bitmap
140  */
/* Non-atomic variant; see the comment above for when each is used. */
struct apple_dart_stream_map {
	struct apple_dart *dart;
	unsigned long sidmap;
};
/* Atomic variant, guarded against concurrent attach/detach updates. */
struct apple_dart_atomic_stream_map {
	struct apple_dart *dart;
	atomic64_t sidmap;
};
149 
150 /*
151  * This structure is attached to each iommu domain handled by a DART.
152  *
153  * @pgtbl_ops: pagetable ops allocated by io-pgtable
154  * @finalized: true if the domain has been completely initialized
155  * @init_lock: protects domain initialization
156  * @stream_maps: streams attached to this domain (valid for DMA/UNMANAGED only)
157  * @domain: core iommu domain pointer
158  */
struct apple_dart_domain {
	/* NULL until the domain is finalized; stays NULL for IDENTITY/BLOCKED */
	struct io_pgtable_ops *pgtbl_ops;

	bool finalized;
	struct mutex init_lock;
	struct apple_dart_atomic_stream_map stream_maps[MAX_DARTS_PER_DEVICE];

	struct iommu_domain domain;
};
168 
169 /*
170  * This structure is attached to devices with dev_iommu_priv_set() on of_xlate
171  * and contains a list of streams bound to this device.
172  * So far the worst case seen is a single device with two streams
173  * from different darts, such that this simple static array is enough.
174  *
175  * @streams: streams for this device
176  */
struct apple_dart_master_cfg {
	/* filled in by apple_dart_of_xlate(), one entry per distinct DART */
	struct apple_dart_stream_map stream_maps[MAX_DARTS_PER_DEVICE];
};
180 
181 /*
182  * Helper macro to iterate over apple_dart_master_cfg.stream_maps and
183  * apple_dart_domain.stream_maps
184  *
185  * @i int used as loop variable
186  * @base pointer to base struct (apple_dart_master_cfg or apple_dart_domain)
187  * @stream pointer to the apple_dart_streams struct for each loop iteration
188  */
/* Iteration stops at the first stream_map slot with a NULL dart pointer. */
#define for_each_stream_map(i, base, stream_map)                               \
	for (i = 0, stream_map = &(base)->stream_maps[0];                      \
	     i < MAX_DARTS_PER_DEVICE && stream_map->dart;                     \
	     stream_map = &(base)->stream_maps[++i])
193 
194 static struct platform_driver apple_dart_driver;
195 static const struct iommu_ops apple_dart_iommu_ops;
196 
/* Return the apple_dart_domain embedding the given core iommu_domain. */
static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct apple_dart_domain, domain);
}
201 
202 static void
203 apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map)
204 {
205 	int sid;
206 
207 	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
208 		writel(DART_TCR_TRANSLATE_ENABLE,
209 		       stream_map->dart->regs + DART_TCR(sid));
210 }
211 
212 static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map)
213 {
214 	int sid;
215 
216 	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
217 		writel(0, stream_map->dart->regs + DART_TCR(sid));
218 }
219 
220 static void
221 apple_dart_hw_enable_bypass(struct apple_dart_stream_map *stream_map)
222 {
223 	int sid;
224 
225 	WARN_ON(!stream_map->dart->supports_bypass);
226 	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
227 		writel(DART_TCR_BYPASS0_ENABLE | DART_TCR_BYPASS1_ENABLE,
228 		       stream_map->dart->regs + DART_TCR(sid));
229 }
230 
231 static void apple_dart_hw_set_ttbr(struct apple_dart_stream_map *stream_map,
232 				   u8 idx, phys_addr_t paddr)
233 {
234 	int sid;
235 
236 	WARN_ON(paddr & ((1 << DART_TTBR_SHIFT) - 1));
237 	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
238 		writel(DART_TTBR_VALID | (paddr >> DART_TTBR_SHIFT),
239 		       stream_map->dart->regs + DART_TTBR(sid, idx));
240 }
241 
242 static void apple_dart_hw_clear_ttbr(struct apple_dart_stream_map *stream_map,
243 				     u8 idx)
244 {
245 	int sid;
246 
247 	for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
248 		writel(0, stream_map->dart->regs + DART_TTBR(sid, idx));
249 }
250 
251 static void
252 apple_dart_hw_clear_all_ttbrs(struct apple_dart_stream_map *stream_map)
253 {
254 	int i;
255 
256 	for (i = 0; i < DART_MAX_TTBR; ++i)
257 		apple_dart_hw_clear_ttbr(stream_map, i);
258 }
259 
/*
 * Execute a stream command and wait for the hardware to finish it.
 *
 * Selects the streams in @stream_map, writes @command, then polls
 * DART_STREAM_COMMAND until the BUSY bit clears. The whole select +
 * command + poll sequence runs under the DART lock because the select
 * and command registers are shared by all streams of this DART.
 *
 * Returns 0 on success or the negative errno from the poll if the BUSY
 * bit did not clear within DART_STREAM_COMMAND_BUSY_TIMEOUT.
 */
static int
apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
			     u32 command)
{
	unsigned long flags;
	int ret;
	u32 command_reg;

	spin_lock_irqsave(&stream_map->dart->lock, flags);

	writel(stream_map->sidmap, stream_map->dart->regs + DART_STREAM_SELECT);
	writel(command, stream_map->dart->regs + DART_STREAM_COMMAND);

	ret = readl_poll_timeout_atomic(
		stream_map->dart->regs + DART_STREAM_COMMAND, command_reg,
		!(command_reg & DART_STREAM_COMMAND_BUSY), 1,
		DART_STREAM_COMMAND_BUSY_TIMEOUT);

	spin_unlock_irqrestore(&stream_map->dart->lock, flags);

	if (ret) {
		dev_err(stream_map->dart->dev,
			"busy bit did not clear after command %x for streams %lx\n",
			command, stream_map->sidmap);
		return ret;
	}

	return 0;
}
289 
/* Invalidate the TLB entries of every stream in @stream_map. */
static int
apple_dart_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
{
	return apple_dart_hw_stream_command(stream_map,
					    DART_STREAM_COMMAND_INVALIDATE);
}
296 
/*
 * Bring the DART into a known state: all DMA disabled, no valid TTBRs,
 * all streams enabled globally and pending errors cleared.
 *
 * Returns -EINVAL if the configuration was locked by earlier firmware
 * (DART_CONFIG_LOCK), in which case the registers cannot be changed
 * until reboot; otherwise returns the result of the TLB invalidation.
 */
static int apple_dart_hw_reset(struct apple_dart *dart)
{
	u32 config;
	struct apple_dart_stream_map stream_map;

	config = readl(dart->regs + DART_CONFIG);
	if (config & DART_CONFIG_LOCK) {
		dev_err(dart->dev, "DART is locked down until reboot: %08x\n",
			config);
		return -EINVAL;
	}

	/* operate on all streams of this DART at once */
	stream_map.dart = dart;
	stream_map.sidmap = DART_STREAM_ALL;
	apple_dart_hw_disable_dma(&stream_map);
	apple_dart_hw_clear_all_ttbrs(&stream_map);

	/* enable all streams globally since TCR is used to control isolation */
	writel(DART_STREAM_ALL, dart->regs + DART_STREAMS_ENABLE);

	/* clear any pending errors before the interrupt is unmasked */
	writel(readl(dart->regs + DART_ERROR), dart->regs + DART_ERROR);

	return apple_dart_hw_invalidate_tlb(&stream_map);
}
322 
323 static void apple_dart_domain_flush_tlb(struct apple_dart_domain *domain)
324 {
325 	int i;
326 	struct apple_dart_atomic_stream_map *domain_stream_map;
327 	struct apple_dart_stream_map stream_map;
328 
329 	for_each_stream_map(i, domain, domain_stream_map) {
330 		stream_map.dart = domain_stream_map->dart;
331 		stream_map.sidmap = atomic64_read(&domain_stream_map->sidmap);
332 		apple_dart_hw_invalidate_tlb(&stream_map);
333 	}
334 }
335 
/* iommu_domain_ops callback: flush the entire IOTLB of @domain. */
static void apple_dart_flush_iotlb_all(struct iommu_domain *domain)
{
	apple_dart_domain_flush_tlb(to_dart_domain(domain));
}
340 
/*
 * iommu_domain_ops callback invoked after unmaps: @gather is ignored and
 * the whole TLB of the domain is flushed instead.
 */
static void apple_dart_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	apple_dart_domain_flush_tlb(to_dart_domain(domain));
}
346 
/*
 * iommu_domain_ops callback invoked after new mappings are created:
 * flushes the whole TLB of the domain, ignoring @iova/@size.
 */
static void apple_dart_iotlb_sync_map(struct iommu_domain *domain,
				      unsigned long iova, size_t size)
{
	apple_dart_domain_flush_tlb(to_dart_domain(domain));
}
352 
353 static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
354 					   dma_addr_t iova)
355 {
356 	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
357 	struct io_pgtable_ops *ops = dart_domain->pgtbl_ops;
358 
359 	if (!ops)
360 		return 0;
361 
362 	return ops->iova_to_phys(ops, iova);
363 }
364 
365 static int apple_dart_map_pages(struct iommu_domain *domain, unsigned long iova,
366 				phys_addr_t paddr, size_t pgsize,
367 				size_t pgcount, int prot, gfp_t gfp,
368 				size_t *mapped)
369 {
370 	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
371 	struct io_pgtable_ops *ops = dart_domain->pgtbl_ops;
372 
373 	if (!ops)
374 		return -ENODEV;
375 
376 	return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp,
377 			      mapped);
378 }
379 
380 static size_t apple_dart_unmap_pages(struct iommu_domain *domain,
381 				     unsigned long iova, size_t pgsize,
382 				     size_t pgcount,
383 				     struct iommu_iotlb_gather *gather)
384 {
385 	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
386 	struct io_pgtable_ops *ops = dart_domain->pgtbl_ops;
387 
388 	return ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
389 }
390 
391 static void
392 apple_dart_setup_translation(struct apple_dart_domain *domain,
393 			     struct apple_dart_stream_map *stream_map)
394 {
395 	int i;
396 	struct io_pgtable_cfg *pgtbl_cfg =
397 		&io_pgtable_ops_to_pgtable(domain->pgtbl_ops)->cfg;
398 
399 	for (i = 0; i < pgtbl_cfg->apple_dart_cfg.n_ttbrs; ++i)
400 		apple_dart_hw_set_ttbr(stream_map, i,
401 				       pgtbl_cfg->apple_dart_cfg.ttbr[i]);
402 	for (; i < DART_MAX_TTBR; ++i)
403 		apple_dart_hw_clear_ttbr(stream_map, i);
404 
405 	apple_dart_hw_enable_translation(stream_map);
406 	apple_dart_hw_invalidate_tlb(stream_map);
407 }
408 
/*
 * Allocate the io-pgtable for @domain and record which streams use it.
 *
 * Runs the actual initialization at most once per domain, guarded by
 * init_lock/finalized; later calls return 0 immediately. The page table
 * is configured from the first DART in @cfg — of_xlate() guarantees all
 * DARTs of one master have a compatible page size.
 *
 * Returns 0 on success or -ENOMEM if the page table allocation failed.
 */
static int apple_dart_finalize_domain(struct iommu_domain *domain,
				      struct apple_dart_master_cfg *cfg)
{
	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
	struct apple_dart *dart = cfg->stream_maps[0].dart;
	struct io_pgtable_cfg pgtbl_cfg;
	int ret = 0;
	int i;

	mutex_lock(&dart_domain->init_lock);

	if (dart_domain->finalized)
		goto done;

	/* copy the master's stream maps into the domain */
	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
		dart_domain->stream_maps[i].dart = cfg->stream_maps[i].dart;
		atomic64_set(&dart_domain->stream_maps[i].sidmap,
			     cfg->stream_maps[i].sidmap);
	}

	pgtbl_cfg = (struct io_pgtable_cfg){
		.pgsize_bitmap = dart->pgsize,
		.ias = 32,
		.oas = dart->hw->oas,
		.coherent_walk = 1,
		.iommu_dev = dart->dev,
	};

	dart_domain->pgtbl_ops =
		alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg, domain);
	if (!dart_domain->pgtbl_ops) {
		ret = -ENOMEM;
		goto done;
	}

	/* the input address space is 32 bits wide (matches .ias above) */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = DMA_BIT_MASK(32);
	domain->geometry.force_aperture = true;

	dart_domain->finalized = true;

done:
	mutex_unlock(&dart_domain->init_lock);
	return ret;
}
455 
/*
 * Add (@add_streams true) or remove (false) the streams in @master_maps
 * to/from @domain_maps.
 *
 * Both arrays must reference the same DARTs in the same slots; any
 * mismatch returns -EINVAL before anything is modified. The sidmap
 * updates use atomic64 or/and since attach and detach of other masters
 * may run concurrently (see the comment on the stream map structs).
 */
static int
apple_dart_mod_streams(struct apple_dart_atomic_stream_map *domain_maps,
		       struct apple_dart_stream_map *master_maps,
		       bool add_streams)
{
	int i;

	/* validate everything first so the update below cannot fail halfway */
	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
		if (domain_maps[i].dart != master_maps[i].dart)
			return -EINVAL;
	}

	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
		if (!domain_maps[i].dart)
			break;
		if (add_streams)
			atomic64_or(master_maps[i].sidmap,
				    &domain_maps[i].sidmap);
		else
			atomic64_and(~master_maps[i].sidmap,
				     &domain_maps[i].sidmap);
	}

	return 0;
}
481 
/* Record @cfg's streams as translated by @domain. */
static int apple_dart_domain_add_streams(struct apple_dart_domain *domain,
					 struct apple_dart_master_cfg *cfg)
{
	return apple_dart_mod_streams(domain->stream_maps, cfg->stream_maps,
				      true);
}
488 
/* Remove @cfg's streams from the set translated by @domain. */
static int apple_dart_domain_remove_streams(struct apple_dart_domain *domain,
					    struct apple_dart_master_cfg *cfg)
{
	return apple_dart_mod_streams(domain->stream_maps, cfg->stream_maps,
				      false);
}
495 
/*
 * Attach @dev to @domain.
 *
 * DMA/UNMANAGED domains get the device's streams added and pointed at
 * the domain's page tables; BLOCKED disables all DMA for the streams;
 * IDENTITY enables hardware bypass. Domain type and DART capabilities
 * must agree: a DART that forces bypass only accepts identity domains,
 * and identity domains require hardware bypass support.
 */
static int apple_dart_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret, i;
	struct apple_dart_stream_map *stream_map;
	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct apple_dart_domain *dart_domain = to_dart_domain(domain);

	if (cfg->stream_maps[0].dart->force_bypass &&
	    domain->type != IOMMU_DOMAIN_IDENTITY)
		return -EINVAL;
	if (!cfg->stream_maps[0].dart->supports_bypass &&
	    domain->type == IOMMU_DOMAIN_IDENTITY)
		return -EINVAL;

	/* allocates the page table on first attach, no-op afterwards */
	ret = apple_dart_finalize_domain(domain, cfg);
	if (ret)
		return ret;

	switch (domain->type) {
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_UNMANAGED:
		ret = apple_dart_domain_add_streams(dart_domain, cfg);
		if (ret)
			return ret;

		for_each_stream_map(i, cfg, stream_map)
			apple_dart_setup_translation(dart_domain, stream_map);
		break;
	case IOMMU_DOMAIN_BLOCKED:
		for_each_stream_map(i, cfg, stream_map)
			apple_dart_hw_disable_dma(stream_map);
		break;
	case IOMMU_DOMAIN_IDENTITY:
		for_each_stream_map(i, cfg, stream_map)
			apple_dart_hw_enable_bypass(stream_map);
		break;
	}

	return ret;
}
537 
538 static void apple_dart_detach_dev(struct iommu_domain *domain,
539 				  struct device *dev)
540 {
541 	int i;
542 	struct apple_dart_stream_map *stream_map;
543 	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
544 	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
545 
546 	for_each_stream_map(i, cfg, stream_map)
547 		apple_dart_hw_disable_dma(stream_map);
548 
549 	if (domain->type == IOMMU_DOMAIN_DMA ||
550 	    domain->type == IOMMU_DOMAIN_UNMANAGED)
551 		apple_dart_domain_remove_streams(dart_domain, cfg);
552 }
553 
554 static struct iommu_device *apple_dart_probe_device(struct device *dev)
555 {
556 	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
557 	struct apple_dart_stream_map *stream_map;
558 	int i;
559 
560 	if (!cfg)
561 		return ERR_PTR(-ENODEV);
562 
563 	for_each_stream_map(i, cfg, stream_map)
564 		device_link_add(
565 			dev, stream_map->dart->dev,
566 			DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
567 
568 	return &cfg->stream_maps[0].dart->iommu;
569 }
570 
/* Free the master configuration allocated by apple_dart_of_xlate(). */
static void apple_dart_release_device(struct device *dev)
{
	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);

	dev_iommu_priv_set(dev, NULL);
	kfree(cfg);
}
578 
579 static struct iommu_domain *apple_dart_domain_alloc(unsigned int type)
580 {
581 	struct apple_dart_domain *dart_domain;
582 
583 	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED &&
584 	    type != IOMMU_DOMAIN_IDENTITY && type != IOMMU_DOMAIN_BLOCKED)
585 		return NULL;
586 
587 	dart_domain = kzalloc(sizeof(*dart_domain), GFP_KERNEL);
588 	if (!dart_domain)
589 		return NULL;
590 
591 	mutex_init(&dart_domain->init_lock);
592 
593 	/* no need to allocate pgtbl_ops or do any other finalization steps */
594 	if (type == IOMMU_DOMAIN_IDENTITY || type == IOMMU_DOMAIN_BLOCKED)
595 		dart_domain->finalized = true;
596 
597 	return &dart_domain->domain;
598 }
599 
600 static void apple_dart_domain_free(struct iommu_domain *domain)
601 {
602 	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
603 
604 	if (dart_domain->pgtbl_ops)
605 		free_io_pgtable_ops(dart_domain->pgtbl_ops);
606 
607 	kfree(dart_domain);
608 }
609 
610 static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
611 {
612 	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
613 	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
614 	struct apple_dart *dart = platform_get_drvdata(iommu_pdev);
615 	struct apple_dart *cfg_dart;
616 	int i, sid;
617 
618 	if (args->args_count != 1)
619 		return -EINVAL;
620 	sid = args->args[0];
621 
622 	if (!cfg)
623 		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
624 	if (!cfg)
625 		return -ENOMEM;
626 	dev_iommu_priv_set(dev, cfg);
627 
628 	cfg_dart = cfg->stream_maps[0].dart;
629 	if (cfg_dart) {
630 		if (cfg_dart->supports_bypass != dart->supports_bypass)
631 			return -EINVAL;
632 		if (cfg_dart->force_bypass != dart->force_bypass)
633 			return -EINVAL;
634 		if (cfg_dart->pgsize != dart->pgsize)
635 			return -EINVAL;
636 	}
637 
638 	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
639 		if (cfg->stream_maps[i].dart == dart) {
640 			cfg->stream_maps[i].sidmap |= 1 << sid;
641 			return 0;
642 		}
643 	}
644 	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
645 		if (!cfg->stream_maps[i].dart) {
646 			cfg->stream_maps[i].dart = dart;
647 			cfg->stream_maps[i].sidmap = 1 << sid;
648 			return 0;
649 		}
650 	}
651 
652 	return -EINVAL;
653 }
654 
/* Protects the sid2group tables of all DARTs. */
static DEFINE_MUTEX(apple_dart_groups_lock);

/*
 * iommu_group iommu-data release callback: drop the group from every
 * sid2group table it appears in and free the master-config copy made by
 * apple_dart_device_group().
 */
static void apple_dart_release_group(void *iommu_data)
{
	int i, sid;
	struct apple_dart_stream_map *stream_map;
	struct apple_dart_master_cfg *group_master_cfg = iommu_data;

	mutex_lock(&apple_dart_groups_lock);

	for_each_stream_map(i, group_master_cfg, stream_map)
		for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
			stream_map->dart->sid2group[sid] = NULL;

	kfree(iommu_data);
	mutex_unlock(&apple_dart_groups_lock);
}
672 
/*
 * Find or create the iommu_group for @dev.
 *
 * Each DART keeps a sid2group table (guarded by apple_dart_groups_lock)
 * mapping stream ids to groups. If all of the device's streams already
 * belong to one existing group, that group is reused; streams spread
 * over different groups are rejected with -EINVAL. Otherwise a new
 * group is created, a copy of the master config is attached as its
 * iommu data (freed by apple_dart_release_group()) and all streams are
 * registered in sid2group.
 */
static struct iommu_group *apple_dart_device_group(struct device *dev)
{
	int i, sid;
	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct apple_dart_stream_map *stream_map;
	struct apple_dart_master_cfg *group_master_cfg;
	struct iommu_group *group = NULL;
	struct iommu_group *res = ERR_PTR(-EINVAL);

	mutex_lock(&apple_dart_groups_lock);

	/* look for an existing group covering all of this device's streams */
	for_each_stream_map(i, cfg, stream_map) {
		for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) {
			struct iommu_group *stream_group =
				stream_map->dart->sid2group[sid];

			if (group && group != stream_group) {
				res = ERR_PTR(-EINVAL);
				goto out;
			}

			group = stream_group;
		}
	}

	if (group) {
		res = iommu_group_ref_get(group);
		goto out;
	}

#ifdef CONFIG_PCI
	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
#endif
		group = generic_device_group(dev);

	res = ERR_PTR(-ENOMEM);
	if (!group)
		goto out;

	group_master_cfg = kmemdup(cfg, sizeof(*group_master_cfg), GFP_KERNEL);
	if (!group_master_cfg) {
		iommu_group_put(group);
		goto out;
	}

	iommu_group_set_iommudata(group, group_master_cfg,
		apple_dart_release_group);

	for_each_stream_map(i, cfg, stream_map)
		for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
			stream_map->dart->sid2group[sid] = group;

	res = group;

out:
	mutex_unlock(&apple_dart_groups_lock);
	return res;
}
733 
734 static int apple_dart_def_domain_type(struct device *dev)
735 {
736 	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
737 
738 	if (cfg->stream_maps[0].dart->force_bypass)
739 		return IOMMU_DOMAIN_IDENTITY;
740 	if (!cfg->stream_maps[0].dart->supports_bypass)
741 		return IOMMU_DOMAIN_DMA;
742 
743 	return 0;
744 }
745 
#ifndef CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
/* Keep things compiling when CONFIG_PCI_APPLE isn't selected */
#define CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR	0
#endif
/* Page containing the MSI doorbell, reserved as an MSI region below. */
#define DOORBELL_ADDR	(CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR & PAGE_MASK)
751 
752 static void apple_dart_get_resv_regions(struct device *dev,
753 					struct list_head *head)
754 {
755 	if (IS_ENABLED(CONFIG_PCIE_APPLE) && dev_is_pci(dev)) {
756 		struct iommu_resv_region *region;
757 		int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
758 
759 		region = iommu_alloc_resv_region(DOORBELL_ADDR,
760 						 PAGE_SIZE, prot,
761 						 IOMMU_RESV_MSI, GFP_KERNEL);
762 		if (!region)
763 			return;
764 
765 		list_add_tail(&region->list, head);
766 	}
767 
768 	iommu_dma_get_resv_regions(dev, head);
769 }
770 
/* Operations exposed to the IOMMU core. */
static const struct iommu_ops apple_dart_iommu_ops = {
	.domain_alloc = apple_dart_domain_alloc,
	.probe_device = apple_dart_probe_device,
	.release_device = apple_dart_release_device,
	.device_group = apple_dart_device_group,
	.of_xlate = apple_dart_of_xlate,
	.def_domain_type = apple_dart_def_domain_type,
	.get_resv_regions = apple_dart_get_resv_regions,
	.pgsize_bitmap = -1UL, /* Restricted during dart probe */
	.owner = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= apple_dart_attach_dev,
		.detach_dev	= apple_dart_detach_dev,
		.map_pages	= apple_dart_map_pages,
		.unmap_pages	= apple_dart_unmap_pages,
		.flush_iotlb_all = apple_dart_flush_iotlb_all,
		.iotlb_sync	= apple_dart_iotlb_sync,
		.iotlb_sync_map	= apple_dart_iotlb_sync_map,
		.iova_to_phys	= apple_dart_iova_to_phys,
		.free		= apple_dart_domain_free,
	}
};
793 
794 static irqreturn_t apple_dart_irq(int irq, void *dev)
795 {
796 	struct apple_dart *dart = dev;
797 	const char *fault_name = NULL;
798 	u32 error = readl(dart->regs + DART_ERROR);
799 	u32 error_code = FIELD_GET(DART_ERROR_CODE, error);
800 	u32 addr_lo = readl(dart->regs + DART_ERROR_ADDR_LO);
801 	u32 addr_hi = readl(dart->regs + DART_ERROR_ADDR_HI);
802 	u64 addr = addr_lo | (((u64)addr_hi) << 32);
803 	u8 stream_idx = FIELD_GET(DART_ERROR_STREAM, error);
804 
805 	if (!(error & DART_ERROR_FLAG))
806 		return IRQ_NONE;
807 
808 	/* there should only be a single bit set but let's use == to be sure */
809 	if (error_code == DART_ERROR_READ_FAULT)
810 		fault_name = "READ FAULT";
811 	else if (error_code == DART_ERROR_WRITE_FAULT)
812 		fault_name = "WRITE FAULT";
813 	else if (error_code == DART_ERROR_NO_PTE)
814 		fault_name = "NO PTE FOR IOVA";
815 	else if (error_code == DART_ERROR_NO_PMD)
816 		fault_name = "NO PMD FOR IOVA";
817 	else if (error_code == DART_ERROR_NO_TTBR)
818 		fault_name = "NO TTBR FOR IOVA";
819 	else
820 		fault_name = "unknown";
821 
822 	dev_err_ratelimited(
823 		dart->dev,
824 		"translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx",
825 		error, stream_idx, error_code, fault_name, addr);
826 
827 	writel(error, dart->regs + DART_ERROR);
828 	return IRQ_HANDLED;
829 }
830 
831 static int apple_dart_probe(struct platform_device *pdev)
832 {
833 	int ret;
834 	u32 dart_params[2];
835 	struct resource *res;
836 	struct apple_dart *dart;
837 	struct device *dev = &pdev->dev;
838 
839 	dart = devm_kzalloc(dev, sizeof(*dart), GFP_KERNEL);
840 	if (!dart)
841 		return -ENOMEM;
842 
843 	dart->dev = dev;
844 	dart->hw = of_device_get_match_data(dev);
845 	spin_lock_init(&dart->lock);
846 
847 	dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
848 	if (IS_ERR(dart->regs))
849 		return PTR_ERR(dart->regs);
850 
851 	if (resource_size(res) < 0x4000) {
852 		dev_err(dev, "MMIO region too small (%pr)\n", res);
853 		return -EINVAL;
854 	}
855 
856 	dart->irq = platform_get_irq(pdev, 0);
857 	if (dart->irq < 0)
858 		return -ENODEV;
859 
860 	ret = devm_clk_bulk_get_all(dev, &dart->clks);
861 	if (ret < 0)
862 		return ret;
863 	dart->num_clks = ret;
864 
865 	ret = clk_bulk_prepare_enable(dart->num_clks, dart->clks);
866 	if (ret)
867 		return ret;
868 
869 	ret = apple_dart_hw_reset(dart);
870 	if (ret)
871 		goto err_clk_disable;
872 
873 	dart_params[0] = readl(dart->regs + DART_PARAMS1);
874 	dart_params[1] = readl(dart->regs + DART_PARAMS2);
875 	dart->pgsize = 1 << FIELD_GET(DART_PARAMS_PAGE_SHIFT, dart_params[0]);
876 	dart->supports_bypass = dart_params[1] & DART_PARAMS_BYPASS_SUPPORT;
877 	dart->force_bypass = dart->pgsize > PAGE_SIZE;
878 
879 	ret = request_irq(dart->irq, apple_dart_irq, IRQF_SHARED,
880 			  "apple-dart fault handler", dart);
881 	if (ret)
882 		goto err_clk_disable;
883 
884 	platform_set_drvdata(pdev, dart);
885 
886 	ret = iommu_device_sysfs_add(&dart->iommu, dev, NULL, "apple-dart.%s",
887 				     dev_name(&pdev->dev));
888 	if (ret)
889 		goto err_free_irq;
890 
891 	ret = iommu_device_register(&dart->iommu, &apple_dart_iommu_ops, dev);
892 	if (ret)
893 		goto err_sysfs_remove;
894 
895 	dev_info(
896 		&pdev->dev,
897 		"DART [pagesize %x, bypass support: %d, bypass forced: %d] initialized\n",
898 		dart->pgsize, dart->supports_bypass, dart->force_bypass);
899 	return 0;
900 
901 err_sysfs_remove:
902 	iommu_device_sysfs_remove(&dart->iommu);
903 err_free_irq:
904 	free_irq(dart->irq, dart);
905 err_clk_disable:
906 	clk_bulk_disable_unprepare(dart->num_clks, dart->clks);
907 
908 	return ret;
909 }
910 
/*
 * Device teardown: reset the hardware (disabling all DMA) and release
 * the IRQ before unregistering from the core and gating the clocks.
 */
static int apple_dart_remove(struct platform_device *pdev)
{
	struct apple_dart *dart = platform_get_drvdata(pdev);

	apple_dart_hw_reset(dart);
	free_irq(dart->irq, dart);

	iommu_device_unregister(&dart->iommu);
	iommu_device_sysfs_remove(&dart->iommu);

	clk_bulk_disable_unprepare(dart->num_clks, dart->clks);

	return 0;
}
925 
/* apple,t8103 variant: 36-bit output addresses, APPLE_DART pgtable format. */
static const struct apple_dart_hw apple_dart_hw_t8103 = {
	.oas = 36,
	.fmt = APPLE_DART,
};
/* apple,t6000 variant: 42-bit output addresses, APPLE_DART2 pgtable format. */
static const struct apple_dart_hw apple_dart_hw_t6000 = {
	.oas = 42,
	.fmt = APPLE_DART2,
};
934 
/* DT match table; .data selects the per-SoC parameters defined above. */
static const struct of_device_id apple_dart_of_match[] = {
	{ .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
	{ .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 },
	{},
};
MODULE_DEVICE_TABLE(of, apple_dart_of_match);
941 
static struct platform_driver apple_dart_driver = {
	.driver	= {
		.name			= "apple-dart",
		.of_match_table		= apple_dart_of_match,
		/* hide the sysfs bind/unbind attributes for this driver */
		.suppress_bind_attrs    = true,
	},
	.probe	= apple_dart_probe,
	.remove	= apple_dart_remove,
};
951 
/* Register the driver; module init/exit boilerplate is generated. */
module_platform_driver(apple_dart_driver);

MODULE_DESCRIPTION("IOMMU API for Apple's DART");
MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
MODULE_LICENSE("GPL v2");
957