// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include <soc/tegra/fuse.h>

#include "mc.h"

static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	{ .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
	{ .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_114_SOC
	{ .compatible = "nvidia,tegra114-mc", .data = &tegra114_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_124_SOC
	{ .compatible = "nvidia,tegra124-mc", .data = &tegra124_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_132_SOC
	{ .compatible = "nvidia,tegra132-mc", .data = &tegra132_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_210_SOC
	{ .compatible = "nvidia,tegra210-mc", .data = &tegra210_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_186_SOC
	{ .compatible = "nvidia,tegra186-mc", .data = &tegra186_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_194_SOC
	{ .compatible = "nvidia,tegra194-mc", .data = &tegra194_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_234_SOC
	{ .compatible = "nvidia,tegra234-mc", .data = &tegra234_mc_soc },
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);

static void tegra_mc_devm_action_put_device(void *data)
{
	struct tegra_mc *mc = data;

	put_device(mc->dev);
}
/**
 * devm_tegra_memory_controller_get() - get Tegra Memory Controller handle
 * @dev: device pointer for the consumer device
 *
 * This function looks up the "nvidia,memory-controller" phandle in the
 * consumer's device-tree node and retrieves a handle to the corresponding
 * Memory Controller. The device reference is dropped automatically when
 * the consumer device is unbound.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct tegra_mc.
 */
struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *np;
	struct tegra_mc *mc;
	int err;

	np = of_parse_phandle(dev->of_node, "nvidia,memory-controller", 0);
	if (!np)
		return ERR_PTR(-ENOENT);

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	mc = platform_get_drvdata(pdev);
	if (!mc) {
		put_device(&pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	err = devm_add_action_or_reset(dev, tegra_mc_devm_action_put_device, mc);
	if (err)
		return ERR_PTR(err);

	return mc;
}
EXPORT_SYMBOL_GPL(devm_tegra_memory_controller_get);
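
/*
 * Example usage, a minimal sketch: the consumer node and names below are
 * hypothetical and only illustrate the expected device-tree wiring.
 *
 *   consumer@0 {
 *           nvidia,memory-controller = <&mc>;
 *   };
 *
 * In the consumer's probe():
 *
 *   struct tegra_mc *mc = devm_tegra_memory_controller_get(&pdev->dev);
 *
 *   if (IS_ERR(mc))
 *           return PTR_ERR(mc);
 */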

int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev)
{
	if (mc->soc->ops && mc->soc->ops->probe_device)
		return mc->soc->ops->probe_device(mc, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_mc_probe_device);

int tegra_mc_get_carveout_info(struct tegra_mc *mc, unsigned int id,
			       phys_addr_t *base, u64 *size)
{
	u32 offset;

	if (id < 1 || id >= mc->soc->num_carveouts)
		return -EINVAL;

	if (id < 6)
		offset = 0xc0c + 0x50 * (id - 1);
	else
		offset = 0x2004 + 0x50 * (id - 6);

	*base = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x0);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	*base |= (phys_addr_t)mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x4) << 32;
#endif

	if (size)
		*size = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x8) << 17;

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_mc_get_carveout_info);
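
/*
 * Example usage, a minimal sketch: carveout ID 1 is illustrative and must
 * be valid for the SoC at hand. Note that the size register holds the
 * size in units of 128 KiB, hence the << 17 above.
 *
 *   phys_addr_t base;
 *   u64 size;
 *
 *   err = tegra_mc_get_carveout_info(mc, 1, &base, &size);
 *   if (err < 0)
 *           return err;
 */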

static int tegra_mc_block_dma_common(struct tegra_mc *mc,
				     const struct tegra_mc_reset *rst)
{
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&mc->lock, flags);

	value = mc_readl(mc, rst->control) | BIT(rst->bit);
	mc_writel(mc, value, rst->control);

	spin_unlock_irqrestore(&mc->lock, flags);

	return 0;
}

static bool tegra_mc_dma_idling_common(struct tegra_mc *mc,
				       const struct tegra_mc_reset *rst)
{
	return (mc_readl(mc, rst->status) & BIT(rst->bit)) != 0;
}

static int tegra_mc_unblock_dma_common(struct tegra_mc *mc,
				       const struct tegra_mc_reset *rst)
{
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&mc->lock, flags);

	value = mc_readl(mc, rst->control) & ~BIT(rst->bit);
	mc_writel(mc, value, rst->control);

	spin_unlock_irqrestore(&mc->lock, flags);

	return 0;
}

static int tegra_mc_reset_status_common(struct tegra_mc *mc,
					const struct tegra_mc_reset *rst)
{
	return (mc_readl(mc, rst->control) & BIT(rst->bit)) != 0;
}

const struct tegra_mc_reset_ops tegra_mc_reset_ops_common = {
	.block_dma = tegra_mc_block_dma_common,
	.dma_idling = tegra_mc_dma_idling_common,
	.unblock_dma = tegra_mc_unblock_dma_common,
	.reset_status = tegra_mc_reset_status_common,
};

static inline struct tegra_mc *reset_to_mc(struct reset_controller_dev *rcdev)
{
	return container_of(rcdev, struct tegra_mc, reset);
}

static const struct tegra_mc_reset *tegra_mc_reset_find(struct tegra_mc *mc,
							unsigned long id)
{
	unsigned int i;

	for (i = 0; i < mc->soc->num_resets; i++)
		if (mc->soc->resets[i].id == id)
			return &mc->soc->resets[i];

	return NULL;
}

static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev,
				    unsigned long id)
{
	struct tegra_mc *mc = reset_to_mc(rcdev);
	const struct tegra_mc_reset_ops *rst_ops;
	const struct tegra_mc_reset *rst;
	int retries = 500;
	int err;

	rst = tegra_mc_reset_find(mc, id);
	if (!rst)
		return -ENODEV;

	rst_ops = mc->soc->reset_ops;
	if (!rst_ops)
		return -ENODEV;

	/* DMA flushing will fail if reset is already asserted */
	if (rst_ops->reset_status) {
		/* check whether reset is asserted */
		if (rst_ops->reset_status(mc, rst))
			return 0;
	}

	if (rst_ops->block_dma) {
		/* block the client's DMA requests */
		err = rst_ops->block_dma(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to block %s DMA: %d\n",
				rst->name, err);
			return err;
		}
	}

	if (rst_ops->dma_idling) {
		/* wait for completion of the outstanding DMA requests */
		while (!rst_ops->dma_idling(mc, rst)) {
			if (!retries--) {
				dev_err(mc->dev, "failed to flush %s DMA\n",
					rst->name);
				return -EBUSY;
			}

			usleep_range(10, 100);
		}
	}

	if (rst_ops->hotreset_assert) {
		/* clear the client's DMA requests sitting before arbitration */
		err = rst_ops->hotreset_assert(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to hot reset %s: %d\n",
				rst->name, err);
			return err;
		}
	}

	return 0;
}

static int tegra_mc_hotreset_deassert(struct reset_controller_dev *rcdev,
				      unsigned long id)
{
	struct tegra_mc *mc = reset_to_mc(rcdev);
	const struct tegra_mc_reset_ops *rst_ops;
	const struct tegra_mc_reset *rst;
	int err;

	rst = tegra_mc_reset_find(mc, id);
	if (!rst)
		return -ENODEV;

	rst_ops = mc->soc->reset_ops;
	if (!rst_ops)
		return -ENODEV;

	if (rst_ops->hotreset_deassert) {
		/* take the client out of hot reset */
		err = rst_ops->hotreset_deassert(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to deassert hot reset %s: %d\n",
				rst->name, err);
			return err;
		}
	}

	if (rst_ops->unblock_dma) {
		/* allow new DMA requests to proceed to arbitration */
		err = rst_ops->unblock_dma(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to unblock %s DMA: %d\n",
				rst->name, err);
			return err;
		}
	}

	return 0;
}

static int tegra_mc_hotreset_status(struct reset_controller_dev *rcdev,
				    unsigned long id)
{
	struct tegra_mc *mc = reset_to_mc(rcdev);
	const struct tegra_mc_reset_ops *rst_ops;
	const struct tegra_mc_reset *rst;

	rst = tegra_mc_reset_find(mc, id);
	if (!rst)
		return -ENODEV;

	rst_ops = mc->soc->reset_ops;
	if (!rst_ops)
		return -ENODEV;

	return rst_ops->reset_status(mc, rst);
}

static const struct reset_control_ops tegra_mc_reset_ops = {
	.assert = tegra_mc_hotreset_assert,
	.deassert = tegra_mc_hotreset_deassert,
	.status = tegra_mc_hotreset_status,
};
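
/*
 * These hot-resets are exposed through the common reset framework, so a
 * consumer uses the usual reset API. A minimal sketch; the "mc" reset
 * name is hypothetical and depends on the consumer's device tree:
 *
 *   struct reset_control *rst;
 *
 *   rst = devm_reset_control_get_exclusive(dev, "mc");
 *   if (IS_ERR(rst))
 *           return PTR_ERR(rst);
 *
 *   reset_control_assert(rst);   // blocks and flushes the client's DMA
 *   reset_control_deassert(rst); // unblocks DMA again
 */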

static int tegra_mc_reset_setup(struct tegra_mc *mc)
{
	int err;

	mc->reset.ops = &tegra_mc_reset_ops;
	mc->reset.owner = THIS_MODULE;
	mc->reset.of_node = mc->dev->of_node;
	mc->reset.of_reset_n_cells = 1;
	mc->reset.nr_resets = mc->soc->num_resets;

	err = reset_controller_register(&mc->reset);
	if (err < 0)
		return err;

	return 0;
}

int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
{
	unsigned int i;
	struct tegra_mc_timing *timing = NULL;

	for (i = 0; i < mc->num_timings; i++) {
		if (mc->timings[i].rate == rate) {
			timing = &mc->timings[i];
			break;
		}
	}

	if (!timing) {
		dev_err(mc->dev, "no memory timing registered for rate %lu\n",
			rate);
		return -EINVAL;
	}

	for (i = 0; i < mc->soc->num_emem_regs; ++i)
		mc_writel(mc, timing->emem_data[i], mc->soc->emem_regs[i]);

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_mc_write_emem_configuration);
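
/*
 * This is typically called from the EMC driver while switching the
 * external memory clock. A minimal sketch, assuming a timing for the
 * requested rate was registered via the device tree:
 *
 *   err = tegra_mc_write_emem_configuration(mc, 204000000);
 *   if (err < 0)
 *           return err;
 */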

unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
{
	u8 dram_count;

	dram_count = mc_readl(mc, MC_EMEM_ADR_CFG);
	dram_count &= MC_EMEM_ADR_CFG_EMEM_NUMDEV;
	dram_count++;

	return dram_count;
}
EXPORT_SYMBOL_GPL(tegra_mc_get_emem_device_count);
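
/*
 * The EMEM_NUMDEV field encodes the device count minus one: a field
 * value of 0 means one DRAM device is attached, 1 means two.
 */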

#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
    defined(CONFIG_ARCH_TEGRA_114_SOC) || \
    defined(CONFIG_ARCH_TEGRA_124_SOC) || \
    defined(CONFIG_ARCH_TEGRA_132_SOC) || \
    defined(CONFIG_ARCH_TEGRA_210_SOC)
static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
{
	unsigned long long tick;
	unsigned int i;
	u32 value;

	/* compute the number of MC clock cycles per tick */
	tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
	do_div(tick, NSEC_PER_SEC);

	value = mc_readl(mc, MC_EMEM_ARB_CFG);
	value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK;
	value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick);
	mc_writel(mc, value, MC_EMEM_ARB_CFG);

	/* write latency allowance defaults */
	for (i = 0; i < mc->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &mc->soc->clients[i];
		u32 value;

		value = mc_readl(mc, client->regs.la.reg);
		value &= ~(client->regs.la.mask << client->regs.la.shift);
		value |= (client->regs.la.def & client->regs.la.mask) << client->regs.la.shift;
		mc_writel(mc, value, client->regs.la.reg);
	}

	/* latch new values */
	mc_writel(mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);

	return 0;
}
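
/*
 * Worked example for the tick computation above, a sketch with
 * illustrative numbers: with the default 30 ns tick and a 400 MHz MC
 * clock,
 *
 *   tick = 30 * 400000000 / 1000000000 = 12
 *
 * so one latency-allowance tick spans 12 MC clock cycles.
 */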

static int load_one_timing(struct tegra_mc *mc,
			   struct tegra_mc_timing *timing,
			   struct device_node *node)
{
	int err;
	u32 tmp;

	err = of_property_read_u32(node, "clock-frequency", &tmp);
	if (err) {
		dev_err(mc->dev,
			"timing %pOFn: failed to read rate\n", node);
		return err;
	}

	timing->rate = tmp;
	timing->emem_data = devm_kcalloc(mc->dev, mc->soc->num_emem_regs,
					 sizeof(u32), GFP_KERNEL);
	if (!timing->emem_data)
		return -ENOMEM;

	err = of_property_read_u32_array(node, "nvidia,emem-configuration",
					 timing->emem_data,
					 mc->soc->num_emem_regs);
	if (err) {
		dev_err(mc->dev,
			"timing %pOFn: failed to read EMEM configuration\n",
			node);
		return err;
	}

	return 0;
}

static int load_timings(struct tegra_mc *mc, struct device_node *node)
{
	struct device_node *child;
	struct tegra_mc_timing *timing;
	int child_count = of_get_child_count(node);
	int i = 0, err;

	mc->timings = devm_kcalloc(mc->dev, child_count, sizeof(*timing),
				   GFP_KERNEL);
	if (!mc->timings)
		return -ENOMEM;

	mc->num_timings = child_count;

	for_each_child_of_node(node, child) {
		timing = &mc->timings[i++];

		err = load_one_timing(mc, timing, child);
		if (err) {
			of_node_put(child);
			return err;
		}
	}

	return 0;
}

static int tegra_mc_setup_timings(struct tegra_mc *mc)
{
	struct device_node *node;
	u32 ram_code, node_ram_code;
	int err;

	ram_code = tegra_read_ram_code();

	mc->num_timings = 0;

	for_each_child_of_node(mc->dev->of_node, node) {
		err = of_property_read_u32(node, "nvidia,ram-code",
					   &node_ram_code);
		if (err || (node_ram_code != ram_code))
			continue;

		err = load_timings(mc, node);
		of_node_put(node);
		if (err)
			return err;
		break;
	}

	if (mc->num_timings == 0)
		dev_warn(mc->dev,
			 "no memory timings for RAM code %u registered\n",
			 ram_code);

	return 0;
}
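
/*
 * The timings come from subnodes of the MC's device-tree node, one set
 * per RAM code. A minimal sketch; node names and register values below
 * are placeholders, not real settings:
 *
 *   emc-timings-0 {
 *           nvidia,ram-code = <0>;
 *
 *           timing-204000000 {
 *                   clock-frequency = <204000000>;
 *                   nvidia,emem-configuration = <0x00000005 ...>;
 *           };
 *   };
 */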

int tegra30_mc_probe(struct tegra_mc *mc)
{
	int err;

	mc->clk = devm_clk_get_optional(mc->dev, "mc");
	if (IS_ERR(mc->clk)) {
		dev_err(mc->dev, "failed to get MC clock: %ld\n", PTR_ERR(mc->clk));
		return PTR_ERR(mc->clk);
	}

	/* ensure that debug features are disabled */
	mc_writel(mc, 0x00000000, MC_TIMING_CONTROL_DBG);

	err = tegra_mc_setup_latency_allowance(mc);
	if (err < 0) {
		dev_err(mc->dev, "failed to setup latency allowance: %d\n", err);
		return err;
	}

	err = tegra_mc_setup_timings(mc);
	if (err < 0) {
		dev_err(mc->dev, "failed to setup timings: %d\n", err);
		return err;
	}

	return 0;
}

const struct tegra_mc_ops tegra30_mc_ops = {
	.probe = tegra30_mc_probe,
	.handle_irq = tegra30_mc_handle_irq,
};
#endif

static int mc_global_intstatus_to_channel(const struct tegra_mc *mc, u32 status,
					  unsigned int *mc_channel)
{
	if ((status & mc->soc->ch_intmask) == 0)
		return -EINVAL;

	*mc_channel = __ffs((status & mc->soc->ch_intmask) >>
			    mc->soc->global_intstatus_channel_shift);

	return 0;
}

static u32 mc_channel_to_global_intstatus(const struct tegra_mc *mc,
					  unsigned int channel)
{
	return BIT(channel) << mc->soc->global_intstatus_channel_shift;
}
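
/*
 * Worked example, a sketch with an illustrative channel shift of 8: an
 * interrupt on channel 2 sets BIT(2) << 8 = 0x400 in MC_GLOBAL_INTSTATUS,
 * and __ffs(0x400 >> 8) recovers channel 2 again.
 */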

irqreturn_t tegra30_mc_handle_irq(int irq, void *data)
{
	struct tegra_mc *mc = data;
	unsigned int bit, channel;
	unsigned long status;

	if (mc->soc->num_channels) {
		u32 global_status;
		int err;

		global_status = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, MC_GLOBAL_INTSTATUS);
		err = mc_global_intstatus_to_channel(mc, global_status, &channel);
		if (err < 0) {
			dev_err_ratelimited(mc->dev, "unknown interrupt channel 0x%08x\n",
					    global_status);
			return IRQ_NONE;
		}

		/* mask all interrupts to avoid flooding */
		status = mc_ch_readl(mc, channel, MC_INTSTATUS) & mc->soc->intmask;
	} else {
		status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
	}

	if (!status)
		return IRQ_NONE;

	for_each_set_bit(bit, &status, 32) {
		const char *error = tegra_mc_status_names[bit] ?: "unknown";
		const char *client = "unknown", *desc;
		const char *direction, *secure;
		u32 status_reg, addr_reg;
		u32 intmask = BIT(bit);
		phys_addr_t addr = 0;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		u32 addr_hi_reg = 0;
#endif
		unsigned int i;
		char perm[7];
		u8 id, type;
		u32 value;

		switch (intmask) {
		case MC_INT_DECERR_VPR:
			status_reg = MC_ERR_VPR_STATUS;
			addr_reg = MC_ERR_VPR_ADR;
			break;

		case MC_INT_SECERR_SEC:
			status_reg = MC_ERR_SEC_STATUS;
			addr_reg = MC_ERR_SEC_ADR;
			break;

		case MC_INT_DECERR_MTS:
			status_reg = MC_ERR_MTS_STATUS;
			addr_reg = MC_ERR_MTS_ADR;
			break;

		case MC_INT_DECERR_GENERALIZED_CARVEOUT:
			status_reg = MC_ERR_GENERALIZED_CARVEOUT_STATUS;
			addr_reg = MC_ERR_GENERALIZED_CARVEOUT_ADR;
			break;

		case MC_INT_DECERR_ROUTE_SANITY:
			status_reg = MC_ERR_ROUTE_SANITY_STATUS;
			addr_reg = MC_ERR_ROUTE_SANITY_ADR;
			break;

		default:
			status_reg = MC_ERR_STATUS;
			addr_reg = MC_ERR_ADR;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
			if (mc->soc->has_addr_hi_reg)
				addr_hi_reg = MC_ERR_ADR_HI;
#endif
			break;
		}

		if (mc->soc->num_channels)
			value = mc_ch_readl(mc, channel, status_reg);
		else
			value = mc_readl(mc, status_reg);

#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (mc->soc->num_address_bits > 32) {
			if (addr_hi_reg) {
				if (mc->soc->num_channels)
					addr = mc_ch_readl(mc, channel, addr_hi_reg);
				else
					addr = mc_readl(mc, addr_hi_reg);
			} else {
				addr = ((value >> MC_ERR_STATUS_ADR_HI_SHIFT) &
					MC_ERR_STATUS_ADR_HI_MASK);
			}
			addr <<= 32;
		}
#endif

		if (value & MC_ERR_STATUS_RW)
			direction = "write";
		else
			direction = "read";

		if (value & MC_ERR_STATUS_SECURITY)
			secure = "secure ";
		else
			secure = "";

		id = value & mc->soc->client_id_mask;

		for (i = 0; i < mc->soc->num_clients; i++) {
			if (mc->soc->clients[i].id == id) {
				client = mc->soc->clients[i].name;
				break;
			}
		}

		type = (value & MC_ERR_STATUS_TYPE_MASK) >>
		       MC_ERR_STATUS_TYPE_SHIFT;
		desc = tegra_mc_error_names[type];

		switch (value & MC_ERR_STATUS_TYPE_MASK) {
		case MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE:
			perm[0] = ' ';
			perm[1] = '[';

			if (value & MC_ERR_STATUS_READABLE)
				perm[2] = 'R';
			else
				perm[2] = '-';

			if (value & MC_ERR_STATUS_WRITABLE)
				perm[3] = 'W';
			else
				perm[3] = '-';

			if (value & MC_ERR_STATUS_NONSECURE)
				perm[4] = '-';
			else
				perm[4] = 'S';

			perm[5] = ']';
			perm[6] = '\0';
			break;

		default:
			perm[0] = '\0';
			break;
		}

		if (mc->soc->num_channels)
			value = mc_ch_readl(mc, channel, addr_reg);
		else
			value = mc_readl(mc, addr_reg);
		addr |= value;

		dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s%s)\n",
				    client, secure, direction, &addr, error,
				    desc, perm);
	}

	/* clear interrupts */
	if (mc->soc->num_channels) {
		mc_ch_writel(mc, channel, status, MC_INTSTATUS);
		mc_ch_writel(mc, MC_BROADCAST_CHANNEL,
			     mc_channel_to_global_intstatus(mc, channel),
			     MC_GLOBAL_INTSTATUS);
	} else {
		mc_writel(mc, status, MC_INTSTATUS);
	}

	return IRQ_HANDLED;
}

const char *const tegra_mc_status_names[32] = {
	[ 1] = "External interrupt",
	[ 6] = "EMEM address decode error",
	[ 7] = "GART page fault",
	[ 8] = "Security violation",
	[ 9] = "EMEM arbitration error",
	[10] = "Page fault",
	[11] = "Invalid APB ASID update",
	[12] = "VPR violation",
	[13] = "Secure carveout violation",
	[16] = "MTS carveout violation",
	[17] = "Generalized carveout violation",
	[20] = "Route Sanity error",
};

const char *const tegra_mc_error_names[8] = {
	[2] = "EMEM decode error",
	[3] = "TrustZone violation",
	[4] = "Carveout violation",
	[6] = "SMMU translation error",
};

/*
 * The Memory Controller (MC) has a number of Memory Clients that issue
 * memory bandwidth allocation requests to the MC interconnect provider.
 * The MC provider aggregates the requests and then sends the aggregated
 * request up to the External Memory Controller (EMC) interconnect
 * provider, which reconfigures the hardware interface to the External
 * Memory (EMEM) in accordance with the required bandwidth. Each MC
 * interconnect node represents an individual Memory Client.
 *
 * Memory interconnect topology:
 *
 *               +----+
 * +--------+    |    |
 * | TEXSRD +--->+    |
 * +--------+    |    |
 *               |    |    +-----+    +------+
 *    ...        | MC +--->+ EMC +--->+ EMEM |
 *               |    |    +-----+    +------+
 * +--------+    |    |
 * | DISP.. +--->+    |
 * +--------+    |    |
 *               +----+
 */
static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
{
	struct icc_node *node;
	unsigned int i;
	int err;

	/* older device-trees don't have interconnect properties */
	if (!device_property_present(mc->dev, "#interconnect-cells") ||
	    !mc->soc->icc_ops)
		return 0;

	mc->provider.dev = mc->dev;
	mc->provider.data = &mc->provider;
	mc->provider.set = mc->soc->icc_ops->set;
	mc->provider.aggregate = mc->soc->icc_ops->aggregate;
	mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;

	icc_provider_init(&mc->provider);

	/* create Memory Controller node */
	node = icc_node_create(TEGRA_ICC_MC);
	if (IS_ERR(node))
		return PTR_ERR(node);

	node->name = "Memory Controller";
	icc_node_add(node, &mc->provider);

	/* link Memory Controller to External Memory Controller */
	err = icc_link_create(node, TEGRA_ICC_EMC);
	if (err)
		goto remove_nodes;

	for (i = 0; i < mc->soc->num_clients; i++) {
		/* create MC client node */
		node = icc_node_create(mc->soc->clients[i].id);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			goto remove_nodes;
		}

		node->name = mc->soc->clients[i].name;
		icc_node_add(node, &mc->provider);

		/* link Memory Client to Memory Controller */
		err = icc_link_create(node, TEGRA_ICC_MC);
		if (err)
			goto remove_nodes;
	}

	err = icc_provider_register(&mc->provider);
	if (err)
		goto remove_nodes;

	return 0;

remove_nodes:
	icc_nodes_remove(&mc->provider);

	return err;
}
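
/*
 * Once the provider is registered, consumers request bandwidth through
 * the generic interconnect API. A minimal sketch; the "dma-mem" path
 * name and the bandwidth value are hypothetical:
 *
 *   struct icc_path *path;
 *
 *   path = devm_of_icc_get(dev, "dma-mem");
 *   if (IS_ERR(path))
 *           return PTR_ERR(path);
 *
 *   err = icc_set_bw(path, kBps_to_icc(204000), 0);
 */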

static int tegra_mc_probe(struct platform_device *pdev)
{
	struct tegra_mc *mc;
	u64 mask;
	int err;

	mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	platform_set_drvdata(pdev, mc);
	spin_lock_init(&mc->lock);
	mc->soc = of_device_get_match_data(&pdev->dev);
	mc->dev = &pdev->dev;

	mask = DMA_BIT_MASK(mc->soc->num_address_bits);

	err = dma_coerce_mask_and_coherent(&pdev->dev, mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	/* length of MC tick in nanoseconds */
	mc->tick = 30;

	mc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mc->regs))
		return PTR_ERR(mc->regs);

	mc->debugfs.root = debugfs_create_dir("mc", NULL);

	if (mc->soc->ops && mc->soc->ops->probe) {
		err = mc->soc->ops->probe(mc);
		if (err < 0)
			return err;
	}

	if (mc->soc->ops && mc->soc->ops->handle_irq) {
		mc->irq = platform_get_irq(pdev, 0);
		if (mc->irq < 0)
			return mc->irq;

		WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n");

		if (mc->soc->num_channels)
			mc_ch_writel(mc, MC_BROADCAST_CHANNEL, mc->soc->intmask,
				     MC_INTMASK);
		else
			mc_writel(mc, mc->soc->intmask, MC_INTMASK);

		err = devm_request_irq(&pdev->dev, mc->irq, mc->soc->ops->handle_irq, 0,
				       dev_name(&pdev->dev), mc);
		if (err < 0) {
			dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", mc->irq,
				err);
			return err;
		}
	}

	if (mc->soc->reset_ops) {
		err = tegra_mc_reset_setup(mc);
		if (err < 0)
			dev_err(&pdev->dev, "failed to register reset controller: %d\n", err);
	}

	err = tegra_mc_interconnect_setup(mc);
	if (err < 0)
		dev_err(&pdev->dev, "failed to initialize interconnect: %d\n",
			err);

	if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU) && mc->soc->smmu) {
		mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
		if (IS_ERR(mc->smmu)) {
			dev_err(&pdev->dev, "failed to probe SMMU: %ld\n",
				PTR_ERR(mc->smmu));
			mc->smmu = NULL;
		}
	}

	if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && !mc->soc->smmu) {
		mc->gart = tegra_gart_probe(&pdev->dev, mc);
		if (IS_ERR(mc->gart)) {
			dev_err(&pdev->dev, "failed to probe GART: %ld\n",
				PTR_ERR(mc->gart));
			mc->gart = NULL;
		}
	}

	return 0;
}

static int __maybe_unused tegra_mc_suspend(struct device *dev)
{
	struct tegra_mc *mc = dev_get_drvdata(dev);

	if (mc->soc->ops && mc->soc->ops->suspend)
		return mc->soc->ops->suspend(mc);

	return 0;
}

static int __maybe_unused tegra_mc_resume(struct device *dev)
{
	struct tegra_mc *mc = dev_get_drvdata(dev);

	if (mc->soc->ops && mc->soc->ops->resume)
		return mc->soc->ops->resume(mc);

	return 0;
}

static void tegra_mc_sync_state(struct device *dev)
{
	struct tegra_mc *mc = dev_get_drvdata(dev);

	/* check whether ICC provider is registered */
	if (mc->provider.dev == dev)
		icc_sync_state(dev);
}

static const struct dev_pm_ops tegra_mc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_mc_suspend, tegra_mc_resume)
};

static struct platform_driver tegra_mc_driver = {
	.driver = {
		.name = "tegra-mc",
		.of_match_table = tegra_mc_of_match,
		.pm = &tegra_mc_pm_ops,
		.suppress_bind_attrs = true,
		.sync_state = tegra_mc_sync_state,
	},
	.prevent_deferred_probe = true,
	.probe = tegra_mc_probe,
};

static int tegra_mc_init(void)
{
	return platform_driver_register(&tegra_mc_driver);
}
arch_initcall(tegra_mc_init);

MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra Memory Controller driver");