// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/kvm_host.h>
#include <linux/math.h>
#include <linux/spinlock.h>
#include <linux/swab.h>
#include <kvm/iodev.h>
#include <asm/kvm_aia_aplic.h>

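/*
 * Per-source software state. The low bits of @state shadow the emulated
 * pending and enabled bits, while APLIC_IRQ_STATE_INPUT tracks the raw
 * (non-rectified) input level last reported for the source.
 */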
struct aplic_irq {
	raw_spinlock_t lock;
	u32 sourcecfg;
	u32 state;
#define APLIC_IRQ_STATE_PENDING		BIT(0)
#define APLIC_IRQ_STATE_ENABLED		BIT(1)
#define APLIC_IRQ_STATE_ENPEND		(APLIC_IRQ_STATE_PENDING | \
					 APLIC_IRQ_STATE_ENABLED)
#define APLIC_IRQ_STATE_INPUT		BIT(8)
	u32 target;
};

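/*
 * Per-VM APLIC domain state. Reads of domaincfg always report DM=1 and
 * only the IE bit is writable, so the emulated domain operates in MSI
 * delivery mode.
 */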
struct aplic {
	struct kvm_io_device iodev;

	u32 domaincfg;
	u32 genmsi;

	u32 nr_irqs;
	u32 nr_words;
	struct aplic_irq *irqs;
};

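/*
 * Locked accessors for per-source registers. Source 0 and out-of-range
 * sources read as zero and ignore writes.
 */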
static u32 aplic_read_sourcecfg(struct aplic *aplic, u32 irq)
{
	u32 ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return 0;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = irqd->sourcecfg;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

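/*
 * Writes with the D (delegate) bit set clear sourcecfg, leaving the
 * source inactive; otherwise only the source mode field is retained.
 */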
static void aplic_write_sourcecfg(struct aplic *aplic, u32 irq, u32 val)
{
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	if (val & APLIC_SOURCECFG_D)
		val = 0;
	else
		val &= APLIC_SOURCECFG_SM_MASK;

	raw_spin_lock_irqsave(&irqd->lock, flags);
	irqd->sourcecfg = val;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

static u32 aplic_read_target(struct aplic *aplic, u32 irq)
{
	u32 ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return 0;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = irqd->target;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

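/* Target writes retain only the hart index, guest index, and EIID fields. */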
static void aplic_write_target(struct aplic *aplic, u32 irq, u32 val)
{
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	val &= APLIC_TARGET_EIID_MASK |
	       (APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) |
	       (APLIC_TARGET_GUEST_IDX_MASK << APLIC_TARGET_GUEST_IDX_SHIFT);

	raw_spin_lock_irqsave(&irqd->lock, flags);
	irqd->target = val;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

static bool aplic_read_pending(struct aplic *aplic, u32 irq)
{
	bool ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? true : false;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

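/*
 * For level-triggered sources, the pending bit can always be cleared but
 * can only be set while the input is asserted (high for LEVEL_HIGH, low
 * for LEVEL_LOW). Inactive sources ignore pending writes entirely.
 */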
static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
{
	unsigned long flags, sm;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);

	sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK;
	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
		goto skip_write_pending;

	if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH ||
	    sm == APLIC_SOURCECFG_SM_LEVEL_LOW) {
		if (!pending)
			goto noskip_write_pending;
		if ((irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    sm == APLIC_SOURCECFG_SM_LEVEL_LOW)
			goto skip_write_pending;
		if (!(irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    sm == APLIC_SOURCECFG_SM_LEVEL_HIGH)
			goto skip_write_pending;
	}

noskip_write_pending:
	if (pending)
		irqd->state |= APLIC_IRQ_STATE_PENDING;
	else
		irqd->state &= ~APLIC_IRQ_STATE_PENDING;

skip_write_pending:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

static bool aplic_read_enabled(struct aplic *aplic, u32 irq)
{
	bool ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
{
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	if (enabled)
		irqd->state |= APLIC_IRQ_STATE_ENABLED;
	else
		irqd->state &= ~APLIC_IRQ_STATE_ENABLED;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

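/*
 * Return the rectified input value: the raw input XOR'ed with the source
 * polarity. Delegated and inactive sources read as zero.
 */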
static bool aplic_read_input(struct aplic *aplic, u32 irq)
{
	u32 sourcecfg, sm, raw_input, irq_inverted;
	struct aplic_irq *irqd;
	unsigned long flags;
	bool ret = false;

	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);

	sourcecfg = irqd->sourcecfg;
	if (sourcecfg & APLIC_SOURCECFG_D)
		goto skip;

	sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
		goto skip;

	raw_input = (irqd->state & APLIC_IRQ_STATE_INPUT) ? 1 : 0;
	irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW ||
			sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0;
	ret = !!(raw_input ^ irq_inverted);

skip:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

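/*
 * Decode a target register into hart index, guest index, and EIID and
 * forward the interrupt as an MSI via kvm_riscv_aia_inject_msi_by_id().
 */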
static void aplic_inject_msi(struct kvm *kvm, u32 irq, u32 target)
{
	u32 hart_idx, guest_idx, eiid;

	hart_idx = target >> APLIC_TARGET_HART_IDX_SHIFT;
	hart_idx &= APLIC_TARGET_HART_IDX_MASK;
	guest_idx = target >> APLIC_TARGET_GUEST_IDX_SHIFT;
	guest_idx &= APLIC_TARGET_GUEST_IDX_MASK;
	eiid = target & APLIC_TARGET_EIID_MASK;
	kvm_riscv_aia_inject_msi_by_id(kvm, hart_idx, guest_idx, eiid);
}

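/*
 * Re-evaluate sources [first, last]: deliver an MSI for every source that
 * is both enabled and pending, clearing its pending bit in the process.
 * No-op while domain interrupts are disabled (domaincfg.IE clear).
 */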
static void aplic_update_irq_range(struct kvm *kvm, u32 first, u32 last)
{
	bool inject;
	u32 irq, target;
	unsigned long flags;
	struct aplic_irq *irqd;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!(aplic->domaincfg & APLIC_DOMAINCFG_IE))
		return;

	for (irq = first; irq <= last; irq++) {
		if (!irq || aplic->nr_irqs <= irq)
			continue;
		irqd = &aplic->irqs[irq];

		raw_spin_lock_irqsave(&irqd->lock, flags);

		inject = false;
		target = irqd->target;
		if ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
		    APLIC_IRQ_STATE_ENPEND) {
			irqd->state &= ~APLIC_IRQ_STATE_PENDING;
			inject = true;
		}

		raw_spin_unlock_irqrestore(&irqd->lock, flags);

		if (inject)
			aplic_inject_msi(kvm, irq, target);
	}
}

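/*
 * Main injection entry point for an external interrupt line. Update the
 * source state according to its trigger mode, record the new input level,
 * and deliver an MSI if the source is enabled and pending while domain
 * interrupts are enabled.
 */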
int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level)
{
	u32 target;
	bool inject = false, ie;
	unsigned long flags;
	struct aplic_irq *irqd;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!aplic || !source || (aplic->nr_irqs <= source))
		return -ENODEV;
	irqd = &aplic->irqs[source];
	ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false;

	raw_spin_lock_irqsave(&irqd->lock, flags);

	if (irqd->sourcecfg & APLIC_SOURCECFG_D)
		goto skip_unlock;

	switch (irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK) {
	case APLIC_SOURCECFG_SM_EDGE_RISE:
		if (level && !(irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_EDGE_FALL:
		if (!level && (irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_LEVEL_HIGH:
		if (level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_LEVEL_LOW:
		if (!level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	}

	if (level)
		irqd->state |= APLIC_IRQ_STATE_INPUT;
	else
		irqd->state &= ~APLIC_IRQ_STATE_INPUT;

	target = irqd->target;
	if (ie && ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
		   APLIC_IRQ_STATE_ENPEND)) {
		irqd->state &= ~APLIC_IRQ_STATE_PENDING;
		inject = true;
	}

skip_unlock:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	if (inject)
		aplic_inject_msi(kvm, source, target);

	return 0;
}

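/* Helpers operating on 32 sources at a time for the word-wide registers. */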
static u32 aplic_read_input_word(struct aplic *aplic, u32 word)
{
	u32 i, ret = 0;

	for (i = 0; i < 32; i++)
		ret |= aplic_read_input(aplic, word * 32 + i) ? BIT(i) : 0;

	return ret;
}

static u32 aplic_read_pending_word(struct aplic *aplic, u32 word)
{
	u32 i, ret = 0;

	for (i = 0; i < 32; i++)
		ret |= aplic_read_pending(aplic, word * 32 + i) ? BIT(i) : 0;

	return ret;
}

static void aplic_write_pending_word(struct aplic *aplic, u32 word,
				     u32 val, bool pending)
{
	u32 i;

	for (i = 0; i < 32; i++) {
		if (val & BIT(i))
			aplic_write_pending(aplic, word * 32 + i, pending);
	}
}

static u32 aplic_read_enabled_word(struct aplic *aplic, u32 word)
{
	u32 i, ret = 0;

	for (i = 0; i < 32; i++)
		ret |= aplic_read_enabled(aplic, word * 32 + i) ? BIT(i) : 0;

	return ret;
}

static void aplic_write_enabled_word(struct aplic *aplic, u32 word,
				     u32 val, bool enabled)
{
	u32 i;

	for (i = 0; i < 32; i++) {
		if (val & BIT(i))
			aplic_write_enabled(aplic, word * 32 + i, enabled);
	}
}

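/*
 * Handle a 32-bit read at offset @off within the APLIC register space.
 * Write-only registers (e.g. setipnum, clripnum) read as zero; unaligned
 * accesses return -EOPNOTSUPP and unknown offsets return -ENODEV.
 */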
static int aplic_mmio_read_offset(struct kvm *kvm, gpa_t off, u32 *val32)
{
	u32 i;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if ((off & 0x3) != 0)
		return -EOPNOTSUPP;

	if (off == APLIC_DOMAINCFG) {
		*val32 = APLIC_DOMAINCFG_RDONLY |
			 aplic->domaincfg | APLIC_DOMAINCFG_DM;
	} else if ((off >= APLIC_SOURCECFG_BASE) &&
		 (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
		*val32 = aplic_read_sourcecfg(aplic, i);
	} else if ((off >= APLIC_SETIP_BASE) &&
		   (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIP_BASE) >> 2;
		*val32 = aplic_read_pending_word(aplic, i);
	} else if (off == APLIC_SETIPNUM) {
		*val32 = 0;
	} else if ((off >= APLIC_CLRIP_BASE) &&
		   (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIP_BASE) >> 2;
		*val32 = aplic_read_input_word(aplic, i);
	} else if (off == APLIC_CLRIPNUM) {
		*val32 = 0;
	} else if ((off >= APLIC_SETIE_BASE) &&
		   (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIE_BASE) >> 2;
		*val32 = aplic_read_enabled_word(aplic, i);
	} else if (off == APLIC_SETIENUM) {
		*val32 = 0;
	} else if ((off >= APLIC_CLRIE_BASE) &&
		   (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
		*val32 = 0;
	} else if (off == APLIC_CLRIENUM) {
		*val32 = 0;
	} else if (off == APLIC_SETIPNUM_LE) {
		*val32 = 0;
	} else if (off == APLIC_SETIPNUM_BE) {
		*val32 = 0;
	} else if (off == APLIC_GENMSI) {
		*val32 = aplic->genmsi;
	} else if ((off >= APLIC_TARGET_BASE) &&
		   (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
		*val32 = aplic_read_target(aplic, i);
	} else
		return -ENODEV;

	return 0;
}

static int aplic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			   gpa_t addr, int len, void *val)
{
	if (len != 4)
		return -EOPNOTSUPP;

	return aplic_mmio_read_offset(vcpu->kvm,
				      addr - vcpu->kvm->arch.aia.aplic_addr,
				      val);
}

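/*
 * Handle a 32-bit write at offset @off within the APLIC register space.
 * After any successful write, all sources are re-evaluated so that newly
 * enabled or pending interrupts are delivered immediately.
 */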
static int aplic_mmio_write_offset(struct kvm *kvm, gpa_t off, u32 val32)
{
	u32 i;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if ((off & 0x3) != 0)
		return -EOPNOTSUPP;

	if (off == APLIC_DOMAINCFG) {
		/* Only IE bit writeable */
		aplic->domaincfg = val32 & APLIC_DOMAINCFG_IE;
	} else if ((off >= APLIC_SOURCECFG_BASE) &&
		 (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
		aplic_write_sourcecfg(aplic, i, val32);
	} else if ((off >= APLIC_SETIP_BASE) &&
		   (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIP_BASE) >> 2;
		aplic_write_pending_word(aplic, i, val32, true);
	} else if (off == APLIC_SETIPNUM) {
		aplic_write_pending(aplic, val32, true);
	} else if ((off >= APLIC_CLRIP_BASE) &&
		   (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIP_BASE) >> 2;
		aplic_write_pending_word(aplic, i, val32, false);
	} else if (off == APLIC_CLRIPNUM) {
		aplic_write_pending(aplic, val32, false);
	} else if ((off >= APLIC_SETIE_BASE) &&
		   (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIE_BASE) >> 2;
		aplic_write_enabled_word(aplic, i, val32, true);
	} else if (off == APLIC_SETIENUM) {
		aplic_write_enabled(aplic, val32, true);
	} else if ((off >= APLIC_CLRIE_BASE) &&
		   (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIE_BASE) >> 2;
		aplic_write_enabled_word(aplic, i, val32, false);
	} else if (off == APLIC_CLRIENUM) {
		aplic_write_enabled(aplic, val32, false);
	} else if (off == APLIC_SETIPNUM_LE) {
		aplic_write_pending(aplic, val32, true);
	} else if (off == APLIC_SETIPNUM_BE) {
		aplic_write_pending(aplic, __swab32(val32), true);
	} else if (off == APLIC_GENMSI) {
		aplic->genmsi = val32 & ~(APLIC_TARGET_GUEST_IDX_MASK <<
					  APLIC_TARGET_GUEST_IDX_SHIFT);
		kvm_riscv_aia_inject_msi_by_id(kvm,
				val32 >> APLIC_TARGET_HART_IDX_SHIFT, 0,
				val32 & APLIC_TARGET_EIID_MASK);
	} else if ((off >= APLIC_TARGET_BASE) &&
		   (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
		aplic_write_target(aplic, i, val32);
	} else
		return -ENODEV;

	aplic_update_irq_range(kvm, 1, aplic->nr_irqs - 1);

	return 0;
}

static int aplic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			    gpa_t addr, int len, const void *val)
{
	if (len != 4)
		return -EOPNOTSUPP;

	return aplic_mmio_write_offset(vcpu->kvm,
				       addr - vcpu->kvm->arch.aia.aplic_addr,
				       *((const u32 *)val));
}

static struct kvm_io_device_ops aplic_iodoev_ops = {
	.read = aplic_mmio_read,
	.write = aplic_mmio_write,
};

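/*
 * KVM device attribute accessors: the attribute type is the register
 * offset, so these simply reuse the MMIO offset handlers above.
 */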
int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v)
{
	int rc;

	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	rc = aplic_mmio_write_offset(kvm, type, v);
	if (rc)
		return rc;

	return 0;
}

int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v)
{
	int rc;

	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	rc = aplic_mmio_read_offset(kvm, type, v);
	if (rc)
		return rc;

	return 0;
}

int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type)
{
	int rc;
	u32 val;

	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	rc = aplic_mmio_read_offset(kvm, type, &val);
	if (rc)
		return rc;

	return 0;
}

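/*
 * Allocate the in-kernel APLIC state, register its MMIO region on the
 * KVM I/O bus, and set up default IRQ routing for the VM.
 */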
int kvm_riscv_aia_aplic_init(struct kvm *kvm)
{
	int i, ret = 0;
	struct aplic *aplic;

	/* Do nothing if we have zero sources */
	if (!kvm->arch.aia.nr_sources)
		return 0;

	/* Allocate APLIC global state */
	aplic = kzalloc(sizeof(*aplic), GFP_KERNEL);
	if (!aplic)
		return -ENOMEM;
	kvm->arch.aia.aplic_state = aplic;

	/* Setup APLIC IRQs */
	aplic->nr_irqs = kvm->arch.aia.nr_sources + 1;
	aplic->nr_words = DIV_ROUND_UP(aplic->nr_irqs, 32);
	aplic->irqs = kcalloc(aplic->nr_irqs,
			      sizeof(*aplic->irqs), GFP_KERNEL);
	if (!aplic->irqs) {
		ret = -ENOMEM;
		goto fail_free_aplic;
	}
	for (i = 0; i < aplic->nr_irqs; i++)
		raw_spin_lock_init(&aplic->irqs[i].lock);

	/* Setup IO device */
	kvm_iodevice_init(&aplic->iodev, &aplic_iodoev_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
				      kvm->arch.aia.aplic_addr,
				      KVM_DEV_RISCV_APLIC_SIZE,
				      &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);
	if (ret)
		goto fail_free_aplic_irqs;

	/* Setup default IRQ routing */
	ret = kvm_riscv_setup_default_irq_routing(kvm, aplic->nr_irqs);
	if (ret)
		goto fail_unreg_iodev;

	return 0;

fail_unreg_iodev:
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);
fail_free_aplic_irqs:
	kfree(aplic->irqs);
fail_free_aplic:
	kvm->arch.aia.aplic_state = NULL;
	kfree(aplic);
	return ret;
}

void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm)
{
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!aplic)
		return;

	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);

	kfree(aplic->irqs);

	kvm->arch.aia.aplic_state = NULL;
	kfree(aplic);
}