// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/kvm_host.h>
#include <linux/math.h>
#include <linux/spinlock.h>
#include <linux/swab.h>
#include <kvm/iodev.h>
#include <asm/kvm_aia_aplic.h>

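/*
 * Software state of one APLIC interrupt source. The sourcecfg, the
 * pending/enabled/input state bits, and the target register are all
 * protected by a per-IRQ raw spinlock so that MMIO accesses and wired
 * interrupt injection can update them concurrently.
 */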
struct aplic_irq {
	raw_spinlock_t lock;
	u32 sourcecfg;
	u32 state;
#define APLIC_IRQ_STATE_PENDING		BIT(0)
#define APLIC_IRQ_STATE_ENABLED		BIT(1)
#define APLIC_IRQ_STATE_ENPEND		(APLIC_IRQ_STATE_PENDING | \
					 APLIC_IRQ_STATE_ENABLED)
#define APLIC_IRQ_STATE_INPUT		BIT(8)
	u32 target;
};

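/*
 * Per-VM APLIC domain state, registered with KVM as an MMIO device.
 * The emulated domain always reports MSI delivery mode and forwards
 * every pending-and-enabled source to the in-kernel IMSIC via
 * kvm_riscv_aia_inject_msi_by_id().
 */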
struct aplic {
	struct kvm_io_device iodev;

	u32 domaincfg;
	u32 genmsi;

	u32 nr_irqs;
	u32 nr_words;
	struct aplic_irq *irqs;
};

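/*
 * Locked accessors for the per-IRQ registers. Each helper validates
 * the IRQ number (source 0 does not exist and out-of-range sources are
 * ignored), then reads or updates the cached value under the per-IRQ
 * lock. Writes also mask out bits that are not implemented.
 */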
static u32 aplic_read_sourcecfg(struct aplic *aplic, u32 irq)
{
	u32 ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return 0;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = irqd->sourcecfg;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

static void aplic_write_sourcecfg(struct aplic *aplic, u32 irq, u32 val)
{
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	if (val & APLIC_SOURCECFG_D)
		val = 0;
	else
		val &= APLIC_SOURCECFG_SM_MASK;

	raw_spin_lock_irqsave(&irqd->lock, flags);
	irqd->sourcecfg = val;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

static u32 aplic_read_target(struct aplic *aplic, u32 irq)
{
	u32 ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return 0;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = irqd->target;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

static void aplic_write_target(struct aplic *aplic, u32 irq, u32 val)
{
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	val &= APLIC_TARGET_EIID_MASK |
	       (APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) |
	       (APLIC_TARGET_GUEST_IDX_MASK << APLIC_TARGET_GUEST_IDX_SHIFT);

	raw_spin_lock_irqsave(&irqd->lock, flags);
	irqd->target = val;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

static bool aplic_read_pending(struct aplic *aplic, u32 irq)
{
	bool ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? true : false;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

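/*
 * Latch or clear the pending bit of an IRQ while respecting its source
 * mode: inactive sources never change, and level-triggered sources
 * ignore software clears and only latch the bit while their rectified
 * input is asserted.
 */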
static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
{
	unsigned long flags, sm;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);

	sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK;
	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
		goto skip_write_pending;

	if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH ||
	    sm == APLIC_SOURCECFG_SM_LEVEL_LOW) {
		if (!pending)
			goto skip_write_pending;
		if ((irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    sm == APLIC_SOURCECFG_SM_LEVEL_LOW)
			goto skip_write_pending;
		if (!(irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    sm == APLIC_SOURCECFG_SM_LEVEL_HIGH)
			goto skip_write_pending;
	}

	if (pending)
		irqd->state |= APLIC_IRQ_STATE_PENDING;
	else
		irqd->state &= ~APLIC_IRQ_STATE_PENDING;

skip_write_pending:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

static bool aplic_read_enabled(struct aplic *aplic, u32 irq)
{
	bool ret;
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
{
	unsigned long flags;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	if (enabled)
		irqd->state |= APLIC_IRQ_STATE_ENABLED;
	else
		irqd->state &= ~APLIC_IRQ_STATE_ENABLED;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}

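/*
 * Return the rectified input value of an IRQ: the raw input XORed with
 * the inversion implied by active-low or falling-edge source modes.
 * Disabled (sourcecfg.D) and inactive sources always read as zero.
 * This backs reads of the CLRIP register array.
 */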
static bool aplic_read_input(struct aplic *aplic, u32 irq)
{
	u32 sourcecfg, sm, raw_input, irq_inverted;
	struct aplic_irq *irqd;
	unsigned long flags;
	bool ret = false;

	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);

	sourcecfg = irqd->sourcecfg;
	if (sourcecfg & APLIC_SOURCECFG_D)
		goto skip;

	sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
		goto skip;

	raw_input = (irqd->state & APLIC_IRQ_STATE_INPUT) ? 1 : 0;
	irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW ||
			sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0;
	ret = !!(raw_input ^ irq_inverted);

skip:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	return ret;
}

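/*
 * Decode a target register into hart index, guest index, and EIID,
 * then forward the interrupt as an MSI to the matching IMSIC.
 */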
static void aplic_inject_msi(struct kvm *kvm, u32 irq, u32 target)
{
	u32 hart_idx, guest_idx, eiid;

	hart_idx = target >> APLIC_TARGET_HART_IDX_SHIFT;
	hart_idx &= APLIC_TARGET_HART_IDX_MASK;
	guest_idx = target >> APLIC_TARGET_GUEST_IDX_SHIFT;
	guest_idx &= APLIC_TARGET_GUEST_IDX_MASK;
	eiid = target & APLIC_TARGET_EIID_MASK;
	kvm_riscv_aia_inject_msi_by_id(kvm, hart_idx, guest_idx, eiid);
}

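/*
 * Scan a range of IRQs and deliver every source that is both pending
 * and enabled, clearing its pending bit as the MSI is sent. Nothing is
 * delivered while domain interrupts (domaincfg.IE) are disabled.
 */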
static void aplic_update_irq_range(struct kvm *kvm, u32 first, u32 last)
{
	bool inject;
	u32 irq, target;
	unsigned long flags;
	struct aplic_irq *irqd;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!(aplic->domaincfg & APLIC_DOMAINCFG_IE))
		return;

	for (irq = first; irq <= last; irq++) {
		if (!irq || aplic->nr_irqs <= irq)
			continue;
		irqd = &aplic->irqs[irq];

		raw_spin_lock_irqsave(&irqd->lock, flags);

		inject = false;
		target = irqd->target;
		if ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
		    APLIC_IRQ_STATE_ENPEND) {
			irqd->state &= ~APLIC_IRQ_STATE_PENDING;
			inject = true;
		}

		raw_spin_unlock_irqrestore(&irqd->lock, flags);

		if (inject)
			aplic_inject_msi(kvm, irq, target);
	}
}

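/*
 * Update a wired interrupt source with a new input level. The level
 * change is latched into the pending bit according to the source mode
 * (rising edge, falling edge, level high, level low), the cached input
 * state is updated, and the source is delivered as an MSI if it ends
 * up pending and enabled while domain interrupts are enabled.
 */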
int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level)
{
	u32 target;
	bool inject = false, ie;
	unsigned long flags;
	struct aplic_irq *irqd;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!aplic || !source || (aplic->nr_irqs <= source))
		return -ENODEV;
	irqd = &aplic->irqs[source];
	ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false;

	raw_spin_lock_irqsave(&irqd->lock, flags);

	if (irqd->sourcecfg & APLIC_SOURCECFG_D)
		goto skip_unlock;

	switch (irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK) {
	case APLIC_SOURCECFG_SM_EDGE_RISE:
		if (level && !(irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_EDGE_FALL:
		if (!level && (irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_LEVEL_HIGH:
		if (level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_LEVEL_LOW:
		if (!level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	}

	if (level)
		irqd->state |= APLIC_IRQ_STATE_INPUT;
	else
		irqd->state &= ~APLIC_IRQ_STATE_INPUT;

	target = irqd->target;
	if (ie && ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
		   APLIC_IRQ_STATE_ENPEND)) {
		irqd->state &= ~APLIC_IRQ_STATE_PENDING;
		inject = true;
	}

skip_unlock:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	if (inject)
		aplic_inject_msi(kvm, source, target);

	return 0;
}

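/*
 * 32-bit word helpers backing the SETIP/CLRIP/SETIE/CLRIE register
 * arrays; word N covers interrupt sources N * 32 to N * 32 + 31.
 */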
static u32 aplic_read_input_word(struct aplic *aplic, u32 word)
{
	u32 i, ret = 0;

	for (i = 0; i < 32; i++)
		ret |= aplic_read_input(aplic, word * 32 + i) ? BIT(i) : 0;

	return ret;
}

static u32 aplic_read_pending_word(struct aplic *aplic, u32 word)
{
	u32 i, ret = 0;

	for (i = 0; i < 32; i++)
		ret |= aplic_read_pending(aplic, word * 32 + i) ? BIT(i) : 0;

	return ret;
}

static void aplic_write_pending_word(struct aplic *aplic, u32 word,
				     u32 val, bool pending)
{
	u32 i;

	for (i = 0; i < 32; i++) {
		if (val & BIT(i))
			aplic_write_pending(aplic, word * 32 + i, pending);
	}
}

static u32 aplic_read_enabled_word(struct aplic *aplic, u32 word)
{
	u32 i, ret = 0;

	for (i = 0; i < 32; i++)
		ret |= aplic_read_enabled(aplic, word * 32 + i) ? BIT(i) : 0;

	return ret;
}

static void aplic_write_enabled_word(struct aplic *aplic, u32 word,
				     u32 val, bool enabled)
{
	u32 i;

	for (i = 0; i < 32; i++) {
		if (val & BIT(i))
			aplic_write_enabled(aplic, word * 32 + i, enabled);
	}
}

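/*
 * Handle a 32-bit aligned read of the APLIC register space. Offsets
 * are relative to the APLIC base address; write-only registers such as
 * SETIPNUM/CLRIPNUM and the CLRIE array read back as zero, and
 * DOMAINCFG always reports MSI delivery mode (DM set).
 */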
static int aplic_mmio_read_offset(struct kvm *kvm, gpa_t off, u32 *val32)
{
	u32 i;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if ((off & 0x3) != 0)
		return -EOPNOTSUPP;

	if (off == APLIC_DOMAINCFG) {
		*val32 = APLIC_DOMAINCFG_RDONLY |
			 aplic->domaincfg | APLIC_DOMAINCFG_DM;
	} else if ((off >= APLIC_SOURCECFG_BASE) &&
		 (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
		*val32 = aplic_read_sourcecfg(aplic, i);
	} else if ((off >= APLIC_SETIP_BASE) &&
		   (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIP_BASE) >> 2;
		*val32 = aplic_read_pending_word(aplic, i);
	} else if (off == APLIC_SETIPNUM) {
		*val32 = 0;
	} else if ((off >= APLIC_CLRIP_BASE) &&
		   (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIP_BASE) >> 2;
		*val32 = aplic_read_input_word(aplic, i);
	} else if (off == APLIC_CLRIPNUM) {
		*val32 = 0;
	} else if ((off >= APLIC_SETIE_BASE) &&
		   (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIE_BASE) >> 2;
		*val32 = aplic_read_enabled_word(aplic, i);
	} else if (off == APLIC_SETIENUM) {
		*val32 = 0;
	} else if ((off >= APLIC_CLRIE_BASE) &&
		   (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
		*val32 = 0;
	} else if (off == APLIC_CLRIENUM) {
		*val32 = 0;
	} else if (off == APLIC_SETIPNUM_LE) {
		*val32 = 0;
	} else if (off == APLIC_SETIPNUM_BE) {
		*val32 = 0;
	} else if (off == APLIC_GENMSI) {
		*val32 = aplic->genmsi;
	} else if ((off >= APLIC_TARGET_BASE) &&
		   (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
		*val32 = aplic_read_target(aplic, i);
	} else
		return -ENODEV;

	return 0;
}

static int aplic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			   gpa_t addr, int len, void *val)
{
	if (len != 4)
		return -EOPNOTSUPP;

	return aplic_mmio_read_offset(vcpu->kvm,
				      addr - vcpu->kvm->arch.aia.aplic_addr,
				      val);
}

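/*
 * Handle a 32-bit aligned write to the APLIC register space and then
 * rescan all sources so that anything left pending and enabled by the
 * write is delivered immediately. GENMSI writes inject the requested
 * MSI directly with guest index 0.
 */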
static int aplic_mmio_write_offset(struct kvm *kvm, gpa_t off, u32 val32)
{
	u32 i;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if ((off & 0x3) != 0)
		return -EOPNOTSUPP;

	if (off == APLIC_DOMAINCFG) {
		/* Only IE bit writeable */
		aplic->domaincfg = val32 & APLIC_DOMAINCFG_IE;
	} else if ((off >= APLIC_SOURCECFG_BASE) &&
		 (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
		aplic_write_sourcecfg(aplic, i, val32);
	} else if ((off >= APLIC_SETIP_BASE) &&
		   (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIP_BASE) >> 2;
		aplic_write_pending_word(aplic, i, val32, true);
	} else if (off == APLIC_SETIPNUM) {
		aplic_write_pending(aplic, val32, true);
	} else if ((off >= APLIC_CLRIP_BASE) &&
		   (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIP_BASE) >> 2;
		aplic_write_pending_word(aplic, i, val32, false);
	} else if (off == APLIC_CLRIPNUM) {
		aplic_write_pending(aplic, val32, false);
	} else if ((off >= APLIC_SETIE_BASE) &&
		   (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIE_BASE) >> 2;
		aplic_write_enabled_word(aplic, i, val32, true);
	} else if (off == APLIC_SETIENUM) {
		aplic_write_enabled(aplic, val32, true);
	} else if ((off >= APLIC_CLRIE_BASE) &&
		   (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIE_BASE) >> 2;
		aplic_write_enabled_word(aplic, i, val32, false);
	} else if (off == APLIC_CLRIENUM) {
		aplic_write_enabled(aplic, val32, false);
	} else if (off == APLIC_SETIPNUM_LE) {
		aplic_write_pending(aplic, val32, true);
	} else if (off == APLIC_SETIPNUM_BE) {
		aplic_write_pending(aplic, __swab32(val32), true);
	} else if (off == APLIC_GENMSI) {
		aplic->genmsi = val32 & ~(APLIC_TARGET_GUEST_IDX_MASK <<
					  APLIC_TARGET_GUEST_IDX_SHIFT);
		kvm_riscv_aia_inject_msi_by_id(kvm,
				val32 >> APLIC_TARGET_HART_IDX_SHIFT, 0,
				val32 & APLIC_TARGET_EIID_MASK);
	} else if ((off >= APLIC_TARGET_BASE) &&
		   (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
		i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
		aplic_write_target(aplic, i, val32);
	} else
		return -ENODEV;

	aplic_update_irq_range(kvm, 1, aplic->nr_irqs - 1);

	return 0;
}

static int aplic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			    gpa_t addr, int len, const void *val)
{
	if (len != 4)
		return -EOPNOTSUPP;

	return aplic_mmio_write_offset(vcpu->kvm,
				       addr - vcpu->kvm->arch.aia.aplic_addr,
				       *((const u32 *)val));
}

static struct kvm_io_device_ops aplic_iodoev_ops = {
	.read = aplic_mmio_read,
	.write = aplic_mmio_write,
};

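/*
 * Device attribute accessors used to save and restore APLIC register
 * state; the attribute type encodes the register offset, so these
 * simply reuse the MMIO read/write decoders above.
 */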
int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v)
{
	int rc;

	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	rc = aplic_mmio_write_offset(kvm, type, v);
	if (rc)
		return rc;

	return 0;
}

int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v)
{
	int rc;

	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	rc = aplic_mmio_read_offset(kvm, type, v);
	if (rc)
		return rc;

	return 0;
}

int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type)
{
	int rc;
	u32 val;

	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	rc = aplic_mmio_read_offset(kvm, type, &val);
	if (rc)
		return rc;

	return 0;
}

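/*
 * Allocate the emulated APLIC for a VM: one aplic_irq slot per source
 * (plus the unused source 0), an MMIO region of KVM_DEV_RISCV_APLIC_SIZE
 * at the configured guest physical address, and the default IRQ
 * routing table.
 */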
int kvm_riscv_aia_aplic_init(struct kvm *kvm)
{
	int i, ret = 0;
	struct aplic *aplic;

	/* Do nothing if we have zero sources */
	if (!kvm->arch.aia.nr_sources)
		return 0;

	/* Allocate APLIC global state */
	aplic = kzalloc(sizeof(*aplic), GFP_KERNEL);
	if (!aplic)
		return -ENOMEM;
	kvm->arch.aia.aplic_state = aplic;

	/* Setup APLIC IRQs */
	aplic->nr_irqs = kvm->arch.aia.nr_sources + 1;
	aplic->nr_words = DIV_ROUND_UP(aplic->nr_irqs, 32);
	aplic->irqs = kcalloc(aplic->nr_irqs,
			      sizeof(*aplic->irqs), GFP_KERNEL);
	if (!aplic->irqs) {
		ret = -ENOMEM;
		goto fail_free_aplic;
	}
	for (i = 0; i < aplic->nr_irqs; i++)
		raw_spin_lock_init(&aplic->irqs[i].lock);

	/* Setup IO device */
	kvm_iodevice_init(&aplic->iodev, &aplic_iodoev_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
				      kvm->arch.aia.aplic_addr,
				      KVM_DEV_RISCV_APLIC_SIZE,
				      &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);
	if (ret)
		goto fail_free_aplic_irqs;

	/* Setup default IRQ routing */
	ret = kvm_riscv_setup_default_irq_routing(kvm, aplic->nr_irqs);
	if (ret)
		goto fail_unreg_iodev;

	return 0;

fail_unreg_iodev:
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);
fail_free_aplic_irqs:
	kfree(aplic->irqs);
fail_free_aplic:
	kvm->arch.aia.aplic_state = NULL;
	kfree(aplic);
	return ret;
}

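/*
 * Tear down the emulated APLIC: unregister the MMIO device and free the
 * per-IRQ array and domain state. Safe to call even if no APLIC was
 * created for this VM.
 */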
void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm)
{
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!aplic)
		return;

	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);

	kfree(aplic->irqs);

	kvm->arch.aia.aplic_state = NULL;
	kfree(aplic);
}