author     Andre Przywara <andre.przywara@arm.com>  2014-06-03 10:13:13 +0200
committer  Christoffer Dall <christoffer.dall@linaro.org>  2015-01-20 18:25:32 +0100
commit     6d52f35af10cf24d59b43f3fd8c938ad23cab543 (patch)
tree       c20bda289d5090b4a4411a53b0a9a346ffa3df1b /virt/kvm/arm/vgic-v3-emul.c
parent     7e5802781c3e109558ddfd8b02155ad24d872ee7 (diff)
download   kernel-6d52f35af10cf24d59b43f3fd8c938ad23cab543.tar.gz
arm64: KVM: add SGI generation register emulation
While the generation of a (virtual) inter-processor interrupt (SGI) on a
GICv2 works by writing to an MMIO register, GICv3 uses the system register
ICC_SGI1R_EL1 to trigger them.

Add a trap handler function that calls the new SGI register handler in the
GICv3 code. As ICC_SRE_EL1.SRE at this point is still always 0, this will
not trap yet; it will only be used later, once all the data structures have
been initialized properly.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
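[Editor's note] The sys_regs.c side of this change is outside this file's
diff. As a minimal sketch only (the sys_reg_params field names and the
vcpu_reg() helper are assumed from the arm64 KVM code of this era, not shown
by this page), the trap handler essentially forwards the written value:

/* Sketch: forward a guest write of ICC_SGI1R_EL1 to the vgic code. */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val;

	/* ICC_SGI1R_EL1 is write-only; reject reads. */
	if (!p->is_write)
		return false;

	/* Fetch the 64-bit value the guest wrote and dispatch the SGI. */
	val = *vcpu_reg(vcpu, p->Rt);
	vgic_v3_dispatch_sgi(vcpu, val);

	return true;
}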
Diffstat (limited to 'virt/kvm/arm/vgic-v3-emul.c')
-rw-r--r--  virt/kvm/arm/vgic-v3-emul.c  111
1 file changed, 111 insertions, 0 deletions
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
index 8db1db597223..2d2199d85b74 100644
--- a/virt/kvm/arm/vgic-v3-emul.c
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -841,6 +841,117 @@ void vgic_v3_init_emulation(struct kvm *kvm)
	kvm->arch.max_vcpus = KVM_MAX_VCPUS;
}
+/*
+ * Compare a given affinity (levels 1-3 and a level 0 mask, from the SGI
+ * generation register ICC_SGI1R_EL1) against a given VCPU.
+ * If the VCPU's MPIDR matches, return the level 0 affinity; otherwise
+ * return -1.
+ */
+static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
+{
+	unsigned long affinity;
+	int level0;
+
+	/*
+	 * Split the current VCPU's MPIDR into affinity level 0 and the
+	 * rest, as this is what we have to compare against.
+	 */
+	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
+	level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
+	affinity &= ~MPIDR_LEVEL_MASK;
+
+	/* bail out if the upper three levels don't match */
+	if (sgi_aff != affinity)
+		return -1;
+
+	/* Is this VCPU's bit set in the mask? */
+	if (!(sgi_cpu_mask & BIT(level0)))
+		return -1;
+
+	return level0;
+}
+
+#define SGI_AFFINITY_LEVEL(reg, level) \
+	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
+	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
+
+/**
+ * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
+ * @vcpu: The VCPU requesting an SGI
+ * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
+ *
+ * With GICv3 (and ARE=1), CPUs trigger SGIs by writing to a system register.
+ * This will trap in sys_regs.c and call this function.
+ * The ICC_SGI1R_EL1 register contains the upper three affinity levels of the
+ * target processors as well as a bitmask of 16 Aff0 CPUs.
+ * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
+ * check for matching ones. If this bit is set, we signal all VCPUs except
+ * the calling one.
+ */
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_vcpu *c_vcpu;
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	u16 target_cpus;
+	u64 mpidr;
+	int sgi, c;
+	int vcpu_id = vcpu->vcpu_id;
+	bool broadcast;
+	int updated = 0;
+
+	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
+	broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
+	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
+	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
+	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
+	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
+
+	/*
+	 * We take the dist lock here, because we come from the sysregs
+	 * code path and not from the MMIO one (which already takes the lock).
+	 */
+	spin_lock(&dist->lock);
+
+	/*
+	 * We iterate over all VCPUs to find the MPIDRs matching the request.
+	 * If we have handled one CPU, we clear its bit to detect early
+	 * whether we are already finished. This avoids iterating through all
+	 * VCPUs when most of the time we just signal a single VCPU.
+	 */
+	kvm_for_each_vcpu(c, c_vcpu, kvm) {
+
+		/* Exit early if we have dealt with all requested CPUs */
+		if (!broadcast && target_cpus == 0)
+			break;
+
+		/* Don't signal the calling VCPU */
+		if (broadcast && c == vcpu_id)
+			continue;
+
+		if (!broadcast) {
+			int level0;
+
+			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
+			if (level0 == -1)
+				continue;
+
+			/* remove this matching VCPU from the mask */
+			target_cpus &= ~BIT(level0);
+		}
+
+		/* Flag the SGI as pending */
+		vgic_dist_irq_set_pending(c_vcpu, sgi);
+		updated = 1;
+		kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
+	}
+	if (updated)
+		vgic_update_state(vcpu->kvm);
+	spin_unlock(&dist->lock);
+	if (updated)
+		vgic_kick_vcpus(vcpu->kvm);
+}
+
static int vgic_v3_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
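
[Editor's note] To make the bit slicing in vgic_v3_dispatch_sgi() concrete,
here is a standalone, illustrative decode of a sample ICC_SGI1R_EL1 value.
The macros below are local stand-ins for the kernel's ICC_SGI1R_*
definitions and follow the architectural field layout (TargetList[15:0],
Aff1[23:16], INTID[27:24], Aff2[39:32], IRM bit 40, Aff3[55:48]):

#include <stdint.h>
#include <stdio.h>

/* Local, illustrative macros for the ICC_SGI1R_EL1 field layout. */
#define TARGET_LIST_MASK 0xffffULL  /* bits [15:0]: Aff0 target bitmap */
#define AFF1_SHIFT       16         /* bits [23:16]: Aff1              */
#define SGI_ID_SHIFT     24         /* bits [27:24]: SGI number        */
#define AFF2_SHIFT       32         /* bits [39:32]: Aff2              */
#define IRM_BIT          40         /* bit 40: interrupt routing mode  */
#define AFF3_SHIFT       48         /* bits [55:48]: Aff3              */

int main(void)
{
	/* SGI 5, routed to Aff3.Aff2.Aff1 = 0.0.1, Aff0 CPUs 0 and 2 */
	uint64_t reg = (5ULL << SGI_ID_SHIFT) | (1ULL << AFF1_SHIFT) | 0x5ULL;

	printf("SGI:       %u\n", (unsigned)((reg >> SGI_ID_SHIFT) & 0xf));
	printf("broadcast: %u\n", (unsigned)((reg >> IRM_BIT) & 0x1));
	printf("Aff3.2.1:  %u.%u.%u\n",
	       (unsigned)((reg >> AFF3_SHIFT) & 0xff),
	       (unsigned)((reg >> AFF2_SHIFT) & 0xff),
	       (unsigned)((reg >> AFF1_SHIFT) & 0xff));
	printf("targets:   0x%04x\n", (unsigned)(reg & TARGET_LIST_MASK));
	return 0;
}

With this value, the loop in vgic_v3_dispatch_sgi() would mark SGI 5 pending
on the VCPUs whose MPIDR has Aff3.Aff2.Aff1 = 0.0.1 and Aff0 of 0 or 2.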