author	Linus Torvalds <torvalds@linux-foundation.org>	2018-07-08 13:26:55 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-07-08 13:26:55 -0700
commit	6f27a64092ae0993e3389081673a258a4c926186 (patch)
tree	bc8e070ca1238ec43d726d8fc3fca139d0635ce2 /arch/x86/hyperv/hv_apic.c
parent	6fb2489d7f09e1a9360d0afec65bcde1a6a472d4 (diff)
parent	15279df6f26cf2013d713904b4a0c957ae8abb96 (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:

 - Prevent an out-of-bounds access in mtrr_write()

 - Break a circular dependency in the new hyperv IPI acceleration code

 - Address the build breakage related to inline functions by enforcing
   gnu_inline and explicitly bringing native_save_fl() out of line,
   which also adds a set of _ASM_ARG macros which provide 32/64-bit
   safety (a sketch of the gnu_inline semantics follows below)

 - Initialize the shadow CR4 per-cpu variable before using it

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mtrr: Don't copy out-of-bounds data in mtrr_write
  x86/hyper-v: Fix the circular dependency in IPI enlightenment
  x86/paravirt: Make native_save_fl() extern inline
  x86/asm: Add _ASM_ARG* constants for argument registers to <asm/asm.h>
  compiler-gcc.h: Add __attribute__((gnu_inline)) to all inline declarations
  x86/mm/32: Initialize the CR4 shadow before __flush_tlb_all()
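For context on the inline-related items above, here is a minimal user-space sketch of what __attribute__((gnu_inline)) does. This is illustrative only, not the kernel's actual compiler-gcc.h change; add_one is a made-up example function.

/*
 * Sketch of the gnu_inline attribute: it forces GNU89 inline rules
 * regardless of -std.  Under those rules a plain "inline" definition
 * still emits an out-of-line copy, so calls the compiler chooses not
 * to inline (e.g. at -O0) still link.  "extern inline" would instead
 * be inlining-only, with the out-of-line copy supplied elsewhere,
 * which is how native_save_fl() is handled (its out-of-line copy is
 * provided in assembly using the new _ASM_ARG* constants).
 */
#include <stdio.h>

__attribute__((gnu_inline)) inline int add_one(int x)
{
	return x + 1;
}

int main(void)
{
	printf("%d\n", add_one(41));	/* prints 42 */
	return 0;
}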
Diffstat (limited to 'arch/x86/hyperv/hv_apic.c')
-rw-r--r--	arch/x86/hyperv/hv_apic.c	|	5 +++++
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index f68855499391..402338365651 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -114,6 +114,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
 		ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
 		nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
 	}
+	if (nr_bank < 0)
+		goto ipi_mask_ex_done;
 	if (!nr_bank)
 		ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
@@ -158,6 +160,9 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 	for_each_cpu(cur_cpu, mask) {
 		vcpu = hv_cpu_number_to_vp_number(cur_cpu);
+		if (vcpu == VP_INVAL)
+			goto ipi_mask_done;
+
 		/*
 		 * This particular version of the IPI hypercall can
 		 * only target upto 64 CPUs.
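
The two hunks above share one pattern: when the CPU-to-VP mapping or the VP-set encoding cannot be performed, the fast IPI path bails out through its cleanup label and reports failure, so the caller can fall back to the slower non-enlightened path instead of issuing a hypercall with bogus arguments. The following standalone sketch shows that pattern in isolation; VP_INVAL's value, lookup_vp and fast_send_ipi are hypothetical stand-ins, not the kernel's code.

/*
 * Standalone sketch (compile with any C compiler) of the bail-out
 * pattern added by the hunks: validate each lookup, and on failure
 * jump to the cleanup label with ret still false.
 */
#include <stdbool.h>
#include <stdio.h>

#define VP_INVAL	(~0U)	/* hypothetical "no VP number" marker */

/* Pretend VP lookup: CPUs above 7 are "not yet mapped". */
static unsigned int lookup_vp(int cpu)
{
	return cpu < 8 ? (unsigned int)cpu : VP_INVAL;
}

/* Returns false so the caller can fall back to the slow IPI path. */
static bool fast_send_ipi(const int *cpus, int n)
{
	bool ret = false;
	int i;

	for (i = 0; i < n; i++) {
		unsigned int vcpu = lookup_vp(cpus[i]);

		if (vcpu == VP_INVAL)
			goto ipi_done;	/* bail out before the "hypercall" */
		/* ... set bit vcpu in the target mask ... */
	}
	ret = true;			/* hypercall would be issued here */
ipi_done:
	return ret;
}

int main(void)
{
	int ok[] = { 0, 1, 2 }, bad[] = { 0, 9 };

	/* prints "1 0": second call hits the unmapped CPU and falls back */
	printf("%d %d\n", fast_send_ipi(ok, 3), fast_send_ipi(bad, 2));
	return 0;
}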