| author    | Steve Capper <steve.capper@linaro.org> | 2014-02-11 16:33:54 +0000 |
| committer | Steve Capper <steve.capper@linaro.org> | 2014-09-01 11:52:22 +0100 |
| commit    | 0e169dc51e89ef1ca2ce9e49441d68764d385bbe |
| tree      | 47250976f843250b78b4535bca14e274c4070a36 |
| parent    | c771688e21fcfbe91388b15dabe7d5e597338d98 |
arm64: mm: Enable HAVE_RCU_TABLE_FREE logic
In order to implement fast_get_user_pages we need to ensure that the
page table walker is protected from having page table pages freed out
from under it.

This patch enables HAVE_RCU_TABLE_FREE: any page table pages belonging
to address spaces with multiple users will be freed via call_rcu_sched.
This means that disabling interrupts blocks the free and thereby
protects the fast gup page walker.
Signed-off-by: Steve Capper <steve.capper@linaro.org>
Tested-by: Dann Frazier <dann.frazier@canonical.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
[steve.capper@linaro.org: rebased against 3.15-rc8]
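As background for why this is safe (a sketch, not part of the patch): call_rcu_sched() only invokes its callback after every CPU has passed through a quiescent state, and a CPU running with interrupts disabled cannot reach one. A fast gup walker can therefore pin page table pages simply by disabling interrupts around the walk. The `*_sketch` helper names below are hypothetical stand-ins for the real fast gup code, not functions from this patch:

```c
#include <linux/irqflags.h>
#include <linux/mm.h>

/* Hypothetical stand-in for the real fast gup page table walk; it
 * must not sleep or take locks while interrupts are disabled. */
static int walk_pmd_range_sketch(struct mm_struct *mm,
				 unsigned long addr, unsigned long end)
{
	/* Real code would walk PGDs/PMDs/PTEs here using READ_ONCE()
	 * style accesses, never re-reading a table entry. */
	return 0;
}

static int fast_gup_sketch(struct mm_struct *mm,
			   unsigned long addr, unsigned long end)
{
	unsigned long flags;
	int ret;

	/*
	 * With IRQs off, this CPU cannot pass through an RCU-sched
	 * quiescent state, so any page table page queued for freeing
	 * with call_rcu_sched() stays valid until IRQs are re-enabled.
	 */
	local_irq_save(flags);
	ret = walk_pmd_range_sketch(mm, addr, end);
	local_irq_restore(flags);

	return ret;
}
```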
| -rw-r--r-- | arch/arm64/Kconfig           |  1 |
| -rw-r--r-- | arch/arm64/include/asm/tlb.h | 18 |

2 files changed, 17 insertions(+), 2 deletions(-)
```diff
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e759af5d709..24203903d93 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -43,6 +43,7 @@ config ARM64
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_RCU_TABLE_FREE
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
 	select NO_BOOTMEM
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 80e2c08900d..8e4dde52290 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -23,6 +23,20 @@
 
 #include <asm-generic/tlb.h>
 
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
+static inline void __tlb_remove_table(void *_table)
+{
+	free_page_and_swap_cache((struct page *)_table);
+}
+#else
+#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
 /*
  * There's three ways the TLB shootdown code is used:
  * 1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
@@ -88,7 +102,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
 	pgtable_page_dtor(pte);
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, pte);
+	tlb_remove_entry(tlb, pte);
 }
 
 #ifndef CONFIG_ARM64_64K_PAGES
@@ -96,7 +110,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pmdp));
+	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 
 #endif
```
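For context on where the freed tables actually go (again a sketch, not code from this patch): the generic HAVE_RCU_TABLE_FREE machinery in mm/memory.c batches table pages and queues them with call_rcu_sched(); the arch's __tlb_remove_table() from the hunk above is then invoked from the RCU callback. The `*_sketch` names below are simplified stand-ins for the real `struct mmu_table_batch` code:

```c
#include <linux/kernel.h>
#include <linux/rcupdate.h>

/* Arch hook from asm/tlb.h above; declared here so the sketch is
 * self-contained. */
void __tlb_remove_table(void *table);

/* Simplified stand-in for struct mmu_table_batch in mm/memory.c. */
struct table_batch_sketch {
	struct rcu_head rcu;
	unsigned int nr;
	void *tables[];
};

/* RCU-sched callback: runs only after every CPU has passed through a
 * quiescent state, i.e. after any IRQs-off fast gup walk has finished. */
static void table_free_rcu_sketch(struct rcu_head *head)
{
	struct table_batch_sketch *batch =
		container_of(head, struct table_batch_sketch, rcu);
	unsigned int i;

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);
}

/* Defer freeing the batched table pages past a grace period. */
static void queue_table_free_sketch(struct table_batch_sketch *batch)
{
	call_rcu_sched(&batch->rcu, table_free_rcu_sketch);
}
```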