author     Steve Capper <steve.capper@linaro.org>    2014-02-05 09:51:50 +0000
committer  Steve Capper <steve.capper@linaro.org>    2014-02-05 15:16:16 +0000
commit     0f1cb8ed1319413ed58ca14a8625727ab5c7382b (patch)
tree       e6e7dc1e152645926f2db0c014e5626e9c11dcc2
parent     bbc8081610176cdc5acfbf3fab5c504746d98593 (diff)
download   linux-0f1cb8ed1319413ed58ca14a8625727ab5c7382b.tar.gz
arm64: mm: Enable HAVE_RCU_TABLE_FREE logic
In order to implement fast_get_user_pages we need to ensure that the page
table walker is protected from page table pages being freed from under it.

This patch enables HAVE_RCU_TABLE_FREE and incorporates it into the existing
arm64 TLB logic. Any page table pages belonging to address spaces with
multiple users will be freed via call_rcu_sched, meaning that disabling
interrupts will block the free and protect the fast gup page walker.

Signed-off-by: Steve Capper <steve.capper@linaro.org>
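fast_get_user_pages itself is not added here; it arrives later in the series. The sketch below is purely illustrative (walk_pte_lockless() is a hypothetical name, and the walk is heavily simplified, with no huge-page handling or page reference counting) and only shows why the call_rcu_sched deferral is sufficient: an RCU-sched grace period cannot complete while a CPU has interrupts disabled, so a page table page queued with call_rcu_sched() cannot be freed underneath a walker that runs with interrupts off.

/*
 * Illustrative only: hypothetical lockless walk, not part of this patch.
 * A real fast gup also snapshots each level, handles huge pages and
 * takes page references; all of that is omitted here.
 */
static int walk_pte_lockless(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	int present = 0;

	/*
	 * With interrupts off on this CPU an RCU-sched grace period
	 * cannot complete, so page table pages queued for freeing via
	 * call_rcu_sched() stay valid for the duration of the walk.
	 */
	local_irq_save(flags);

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto out;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		goto out;

	ptep = pte_offset_map(pmd, addr);
	present = pte_present(*ptep);
	pte_unmap(ptep);
out:
	local_irq_restore(flags);
	return present;
}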
-rw-r--r--  arch/arm64/Kconfig            |  1 +
-rw-r--r--  arch/arm64/include/asm/tlb.h  | 27 +++++++++++++++++++++++++--
2 files changed, 26 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6d4dd22ee4b..129bd6af82b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -28,6 +28,7 @@ config ARM64
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS
 	select HAVE_MEMBLOCK
 	select HAVE_PERF_EVENTS
+	select HAVE_RCU_TABLE_FREE
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
 	select NO_BOOTMEM
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 717031a762c..89998239d5e 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -27,12 +27,33 @@
 
 #define MMU_GATHER_BUNDLE	8
 
+static inline void __tlb_remove_table(void *_table)
+{
+	free_page_and_swap_cache((struct page *)_table);
+}
+
+struct mmu_table_batch {
+	struct rcu_head		rcu;
+	unsigned int		nr;
+	void			*tables[0];
+};
+
+#define MAX_TABLE_BATCH		\
+	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+
+extern void tlb_table_flush(struct mmu_gather *tlb);
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
+#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
+
 /*
  * TLB handling. This allows us to remove pages from the page
  * tables, and efficiently handle the TLB issues.
  */
 struct mmu_gather {
 	struct mm_struct	*mm;
+	struct mmu_table_batch	*batch;
+	unsigned int		need_flush;
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
 	unsigned long		start, end;
@@ -91,6 +112,7 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
+	tlb_table_flush(tlb);
 	free_pages_and_swap_cache(tlb->pages, tlb->nr);
 	tlb->nr = 0;
 	if (tlb->pages == tlb->local)
@@ -109,6 +131,7 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 	tlb->pages = tlb->local;
 	tlb->nr = 0;
 	__tlb_alloc_page(tlb);
+	tlb->batch = NULL;
 }
 
 static inline void
@@ -172,7 +195,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
 	pgtable_page_dtor(pte);
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, pte);
+	tlb_remove_entry(tlb, pte);
 }
 
 #ifndef CONFIG_ARM64_64K_PAGES
@@ -180,7 +203,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pmdp));
+	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
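For reference, the tlb_table_flush()/tlb_remove_table() declarations and the batch/need_flush fields added above are serviced by the generic HAVE_RCU_TABLE_FREE code in mm/memory.c, which is not part of this patch. The following is a simplified sketch of that generic path, with config details and some corner cases trimmed: tables from multi-user address spaces are batched and handed to call_rcu_sched(), while a single-user address space (no concurrent lockless walker possible) frees immediately.

/*
 * Simplified sketch of the generic HAVE_RCU_TABLE_FREE path in
 * mm/memory.c that the arm64 declarations above hook into.
 */
static void tlb_remove_table_smp_sync(void *arg)
{
	/* IPI target: merely interrupting a CPU ends any lockless walk */
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	/* __tlb_remove_table() is the arch hook added in asm/tlb.h above */
	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		/* deferred free: runs after an RCU-sched grace period */
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->need_flush = 1;

	/*
	 * A single-user mm cannot have a concurrent lockless walker,
	 * so the table can be freed immediately.
	 */
	if (atomic_read(&tlb->mm->mm_users) < 2) {
		__tlb_remove_table(table);
		return;
	}

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			/* no batch page: synchronise with all CPUs instead */
			smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
			__tlb_remove_table(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}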