author		Linus Torvalds <torvalds@linux-foundation.org>	2019-04-26 18:15:33 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-04-26 18:15:33 -0700
commit		ce944935eef2a56f7efe5c765794e77380651b58 (patch)
tree		2223065a8f33e7cf5cdf1bac36aa3366e4a34eff
parent		857e17c2ee7c426c1f8998ccd48e2d5a7542148d (diff)
parent		89189557b47b35683a27c80ee78aef18248eefb4 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "9 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  fs/proc/proc_sysctl.c: Fix a NULL pointer dereference
  mm/page_alloc.c: fix never set ALLOC_NOFRAGMENT flag
  mm/page_alloc.c: avoid potential NULL pointer dereference
  mm, page_alloc: always use a captured page regardless of compaction result
  mm: do not boost watermarks to avoid fragmentation for the DISCONTIG memory model
  lib/test_vmalloc.c: do not create cpumask_t variable on stack
  lib/Kconfig.debug: fix build error without CONFIG_BLOCK
  zram: pass down the bvec we need to read into in the work struct
  mm/memory_hotplug.c: drop memory device reference after find_memory_block()
-rw-r--r--  Documentation/sysctl/vm.txt     16
-rw-r--r--  drivers/block/zram/zram_drv.c    5
-rw-r--r--  fs/proc/proc_sysctl.c            6
-rw-r--r--  lib/Kconfig.debug                1
-rw-r--r--  lib/test_vmalloc.c               6
-rw-r--r--  mm/memory_hotplug.c              1
-rw-r--r--  mm/page_alloc.c                 27
7 files changed, 39 insertions, 23 deletions
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 6af24cdb25cc..3f13d8599337 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -866,14 +866,14 @@ The intent is that compaction has less work to do in the future and to
increase the success rate of future high-order allocations such as SLUB
allocations, THP and hugetlbfs pages.
-To make it sensible with respect to the watermark_scale_factor parameter,
-the unit is in fractions of 10,000. The default value of 15,000 means
-that up to 150% of the high watermark will be reclaimed in the event of
-a pageblock being mixed due to fragmentation. The level of reclaim is
-determined by the number of fragmentation events that occurred in the
-recent past. If this value is smaller than a pageblock then a pageblocks
-worth of pages will be reclaimed (e.g. 2MB on 64-bit x86). A boost factor
-of 0 will disable the feature.
+To make it sensible with respect to the watermark_scale_factor
+parameter, the unit is in fractions of 10,000. The default value of
+15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
+watermark will be reclaimed in the event of a pageblock being mixed due
+to fragmentation. The level of reclaim is determined by the number of
+fragmentation events that occurred in the recent past. If this value is
+smaller than a pageblock then a pageblocks worth of pages will be reclaimed
+(e.g. 2MB on 64-bit x86). A boost factor of 0 will disable the feature.
=============================================================
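
The knob documented above is exposed at /proc/sys/vm/watermark_boost_factor: 15000 means boost up to 150% of the high watermark, 0 disables boosting. A minimal userspace sketch of reading and changing it (not part of this patch; assumes root and a kernel that provides the sysctl):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const char *path = "/proc/sys/vm/watermark_boost_factor";
		char buf[32];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror("fopen");
			return EXIT_FAILURE;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("current boost factor: %s", buf);
		fclose(f);

		/* Writing 0 disables watermark boosting; needs CAP_SYS_ADMIN. */
		f = fopen(path, "w");
		if (!f) {
			perror("fopen for write");
			return EXIT_FAILURE;
		}
		fputs("0\n", f);
		fclose(f);
		return EXIT_SUCCESS;
	}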
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 399cad7daae7..d58a359a6622 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -774,18 +774,18 @@ struct zram_work {
struct zram *zram;
unsigned long entry;
struct bio *bio;
+ struct bio_vec bvec;
};
#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
- struct bio_vec bvec;
struct zram_work *zw = container_of(work, struct zram_work, work);
struct zram *zram = zw->zram;
unsigned long entry = zw->entry;
struct bio *bio = zw->bio;
- read_from_bdev_async(zram, &bvec, entry, bio);
+ read_from_bdev_async(zram, &zw->bvec, entry, bio);
}
/*
@@ -798,6 +798,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
{
struct zram_work work;
+ work.bvec = *bvec;
work.zram = zram;
work.entry = entry;
work.bio = bio;
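
The zram change is the standard fix for handing asynchronous work a pointer into a stack frame that is about to die: copy the data into the work item, which the submitter keeps alive until completion. A userspace analog with pthreads (illustrative only; the kernel code uses workqueues and bios, and all names here are hypothetical):

	#include <pthread.h>
	#include <stdio.h>

	struct work {
		pthread_t thread;
		char payload[64];	/* lives as long as the work item itself */
	};

	static void *worker(void *arg)
	{
		struct work *w = arg;

		printf("worker sees: %s\n", w->payload);	/* safe: w outlives us */
		return NULL;
	}

	static void submit(struct work *w, const char *msg)
	{
		/* Copy into the work item, in the spirit of work.bvec = *bvec;
		 * pointing the worker at submit()'s own stack would be a bug. */
		snprintf(w->payload, sizeof(w->payload), "%s", msg);
		pthread_create(&w->thread, NULL, worker, w);
	}

	int main(void)
	{
		struct work w;	/* caller keeps this alive until completion */

		submit(&w, "hello");
		pthread_join(w.thread, NULL);	/* wait, as read_from_bdev_sync() does */
		return 0;
	}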
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index d65390727541..7325baa8f9d4 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1626,9 +1626,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
if (--header->nreg)
return;
- if (parent)
+ if (parent) {
put_links(header);
- start_unregistering(header);
+ start_unregistering(header);
+ }
+
if (!--header->count)
kfree_rcu(header, rcu);
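
The shape of the bug: put_links() dereferences the header's parent, and an orphan header (one that failed insertion and never got a parent) was never registered, so it must take neither the unlink nor the unregister path. A reduced sketch of the pattern (hypothetical types; not the sysctl code):

	#include <stdlib.h>

	struct header {
		struct header *parent;
		int count;
	};

	static void put_links(struct header *h)
	{
		h->parent->count--;	/* crashes if h->parent is NULL */
	}

	static void drop_header(struct header *h)
	{
		if (h->parent) {
			put_links(h);
			/* the start_unregistering() equivalent belongs in this
			 * branch too: an orphan was never registered at all */
		}
		if (--h->count == 0)
			free(h);
	}

	int main(void)
	{
		struct header *orphan = calloc(1, sizeof(*orphan));

		orphan->count = 1;	/* no parent: must skip the unlink path */
		drop_header(orphan);
		return 0;
	}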
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 00dbcdbc9a0d..d5a4a4036d2f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1929,6 +1929,7 @@ config TEST_KMOD
depends on m
depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
depends on NETDEVICES && NET_CORE && INET # for TUN
+ depends on BLOCK
select TEST_LKM
select XFS_FS
select TUN
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 83cdcaa82bf6..f832b095afba 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -383,14 +383,14 @@ static void shuffle_array(int *arr, int n)
static int test_func(void *private)
{
struct test_driver *t = private;
- cpumask_t newmask = CPU_MASK_NONE;
int random_array[ARRAY_SIZE(test_case_array)];
int index, i, j, ret;
ktime_t kt;
u64 delta;
- cpumask_set_cpu(t->cpu, &newmask);
- set_cpus_allowed_ptr(current, &newmask);
+ ret = set_cpus_allowed_ptr(current, cpumask_of(t->cpu));
+ if (ret < 0)
+ pr_err("Failed to set affinity to %d CPU\n", t->cpu);
for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
random_array[i] = i;
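
Two things change here: the NR_CPUS-sized cpumask_t no longer lives on the kthread stack (cpumask_of() hands back a pointer to a constant single-CPU mask), and the return value of set_cpus_allowed_ptr() is finally checked. A userspace analog of the checked single-CPU pinning (illustrative; userspace cpu_set_t is small, a kernel cpumask need not be):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	static int pin_to_cpu(int cpu)
	{
		cpu_set_t mask;

		CPU_ZERO(&mask);
		CPU_SET(cpu, &mask);
		/* Check the result, as the patched test_func() now does. */
		if (sched_setaffinity(0, sizeof(mask), &mask) < 0) {
			fprintf(stderr, "failed to set affinity to CPU %d\n", cpu);
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		return pin_to_cpu(0) ? 1 : 0;
	}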
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0082d699be94..b236069ff0d8 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -874,6 +874,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
*/
mem = find_memory_block(__pfn_to_section(pfn));
nid = mem->nid;
+ put_device(&mem->dev);
/* associate pfn range with the zone */
zone = move_pfn_range(online_type, nid, pfn, nr_pages);
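
find_memory_block() returns its result with a device reference held, so the caller owes a put_device() once it has read what it needs (here, just the node id). The convention, reduced to a sketch (hypothetical types; not the memory-hotplug code):

	#include <stdlib.h>

	struct block {
		int nid;
		int refcount;
	};

	static struct block *find_block(struct block *b)
	{
		b->refcount++;	/* the lookup hands back a held reference */
		return b;
	}

	static void put_block(struct block *b)
	{
		if (--b->refcount == 0)
			free(b);
	}

	int main(void)
	{
		struct block *table = calloc(1, sizeof(*table));
		struct block *b;
		int nid;

		table->refcount = 1;

		b = find_block(table);
		nid = b->nid;
		put_block(b);	/* the reference the patch now drops */

		put_block(table);	/* drop the table's own reference */
		return nid;
	}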
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c6ce20aaf80b..c02cff1ed56e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -266,7 +266,20 @@ compound_page_dtor * const compound_page_dtors[] = {
int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
+#ifdef CONFIG_DISCONTIGMEM
+/*
+ * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
+ * are not on separate NUMA nodes. Functionally this works but with
+ * watermark_boost_factor, it can reclaim prematurely as the ranges can be
+ * quite small. By default, do not boost watermarks on discontigmem as in
+ * many cases very high-order allocations like THP are likely to be
+ * unsupported and the premature reclaim offsets the advantage of long-term
+ * fragmentation avoidance.
+ */
+int watermark_boost_factor __read_mostly;
+#else
int watermark_boost_factor __read_mostly = 15000;
+#endif
int watermark_scale_factor = 10;
static unsigned long nr_kernel_pages __initdata;
@@ -3419,8 +3432,11 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
alloc_flags |= ALLOC_KSWAPD;
#ifdef CONFIG_ZONE_DMA32
+ if (!zone)
+ return alloc_flags;
+
if (zone_idx(zone) != ZONE_NORMAL)
- goto out;
+ return alloc_flags;
/*
* If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
@@ -3429,9 +3445,9 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
*/
BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
if (nr_online_nodes > 1 && !populated_zone(--zone))
- goto out;
+ return alloc_flags;
-out:
+ alloc_flags |= ALLOC_NOFRAGMENT;
#endif /* CONFIG_ZONE_DMA32 */
return alloc_flags;
}
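
The bug this rewrites away: every path in the old function, including the success path, converged on the same out: label, so ALLOC_NOFRAGMENT was never ORed in at all. With early returns there is exactly one success path, and the flag is set on it. Distilled (placeholder flag value, simplified conditions):

	#include <stdio.h>

	#define ALLOC_NOFRAGMENT 0x1	/* placeholder value, for illustration */

	static unsigned int nofragment_flags(int zone_is_normal, int dma32_populated)
	{
		unsigned int flags = 0;

		if (!zone_is_normal)
			return flags;	/* was: goto out */
		if (!dma32_populated)
			return flags;	/* was: goto out */

		flags |= ALLOC_NOFRAGMENT;	/* only the success path gets here */
		return flags;
	}

	int main(void)
	{
		printf("flags = %#x\n", nofragment_flags(1, 1));	/* 0x1 */
		printf("flags = %#x\n", nofragment_flags(0, 1));	/* 0 */
		return 0;
	}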
@@ -3773,11 +3789,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
memalloc_noreclaim_restore(noreclaim_flag);
psi_memstall_leave(&pflags);
- if (*compact_result <= COMPACT_INACTIVE) {
- WARN_ON_ONCE(page);
- return NULL;
- }
-
/*
* At least in one zone compaction wasn't deferred or skipped, so let's
* count a compaction stall
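
The deleted block bailed out on a pessimistic compact_result even when compaction had already captured a page, discarding that page (the WARN_ON_ONCE records that this was believed impossible). After the patch, a captured page is used regardless of what the result code says. The decision order, distilled (hypothetical types and values, not the mm code):

	#include <stddef.h>

	struct page;

	enum compact_result { COMPACT_SKIPPED, COMPACT_INACTIVE, COMPACT_SUCCESS };

	static struct page *pick_compact_result(enum compact_result res,
						struct page *captured)
	{
		/* Old order checked res first, throwing 'captured' away. */
		if (captured)
			return captured;	/* a captured page always wins now */
		if (res <= COMPACT_INACTIVE)
			return NULL;		/* nothing happened; fall back */
		return NULL;			/* caller retries the freelists */
	}

	int main(void)
	{
		/* No captured page and an inactive result: fall back to NULL. */
		return pick_compact_result(COMPACT_INACTIVE, NULL) == NULL ? 0 : 1;
	}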