Diffstat (limited to 'mm/filemap.c')
-rw-r--r--    mm/filemap.c    71
1 file changed, 40 insertions, 31 deletions
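
Note on the second hunk below: the #ifdef CONFIG_TRANSPARENT_HUGEPAGE block near the top of do_sync_mmap_readahead() is replaced by an IS_ENABLED() check plus a force_thp_readahead flag, so the mmap_miss accounting further down is now shared with the VM_HUGEPAGE path. A minimal sketch of that IS_ENABLED() idiom follows, assuming the same symbols the hunk uses; the helper name is illustrative and does not appear in the patch.

/*
 * Sketch only: use_thp_readahead() is a hypothetical helper, not part
 * of this diff. Unlike an #ifdef block, IS_ENABLED() keeps the THP
 * branch visible to the compiler even when CONFIG_TRANSPARENT_HUGEPAGE
 * is off, so it is still type-checked and then dropped as dead code.
 */
static bool use_thp_readahead(vm_flags_t vm_flags)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return false;

	return (vm_flags & VM_HUGEPAGE) &&
	       HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER;
}

This mirrors the condition the hunk open-codes; kernel style generally prefers IS_ENABLED() over #ifdef when the referenced symbols exist in both configurations, since the disabled branch is eliminated at compile time.
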
diff --git a/mm/filemap.c b/mm/filemap.c
index 024b71da5224..f0c36df1def7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -48,7 +48,8 @@
#include <linux/rcupdate_wait.h>
#include <linux/sched/mm.h>
#include <linux/sysctl.h>
-#include <asm/pgalloc.h>
+#include <linux/pgalloc.h>
+
#include <asm/tlbflush.h>
#include "internal.h"
@@ -3253,11 +3254,47 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
struct file *fpin = NULL;
vm_flags_t vm_flags = vmf->vma->vm_flags;
+ bool force_thp_readahead = false;
unsigned short mmap_miss;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Use the readahead code, even if readahead is disabled */
- if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) {
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+ (vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER)
+ force_thp_readahead = true;
+
+ if (!force_thp_readahead) {
+ /*
+ * If we don't want any read-ahead, don't bother.
+ * VM_EXEC case below is already intended for random access.
+ */
+ if ((vm_flags & (VM_RAND_READ | VM_EXEC)) == VM_RAND_READ)
+ return fpin;
+
+ if (!ra->ra_pages)
+ return fpin;
+
+ if (vm_flags & VM_SEQ_READ) {
+ fpin = maybe_unlock_mmap_for_io(vmf, fpin);
+ page_cache_sync_ra(&ractl, ra->ra_pages);
+ return fpin;
+ }
+ }
+
+ if (!(vm_flags & VM_SEQ_READ)) {
+ /* Avoid banging the cache line if not needed */
+ mmap_miss = READ_ONCE(ra->mmap_miss);
+ if (mmap_miss < MMAP_LOTSAMISS * 10)
+ WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
+
+ /*
+ * Do we miss much more than hit in this file? If so,
+ * stop bothering with read-ahead. It will only hurt.
+ */
+ if (mmap_miss > MMAP_LOTSAMISS)
+ return fpin;
+ }
+
+ if (force_thp_readahead) {
fpin = maybe_unlock_mmap_for_io(vmf, fpin);
ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
ra->size = HPAGE_PMD_NR;
@@ -3272,34 +3309,6 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
page_cache_ra_order(&ractl, ra);
return fpin;
}
-#endif
-
- /*
- * If we don't want any read-ahead, don't bother. VM_EXEC case below is
- * already intended for random access.
- */
- if ((vm_flags & (VM_RAND_READ | VM_EXEC)) == VM_RAND_READ)
- return fpin;
- if (!ra->ra_pages)
- return fpin;
-
- if (vm_flags & VM_SEQ_READ) {
- fpin = maybe_unlock_mmap_for_io(vmf, fpin);
- page_cache_sync_ra(&ractl, ra->ra_pages);
- return fpin;
- }
-
- /* Avoid banging the cache line if not needed */
- mmap_miss = READ_ONCE(ra->mmap_miss);
- if (mmap_miss < MMAP_LOTSAMISS * 10)
- WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
-
- /*
- * Do we miss much more than hit in this file? If so,
- * stop bothering with read-ahead. It will only hurt.
- */
- if (mmap_miss > MMAP_LOTSAMISS)
- return fpin;
if (vm_flags & VM_EXEC) {
/*