diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 88e2cc4d4e7539cb6e9299116ed69eec520bea39..7ae8a9d836b4fb30107453f0d3c743c573085780 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -11,6 +11,7 @@
 #include <asm/fixmap.h>
 #include <asm/desc.h>
 #include <asm/kasan.h>
+#include <asm/setup.h>
 
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page,
 				   entry_stack_storage);
@@ -30,6 +31,12 @@ static __init void init_cea_offsets(void)
 	unsigned int max_cea;
 	unsigned int i, j;
 
+	if (!kaslr_enabled()) {
+		for_each_possible_cpu(i)
+			per_cpu(_cea_offset, i) = i;
+		return;
+	}
+
 	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
 
 	/* O(sodding terrible) */
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index e02dead5b3b44ec28637a975b0e1d592888a1620..6dddac4548e118f94a608afd1f79a9da0d69ef28 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1491,6 +1491,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 		if (pmd_pbha(pmd))
 			flags |= PM_PBHA_BIT0;
 
+		if (page && !PageAnon(page))
+			flags |= PM_FILE;
 		if (page && !migration && page_mapcount(page) == 1)
 			flags |= PM_MMAP_EXCLUSIVE;
 
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index f67418a30282ce2cd82ccdcc2b594f0105268485..491b721f66f12986e12e9a129730f838be89964c 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -921,10 +921,14 @@ static const struct file_operations objects_fops = {
 	.release = seq_release,
 };
 
-static int __init kfence_debugfs_init(void)
+static int kfence_debugfs_init(void)
 {
-	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);
+	struct dentry *kfence_dir;
 
+	if (!READ_ONCE(kfence_enabled))
+		return 0;
+
+	kfence_dir = debugfs_create_dir("kfence", NULL);
 	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
 
 	/* Variable kfence_metadata may fail to allocate. */
@@ -1128,6 +1132,8 @@ static int kfence_init_late(void)
 	}
 
 	kfence_init_enable();
+	kfence_debugfs_init();
+
 	return 0;
 }
 