diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index a14b0059f17737872da198189789393250d47ca9..e101416c64e54f037c6927fcbc0ca248e150ffb8 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -26,6 +26,7 @@
 #include <linux/rcupdate_trace.h>
 #include <linux/workqueue.h>
 #include <linux/srcu.h>
+#include <linux/oom.h>          /* check_stable_address_space */
 
 #include <linux/uprobes.h>
 
@@ -1050,6 +1051,9 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
 			goto free;
 
 		mmap_write_lock(mm);
+		if (check_stable_address_space(mm))
+			goto unlock;
+
 		vma = find_vma(mm, info->vaddr);
 		if (!vma || !valid_vma(vma, is_register) ||
 		    file_inode(vma->vm_file) != uprobe->inode)
diff --git a/kernel/fork.c b/kernel/fork.c
index e1550745ae78e1ec6502fd9dc94f95e75471f81b..698d7829f2e448d5684fad1d8ad0c593c9755c3e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -794,7 +794,8 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 		mt_set_in_rcu(vmi.mas.tree);
 		ksm_fork(mm, oldmm);
 		khugepaged_fork(mm, oldmm);
-	} else if (mpnt) {
+	} else {
+
 		/*
 		 * The entire maple tree has already been duplicated. If the
 		 * mmap duplication fails, mark the failure point with
@@ -802,8 +803,18 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 		 * stop releasing VMAs that have not been duplicated after this
 		 * point.
 		 */
-		mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
-		mas_store(&vmi.mas, XA_ZERO_ENTRY);
+		if (mpnt) {
+			mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
+			mas_store(&vmi.mas, XA_ZERO_ENTRY);
+			/* Avoid OOM iterating a broken tree */
+			set_bit(MMF_OOM_SKIP, &mm->flags);
+		}
+		/*
+		 * The mm_struct is going to exit, but the locks will be dropped
+		 * first.  Marking the mm_struct as unstable is advisable as it
+		 * is not fully initialised.
+		 */
+		set_bit(MMF_UNSTABLE, &mm->flags);
 	}
 out:
 	mmap_write_unlock(mm);
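
For context, a minimal sketch (not part of the patch) of the locking pattern the uprobes hunk follows: take mmap_write_lock() on the target mm, let check_stable_address_space() reject an mm that dup_mmap() has marked MMF_UNSTABLE, and only then look up VMAs. The helper name walk_foreign_mm() and its error handling are illustrative assumptions; only mmap_write_lock()/mmap_write_unlock(), check_stable_address_space() and find_vma() are existing kernel APIs.

	#include <linux/mm.h>
	#include <linux/oom.h>		/* check_stable_address_space() */

	/* Illustrative helper, not in the kernel tree. */
	static int walk_foreign_mm(struct mm_struct *mm, unsigned long vaddr)
	{
		struct vm_area_struct *vma;
		int err;

		mmap_write_lock(mm);

		/* Bail out if the mm is exiting or was never fully duplicated. */
		err = check_stable_address_space(mm);
		if (err)
			goto unlock;

		vma = find_vma(mm, vaddr);
		if (!vma) {
			err = -ENOENT;
			goto unlock;
		}

		/* ... operate on vma under the write lock ... */

	unlock:
		mmap_write_unlock(mm);
		return err;
	}

Setting MMF_OOM_SKIP next to the XA_ZERO_ENTRY marker in dup_mmap() serves the same purpose on the OOM side: it keeps the OOM path from iterating a maple tree that was only partially duplicated, as the in-line comment in the hunk notes.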