diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e458995f2b5da2671af609a9bfae27f1da9dd8b1..76c30f0f7c77c92ec7cfdcc76764477c3880108a 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -960,6 +960,8 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
 
 	ext4_ext_show_path(inode, path);
 
+	if (orig_path)
+		*orig_path = path;
 	return path;
 
 err:
@@ -3260,6 +3262,24 @@ static int ext4_split_extent_at(handle_t *handle,
 	if (err != -ENOSPC && err != -EDQUOT)
 		goto out;
 
+	/*
+	 * Update path is required because previous ext4_ext_insert_extent()
+	 * may have freed or reallocated the path. Using EXT4_EX_NOFAIL
+	 * guarantees that ext4_find_extent() will not return -ENOMEM,
+	 * otherwise -ENOMEM will cause a retry in do_writepages(), and a
+	 * WARN_ON may be triggered in ext4_da_update_reserve_space() due to
+	 * an incorrect ee_len causing the i_reserved_data_blocks exception.
+	 */
+	path = ext4_find_extent(inode, ee_block, ppath,
+			flags | EXT4_EX_NOFAIL);
+	if (IS_ERR(path)) {
+		EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
+				 split, PTR_ERR(path));
+		return PTR_ERR(path);
+	}
+	depth = ext_depth(inode);
+	ex = path[depth].p_ext;
+
 	if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
 		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
 			if (split_flag & EXT4_EXT_DATA_VALID1) {
@@ -3312,7 +3332,7 @@ static int ext4_split_extent_at(handle_t *handle,
 	ext4_ext_dirty(handle, inode, path + path->p_depth);
 	return err;
 out:
-	ext4_ext_show_leaf(inode, path);
+	ext4_ext_show_leaf(inode, *ppath);
 	return err;
 }
 
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index f8dd5d972c337035b29060f3f5e7e355ad252e60..661a8544d7817ea19aa9a482db552d8025f42bb9 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -36,7 +36,6 @@ get_ext_path(struct inode *inode, ext4_lblk_t lblock,
 		*ppath = NULL;
 		return -ENODATA;
 	}
-	*ppath = path;
 	return 0;
 }
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 14e6bfdebbb82de95dfc3d463d0865642ca6de3f..04919dc9fbbf40938f8e368b5731f028a75994b3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3040,6 +3040,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	unsigned long populate = 0;
 	unsigned long ret = -EINVAL;
 	struct file *file;
+	vm_flags_t vm_flags;
 
 	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n",
 		     current->comm, current->pid);
@@ -3056,15 +3057,60 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
 		return ret;
 
-	if (mmap_write_lock_killable(mm))
+	if (mmap_read_lock_killable(mm))
 		return -EINTR;
 
-	vma = find_vma(mm, start);
+	/*
+	 * Look up VMA under read lock first so we can perform the security
+	 * without holding locks (which can be problematic). We reacquire a
+	 * write lock later and check nothing changed underneath us.
+	 */
+	vma = vma_lookup(mm, start);
+
+	if (!vma || !(vma->vm_flags & VM_SHARED)) {
+		mmap_read_unlock(mm);
+		return -EINVAL;
+	}
 
-	if (!vma || !(vma->vm_flags & VM_SHARED))
+	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
+	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
+	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
+
+	flags &= MAP_NONBLOCK;
+	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
+	if (vma->vm_flags & VM_LOCKED)
+		flags |= MAP_LOCKED;
+
+	/* Save vm_flags used to calculate prot and flags, and recheck later. */
+	vm_flags = vma->vm_flags;
+	file = get_file(vma->vm_file);
+
+	mmap_read_unlock(mm);
+
+	/* Call outside mmap_lock to be consistent with other callers. */
+	ret = security_mmap_file(file, prot, flags);
+	if (ret) {
+		fput(file);
+		return ret;
+	}
+
+	ret = -EINVAL;
+
+	/* OK security check passed, take write lock + let it rip. */
+	if (mmap_write_lock_killable(mm)) {
+		fput(file);
+		return -EINTR;
+	}
+
+	vma = vma_lookup(mm, start);
+
+	if (!vma)
 		goto out;
 
-	if (start < vma->vm_start)
+	/* Make sure things didn't change under us. */
+	if (vma->vm_flags != vm_flags)
+		goto out;
+	if (vma->vm_file != file)
 		goto out;
 
 	if (start + size > vma->vm_end) {
@@ -3089,37 +3135,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 			goto out;
 	}
 
-	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
-	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
-	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
-
-	flags &= MAP_NONBLOCK;
-	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
-	if (vma->vm_flags & VM_LOCKED) {
-		struct vm_area_struct *tmp;
-		flags |= MAP_LOCKED;
-
-		/* drop PG_Mlocked flag for over-mapped range */
-		for (tmp = vma; tmp->vm_start >= start + size;
-		     tmp = tmp->vm_next) {
-			/*
-			 * Split pmd and munlock page on the border
-			 * of the range.
-			 */
-			vma_adjust_trans_huge(tmp, start, start + size, 0);
-
-			munlock_vma_pages_range(tmp,
-					max(tmp->vm_start, start),
-					min(tmp->vm_end, start + size));
-		}
-	}
-
-	file = get_file(vma->vm_file);
 	ret = do_mmap(vma->vm_file, start, size,
 			prot, flags, pgoff, &populate, NULL);
-	fput(file);
 out:
 	mmap_write_unlock(mm);
+	fput(file);
 	if (populate)
 		mm_populate(ret, populate);
 	if (!IS_ERR_VALUE(ret))
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7e3eb0af4b0e3cc4f82a6decef9df1fbcf96d37c..ac746c1a26e136f83ff21c8d025f7bd75127541a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -174,7 +174,7 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
 		struct inet6_dev *rt_idev = rt->rt6i_idev;
 		struct net_device *rt_dev = rt->dst.dev;
 
-		if (rt_idev->dev == dev) {
+		if (rt_idev && rt_idev->dev == dev) {
 			rt->rt6i_idev = in6_dev_get(loopback_dev);
 			in6_dev_put(rt_idev);
 		}
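
Note: a minimal userspace sketch, not part of the patch, of how the modified remap_file_pages() path in mm/mmap.c above can be exercised. The file name, layout, and checks are illustrative assumptions on my part. It builds a two-page MAP_SHARED file mapping and remaps file page 1 over the first page, so the syscall walks the new read-lock lookup, the unlocked security_mmap_file() call, and the write-lock revalidation before do_mmap().

/* Hypothetical test harness; assumptions noted above, not part of the patch. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char path[] = "/tmp/remap-test-XXXXXX";
	int fd = mkstemp(path);

	if (fd < 0 || ftruncate(fd, 2 * page) < 0) {
		perror("setup");
		return 1;
	}
	/* Tag each file page so the remapped contents are recognisable. */
	if (pwrite(fd, "page0", 5, 0) != 5 ||
	    pwrite(fd, "page1", 5, page) != 5) {
		perror("pwrite");
		return 1;
	}

	char *map = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Map file page 1 at the start of the VMA; prot is passed as 0. */
	if (remap_file_pages(map, page, 0, 1, 0) < 0) {
		perror("remap_file_pages");
		return 1;
	}

	printf("first page now reads: %.5s\n", map);	/* expect "page1" */

	munmap(map, 2 * page);
	close(fd);
	unlink(path);
	return 0;
}

With the patch applied, the call is expected to behave as before for an ordinary MAP_SHARED file mapping; the functional difference is that an LSM can now veto the operation via security_mmap_file(), and the VM_LOCKED munlock loop is gone from the syscall itself.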