diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 4572cb05795188beb5a54f732427a45e67dc0c1f..f125b5642873037b4401f33d4eedd6ea6634dcc2 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -750,6 +750,12 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
 	ext4_write_lock_xattr(inode, &no_expand);
 	BUG_ON(!ext4_has_inline_data(inode));
 
+	/*
+	 * ei->i_inline_off may have changed since ext4_write_begin()
+	 * called ext4_try_to_write_inline_data()
+	 */
+	(void) ext4_find_inline_data_nolock(inode);
+
 	kaddr = kmap_atomic(page);
 	ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
 	kunmap_atomic(kaddr);
diff --git a/ipc/msg.c b/ipc/msg.c
index ac4de3f672615f928f3095178b670464f63f4428..9a1ff5669cfbd0becd305320230d0004009c918c 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -137,7 +137,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 	key_t key = params->key;
 	int msgflg = params->flg;
 
-	msq = kvmalloc(sizeof(*msq), GFP_KERNEL);
+	msq = kvmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT);
 	if (unlikely(!msq))
 		return -ENOMEM;
 
diff --git a/ipc/sem.c b/ipc/sem.c
index 2bf535dd0b93481a631165d1894d3bb25170c30f..171881795a302d5606a1e4b41d532d14a329c805 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -494,7 +494,7 @@ static struct sem_array *sem_alloc(size_t nsems)
 		return NULL;
 
 	size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
-	sma = kvmalloc(size, GFP_KERNEL);
+	sma = kvmalloc(size, GFP_KERNEL_ACCOUNT);
 	if (unlikely(!sma))
 		return NULL;
 
@@ -1813,7 +1813,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
 
 	undo_list = current->sysvsem.undo_list;
 	if (!undo_list) {
-		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
+		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
 		if (undo_list == NULL)
 			return -ENOMEM;
 		spin_lock_init(&undo_list->lock);
@@ -1897,7 +1897,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	rcu_read_unlock();
 
 	/* step 2: allocate new undo structure */
-	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL_ACCOUNT);
 	if (!new) {
 		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 		return ERR_PTR(-ENOMEM);
@@ -1961,7 +1961,8 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 	if (nsops > ns->sc_semopm)
 		return -E2BIG;
 	if (nsops > SEMOPM_FAST) {
-		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
+		sops = kvmalloc_array(nsops, sizeof(*sops),
+				      GFP_KERNEL_ACCOUNT);
 		if (sops == NULL)
 			return -ENOMEM;
 	}
diff --git a/ipc/shm.c b/ipc/shm.c
index 1c65fb357395eace45f25a7c6a37d91ebe30a722..904fe53256af03cddbc719ac5be6f27439ee1643 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -619,7 +619,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 	    ns->shm_tot + numpages > ns->shm_ctlall)
 		return -ENOSPC;
 
-	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
+	shp = kvmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
 	if (unlikely(!shp))
 		return -ENOMEM;
 
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index ba6fc3c1186b97c6fd7bb01d4de691cdb254f0ec..e91838a7b8497608448b1d6f7c1d0a86eb930707 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -98,6 +98,8 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
 		newdp->dccps_role	    = DCCP_ROLE_SERVER;
 		newdp->dccps_hc_rx_ackvec   = NULL;
 		newdp->dccps_service_list   = NULL;
+		newdp->dccps_hc_rx_ccid     = NULL;
+		newdp->dccps_hc_tx_ccid     = NULL;
 		newdp->dccps_service	    = dreq->dreq_service;
 		newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
 		newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9312c7e750ed33452bed6930866497e1ec23dbf0..14ed735ca29225aed63ab7145e2d52c6e294db76 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1490,6 +1490,13 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
 	return true;
 }
 
+static int kvm_try_get_pfn(kvm_pfn_t pfn)
+{
+	if (kvm_is_reserved_pfn(pfn))
+		return 1;
+	return get_page_unless_zero(pfn_to_page(pfn));
+}
+
 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
			       unsigned long addr, bool *async,
			       bool write_fault, bool *writable,
@@ -1532,11 +1539,19 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 	 * Whoever called remap_pfn_range is also going to call e.g.
 	 * unmap_mapping_range before the underlying pages are freed,
 	 * causing a call to our MMU notifier.
+	 *
+	 * Certain IO or PFNMAP mappings can be backed with valid
+	 * struct pages, but be allocated without refcounting e.g.,
+	 * tail pages of non-compound higher order allocations, which
+	 * would then underflow the refcount when the caller does the
+	 * required put_page. Don't allow those pages here.
 	 */
-	kvm_get_pfn(pfn);
+	if (!kvm_try_get_pfn(pfn))
+		r = -EFAULT;
 
 	*p_pfn = pfn;
-	return 0;
+
+	return r;
 }
 
 /*
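
A note on the virt/kvm/kvm_main.c hunk above: the fix works because get_page_unless_zero() takes a reference only while the page's refcount is still non-zero, so a page that was never refcounted (refcount 0, e.g. a tail page of a non-compound higher-order allocation) is rejected up front rather than having its refcount underflowed by the caller's later put_page(). Below is a minimal stand-alone sketch of that "get unless zero" compare-and-swap loop using C11 atomics; struct obj and obj_get_unless_zero() are illustrative stand-ins, not kernel API.

#include <stdatomic.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;	/* 0 models a non-refcounted page */
};

/* Take a reference iff refcount is currently non-zero; returns 1 on success. */
static int obj_get_unless_zero(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	while (old != 0) {
		/* On failure the CAS reloads 'old' and we retry. */
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return 1;
	}
	return 0;	/* never touched a zero refcount, so no underflow possible */
}

int main(void)
{
	struct obj live = { .refcount = 1 };
	struct obj dead = { .refcount = 0 };

	printf("live: %d\n", obj_get_unless_zero(&live));	/* 1: reference taken */
	printf("dead: %d\n", obj_get_unless_zero(&dead));	/* 0: caller bails, like the -EFAULT above */
	return 0;
}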