diff --git a/fs/proc/etmem_swap.c b/fs/proc/etmem_swap.c
index 86f5cf8c90a1d3811904b73c7a07106272997b18..becc7f7c2fa8cdb78d8f8d8498d8e165fdc0202e 100644
--- a/fs/proc/etmem_swap.c
+++ b/fs/proc/etmem_swap.c
@@ -70,6 +70,8 @@ static ssize_t swap_pages_write(struct file *file, const char __user *buf,
 			continue;
 
 		add_page_for_swap(page, &pagelist);
+		/* release the reference taken via get_page_from_vaddr() */
+		put_page(page);
 	}
 
 	if (!list_empty(&pagelist))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7cb214d44417aa5085ebcd4157879fee24e87c2b..08213f1c18ee7a0d67eaccc4d7ad6553aa929815 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4623,7 +4623,6 @@ EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
 #ifdef CONFIG_ETMEM
 int add_page_for_swap(struct page *page, struct list_head *pagelist)
 {
-	int err = -EBUSY;
 	struct page *head;
 
 	/* If the page is mapped by more than one process, do not swap it */
@@ -4634,19 +4633,15 @@ int add_page_for_swap(struct page *page, struct list_head *pagelist)
 		return -EACCES;
 
 	head = compound_head(page);
-	err = isolate_lru_page(head);
-	if (err) {
-		put_page(page);
-		return err;
-	}
-	put_page(page);
-	if (PageUnevictable(page))
-		putback_lru_page(page);
+	if (isolate_lru_page(head))
+		return -EBUSY;
+
+	if (PageUnevictable(head))
+		putback_lru_page(head);
 	else
 		list_add_tail(&head->lru, pagelist);
 
-	err = 0;
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(add_page_for_swap);
 
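For context, this patch changes the reference-counting contract of add_page_for_swap(): it previously dropped the caller's page reference on the paths that reached isolate_lru_page(), but leaked it on the early -EACCES returns; now it never consumes the reference, and the caller pairs get_page_from_vaddr() with an unconditional put_page() itself. A minimal caller sketch under the new convention follows; swap_one_vaddr() is a hypothetical helper (not part of the patch), get_page_from_vaddr()'s signature is assumed from its use in fs/proc/etmem_swap.c, and error handling is elided:

	/*
	 * Hypothetical helper illustrating the new convention; not part of
	 * the patch. Assumes get_page_from_vaddr() returns a page holding
	 * one extra reference, or NULL.
	 */
	static void swap_one_vaddr(struct mm_struct *mm, unsigned long vaddr,
				   struct list_head *pagelist)
	{
		struct page *page = get_page_from_vaddr(mm, vaddr);

		if (!page)
			return;

		/* may fail with -EACCES or -EBUSY; the reference is untouched */
		add_page_for_swap(page, pagelist);

		/* caller always drops the reference it took, success or not */
		put_page(page);
	}

Making the put unconditional in the caller closes the leak on the early-return paths and removes the err/put_page bookkeeping the old add_page_for_swap() carried.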