Mirror of https://github.com/rd-stuffs/msm-4.14.git
Revert "mm, pagevec: remove cold parameter for pagevecs"
This reverts commit 9443cf7bcca4bd5a56a070fd422710e3438db00c.
parent aa4e942d07
commit 2c5929ae4f
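In practical terms, this revert restores the two-argument form of pagevec_init() together with the cold field in struct pagevec (see the include/linux/pagevec.h hunk below): callers pass 0 when the pages are expected to be reused soon, and the release paths pass pvec->cold on to release_pages() when batching frees. The sketch below shows the caller pattern that repeats throughout this diff; it is illustrative only, assuming the in-kernel pagevec API from linux/pagevec.h — the function name example_walk_dirty_pages and the choice of PAGECACHE_TAG_DIRTY are assumptions for the example, not code from any file touched here.

#include <linux/pagevec.h>
#include <linux/pagemap.h>

/* Sketch: batch-process the dirty pages of a mapping, then release them. */
static void example_walk_dirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);	/* 0 == not cold: pages likely reused soon */
	while (pagevec_lookup_tag(&pvec, mapping, &index,
				  PAGECACHE_TAG_DIRTY)) {
		/* pvec.pages[0 .. pvec.nr - 1] hold referenced pages here */
		pagevec_release(&pvec);	/* drop the refs; honours pvec->cold */
	}
}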
@@ -1871,7 +1871,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
 INIT_LIST_HEAD(&vm->unbound_list);
 
 list_add_tail(&vm->global_link, &dev_priv->vm_list);
-pagevec_init(&vm->free_pages);
+pagevec_init(&vm->free_pages, false);
 }
 
 static void i915_address_space_fini(struct i915_address_space *vm)
@@ -310,7 +310,7 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error,
 _enter("{%x:%u},%lx-%lx",
 vnode->fid.vid, vnode->fid.vnode, first, last);
 
-pagevec_init(&pv);
+pagevec_init(&pv, 0);
 
 do {
 _debug("kill %lx-%lx", first, last);
@@ -611,7 +611,7 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
 
 ASSERT(wb != NULL);
 
-pagevec_init(&pv);
+pagevec_init(&pv, 0);
 
 do {
 _debug("done %lx-%lx", first, last);
@@ -3803,7 +3803,7 @@ int btree_write_cache_pages(struct address_space *mapping,
 int scanned = 0;
 int tag;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 if (wbc->range_cyclic) {
 index = mapping->writeback_index; /* Start from prev offset */
 end = -1;
@@ -3946,7 +3946,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
 if (!igrab(inode))
 return 0;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 if (wbc->range_cyclic) {
 index = mapping->writeback_index; /* Start from prev offset */
 end = -1;
@@ -1649,7 +1649,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
 struct buffer_head *head;
 
 end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) {
 count = pagevec_count(&pvec);
 for (i = 0; i < count; i++) {
@@ -3579,7 +3579,7 @@ page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
 if (length <= 0)
 return -ENOENT;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 
 do {
 unsigned nr_pages, i;
@@ -724,7 +724,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 /* calculate the shift required to use bmap */
 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
 
-pagevec_init(&pagevec);
+pagevec_init(&pagevec, 0);
 
 op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
 op->op.flags |= FSCACHE_OP_ASYNC;
@@ -858,7 +858,7 @@ int cachefiles_allocate_pages(struct fscache_retrieval *op,
 
 ret = cachefiles_has_space(cache, 0, *nr_pages);
 if (ret == 0) {
-pagevec_init(&pagevec);
+pagevec_init(&pagevec, 0);
 
 list_for_each_entry(page, pages, lru) {
 if (pagevec_add(&pagevec, page) == 0)
@@ -672,7 +672,7 @@ static void ceph_release_pages(struct page **pages, int num)
 struct pagevec pvec;
 int i;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 for (i = 0; i < num; i++) {
 if (pagevec_add(&pvec, pages[i]) == 0)
 pagevec_release(&pvec);
@@ -803,7 +803,7 @@ static int ceph_writepages_start(struct address_space *mapping,
 if (fsc->mount_options->wsize < wsize)
 wsize = fsc->mount_options->wsize;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 
 start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
 index = start_index;
fs/dax.c
@@ -789,7 +789,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 
 tag_pages_for_writeback(mapping, start_index, end_index);
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 while (!done) {
 pvec.nr = find_get_entries_tag(mapping, start_index,
 PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
@@ -478,7 +478,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 index = startoff >> PAGE_SHIFT;
 end = (endoff - 1) >> PAGE_SHIFT;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 do {
 int i;
 unsigned long nr_pages;
@@ -1765,7 +1765,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
 ext4_es_remove_extent(inode, start, last - start + 1);
 }
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 while (index <= end) {
 nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
 if (nr_pages == 0)
@@ -2401,7 +2401,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 lblk = start << bpp_bits;
 pblock = mpd->map.m_pblk;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 while (start <= end) {
 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
 &start, end);
@@ -2672,7 +2672,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 else
 tag = PAGECACHE_TAG_DIRTY;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 mpd->map.m_len = 0;
 mpd->next_page = index;
 while (index <= end) {
@@ -385,7 +385,7 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
 };
 struct blk_plug plug;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 
 blk_start_plug(&plug);
 
@@ -3043,7 +3043,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 int submitted = 0;
 int i;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 
 if (get_dirty_pages(mapping->host) <=
 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
@@ -1511,7 +1511,7 @@ static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
 struct page *last_page = NULL;
 int nr_pages;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 index = 0;
 
 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
@@ -1734,7 +1734,7 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
 return PTR_ERR_OR_ZERO(last_page);
 }
 retry:
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 index = 0;
 
 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
@@ -1881,7 +1881,7 @@ void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
 struct pagevec pvec;
 int nr_pages;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 
 while ((nr_pages = pagevec_lookup_tag(&pvec,
 NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
@@ -1931,7 +1931,7 @@ int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
 int ret = 0;
 int nr_pages, done = 0;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 
 next_step:
 index = 0;
@@ -1180,7 +1180,7 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
 return;
 }
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 next = 0;
 do {
 if (!pagevec_lookup(&pvec, mapping, &next))
@@ -371,7 +371,7 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
 int range_whole = 0;
 int tag;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 if (wbc->range_cyclic) {
 writeback_index = mapping->writeback_index; /* prev offset */
 index = writeback_index;
@@ -422,7 +422,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 
 memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
 pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 next = start;
 while (next < end) {
 /*
@@ -2156,7 +2156,7 @@ static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
 level++)
 INIT_LIST_HEAD(&lists[level]);
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 
 while (pagevec_lookup_tag(&pvec, btcache, &index,
 PAGECACHE_TAG_DIRTY)) {
@@ -255,7 +255,7 @@ int nilfs_copy_dirty_pages(struct address_space *dmap,
 pgoff_t index = 0;
 int err = 0;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 repeat:
 if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))
 return 0;
@@ -309,7 +309,7 @@ void nilfs_copy_back_pages(struct address_space *dmap,
 pgoff_t index = 0;
 int err;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 repeat:
 n = pagevec_lookup(&pvec, smap, &index);
 if (!n)
@@ -373,7 +373,7 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
 unsigned int i;
 pgoff_t index = 0;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 
 while (pagevec_lookup_tag(&pvec, mapping, &index,
 PAGECACHE_TAG_DIRTY)) {
@@ -518,7 +518,7 @@ unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
 index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
 nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 
 repeat:
 pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
@@ -708,7 +708,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
 index = start >> PAGE_SHIFT;
 last = end >> PAGE_SHIFT;
 }
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 repeat:
 if (unlikely(index > last) ||
 !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
@@ -753,7 +753,7 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
 unsigned int i;
 pgoff_t index = 0;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 
 while (pagevec_lookup_tag(&pvec, mapping, &index,
 PAGECACHE_TAG_DIRTY)) {
@@ -17,6 +17,7 @@ struct address_space;
 
 struct pagevec {
 unsigned long nr;
+bool cold;
 bool drained;
 struct page *pages[PAGEVEC_SIZE];
 };
@@ -50,9 +51,10 @@ static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
 return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag);
 }
 
-static inline void pagevec_init(struct pagevec *pvec)
+static inline void pagevec_init(struct pagevec *pvec, int cold)
 {
 pvec->nr = 0;
+pvec->cold = cold;
 pvec->drained = false;
 }
 
@@ -430,7 +430,7 @@ static void __filemap_fdatawait_range(struct address_space *mapping,
 if (end_byte < start_byte)
 return;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 while (index <= end) {
 unsigned i;
 
@@ -289,7 +289,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 struct pagevec pvec_putback;
 int pgrescued = 0;
 
-pagevec_init(&pvec_putback);
+pagevec_init(&pvec_putback, 0);
 
 /* Phase 1: page isolation */
 spin_lock_irq(zone_lru_lock(zone));
@@ -450,7 +450,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 struct pagevec pvec;
 struct zone *zone;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 /*
 * Although FOLL_DUMP is intended for get_dump_page(),
 * it just so happens that its special treatment of the
@@ -2174,7 +2174,7 @@ int write_cache_pages(struct address_space *mapping,
 int range_whole = 0;
 int tag;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 if (wbc->range_cyclic) {
 writeback_index = mapping->writeback_index; /* prev offset */
 index = writeback_index;
@@ -765,7 +765,7 @@ void shmem_unlock_mapping(struct address_space *mapping)
 pgoff_t indices[PAGEVEC_SIZE];
 pgoff_t index = 0;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 /*
 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
 */
@@ -808,7 +808,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 if (lend == -1)
 end = -1; /* unsigned, so actually very big */
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 index = start;
 while (index < end) {
 pvec.nr = find_get_entries(mapping, index,
@@ -2612,7 +2612,7 @@ static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
 bool done = false;
 int i;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 pvec.nr = 1; /* start small: we may be there already */
 while (!done) {
 pvec.nr = find_get_entries(mapping, index,
@@ -210,7 +210,7 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 }
 if (pgdat)
 spin_unlock_irqrestore(&pgdat->lru_lock, flags);
-release_pages(pvec->pages, pvec->nr, 0);
+release_pages(pvec->pages, pvec->nr, pvec->cold);
 pagevec_reinit(pvec);
 }
 
@@ -837,7 +837,7 @@ void __pagevec_release(struct pagevec *pvec)
 lru_add_drain();
 pvec->drained = true;
 }
-release_pages(pvec->pages, pagevec_count(pvec), 0);
+release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
 pagevec_reinit(pvec);
 }
 EXPORT_SYMBOL(__pagevec_release);
@@ -287,7 +287,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 else
 end = (lend + 1) >> PAGE_SHIFT;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 index = start;
 while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
 min(end - index, (pgoff_t)PAGEVEC_SIZE),
@@ -504,7 +504,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
 unsigned long count = 0;
 int i;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
 indices)) {
@@ -634,7 +634,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
 goto out;
 
-pagevec_init(&pvec);
+pagevec_init(&pvec, 0);
 index = start;
 while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,