memcg: remove mem_cgroup_uncharge
Nothing uses mem_cgroup_uncharge apart from mem_cgroup_uncharge_page (a trivial wrapper around it) and mem_cgroup_end_migration (which does the same as mem_cgroup_uncharge_page). And it often ends up having to lock just to let its caller unlock.

Remove it (but leave the silly locking until a later patch).

Moved mem_cgroup_cache_charge next to mem_cgroup_charge in memcontrol.h.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7e924aafa4
commit 8289546e57
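The locking shuffle described in the message is easiest to see side by side. The sketch below is not the kernel source: it is a minimal user-space model in which struct page is a stub, the page_cgroup lock is a plain flag, and the LRU removal is reduced to a comment. It only illustrates the call shape before and after this patch, where the old inner helper re-takes the lock purely so that its wrapper's unlock balances; the real change is the diff that follows.

/*
 * Minimal user-space sketch (NOT the kernel code): stub types, a flag
 * standing in for the page_cgroup lock, LRU work elided to a comment.
 */
#include <assert.h>
#include <stdio.h>

struct page { int locked; int charged; };

static void lock_page_cgroup(struct page *p)   { assert(!p->locked); p->locked = 1; }
static void unlock_page_cgroup(struct page *p) { assert(p->locked);  p->locked = 0; }

/*
 * Old shape: the helper runs under its caller's lock, drops it for the
 * (stubbed) LRU removal, then re-takes it only so that the caller's
 * unlock_page_cgroup() still balances.
 */
static void old_mem_cgroup_uncharge(struct page *p)
{
	if (!p->charged)
		return;			/* caller still holds the lock */
	p->charged = 0;
	unlock_page_cgroup(p);		/* drop across the LRU removal */
	/* ... remove the page from its per-zone LRU list ... */
	lock_page_cgroup(p);		/* re-take just for the caller */
}

static void old_mem_cgroup_uncharge_page(struct page *p)
{
	lock_page_cgroup(p);
	old_mem_cgroup_uncharge(p);
	unlock_page_cgroup(p);
}

/* New shape: one function owns the lock for the whole operation. */
static void new_mem_cgroup_uncharge_page(struct page *p)
{
	lock_page_cgroup(p);
	if (p->charged) {
		p->charged = 0;
		unlock_page_cgroup(p);
		/* ... remove the page from its per-zone LRU list ... */
		lock_page_cgroup(p);	/* still silly; a later patch drops this */
	}
	unlock_page_cgroup(p);
}

int main(void)
{
	struct page a = { 0, 1 }, b = { 0, 1 };

	old_mem_cgroup_uncharge_page(&a);
	new_mem_cgroup_uncharge_page(&b);
	printf("uncharged: old path %d, new path %d\n", !a.charged, !b.charged);
	return 0;
}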
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -35,7 +35,8 @@ extern void mm_free_cgroup(struct mm_struct *mm);
 extern struct page_cgroup *page_get_page_cgroup(struct page *page);
 extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
-extern void mem_cgroup_uncharge(struct page_cgroup *pc);
+extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+					gfp_t gfp_mask);
 extern void mem_cgroup_uncharge_page(struct page *page);
 extern void mem_cgroup_move_lists(struct page *page, bool active);
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -45,8 +46,6 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct mem_cgroup *mem_cont,
 					int active);
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
-extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
-					gfp_t gfp_mask);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
 
 #define mm_match_cgroup(mm, cgroup) \
@@ -92,14 +91,16 @@ static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
 	return NULL;
 }
 
-static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
-					gfp_t gfp_mask)
+static inline int mem_cgroup_charge(struct page *page,
+					struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return 0;
 }
 
-static inline void mem_cgroup_uncharge(struct page_cgroup *pc)
+static inline int mem_cgroup_cache_charge(struct page *page,
+					struct mm_struct *mm, gfp_t gfp_mask)
 {
+	return 0;
 }
 
 static inline void mem_cgroup_uncharge_page(struct page *page)
@@ -110,13 +111,6 @@ static inline void mem_cgroup_move_lists(struct page *page, bool active)
 {
 }
 
-static inline int mem_cgroup_cache_charge(struct page *page,
-						struct mm_struct *mm,
-						gfp_t gfp_mask)
-{
-	return 0;
-}
-
 static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
 {
 	return 1;
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -697,20 +697,22 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 
 /*
  * Uncharging is always a welcome operation, we never complain, simply
- * uncharge. This routine should be called with lock_page_cgroup held
+ * uncharge.
  */
-void mem_cgroup_uncharge(struct page_cgroup *pc)
+void mem_cgroup_uncharge_page(struct page *page)
 {
+	struct page_cgroup *pc;
 	struct mem_cgroup *mem;
 	struct mem_cgroup_per_zone *mz;
-	struct page *page;
 	unsigned long flags;
 
 	/*
 	 * Check if our page_cgroup is valid
 	 */
+	lock_page_cgroup(page);
+	pc = page_get_page_cgroup(page);
 	if (!pc)
-		return;
+		goto unlock;
 
 	if (atomic_dec_and_test(&pc->ref_cnt)) {
 		page = pc->page;
@@ -731,12 +733,8 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
 		}
 		lock_page_cgroup(page);
 	}
-}
 
-void mem_cgroup_uncharge_page(struct page *page)
-{
-	lock_page_cgroup(page);
-	mem_cgroup_uncharge(page_get_page_cgroup(page));
+unlock:
 	unlock_page_cgroup(page);
 }
 
@@ -759,12 +757,7 @@ int mem_cgroup_prepare_migration(struct page *page)
 
 void mem_cgroup_end_migration(struct page *page)
 {
-	struct page_cgroup *pc;
-
-	lock_page_cgroup(page);
-	pc = page_get_page_cgroup(page);
-	mem_cgroup_uncharge(pc);
-	unlock_page_cgroup(page);
+	mem_cgroup_uncharge_page(page);
 }
 /*
  * We know both *page* and *newpage* are now not-on-LRU and Pg_locked.