attribute page lock and waitqueue functions as __sched

trace_sched_blocked_reason in CFS is really useful for debugging via
trace because it tells where in the call stack the process got stuck.

For example,
           <...>-6143  ( 6136) [005] d..2    50.278987: sched_blocked_reason: pid=6136 iowait=0 caller=SyS_mprotect+0x88/0x208
           <...>-6136  ( 6136) [005] d..2    50.278990: sched_blocked_reason: pid=6142 iowait=0 caller=do_page_fault+0x1f4/0x3b0
           <...>-6142  ( 6136) [006] d..2    50.278996: sched_blocked_reason: pid=6144 iowait=0 caller=SyS_prctl+0x52c/0xb58
           <...>-6144  ( 6136) [006] d..2    50.279007: sched_blocked_reason: pid=6136 iowait=0 caller=vm_mmap_pgoff+0x74/0x104

However, it sometimes gives pointless information like this:
    RenderThread-2322  ( 1805) [006] d.s3    50.319046: sched_blocked_reason: pid=6136 iowait=1 caller=__lock_page_killable+0x17c/0x220
     logd.writer-594   (  587) [002] d.s3    50.334011: sched_blocked_reason: pid=6126 iowait=1 caller=wait_on_page_bit+0x194/0x208
  kworker/u16:13-333   (  333) [007] d.s4    50.343161: sched_blocked_reason: pid=6136 iowait=1 caller=__lock_page_killable+0x17c/0x220

Entries such as wait_on_page_bit and __lock_page_killable are pointless
because they carry no higher-level information that would identify the call stack.

The reason is that the page lock and waitqueues are special synchronization
methods, unlike normal locks (mutex, spinlock).
Let's mark them as "__sched" so that get_wchan(), which trace_sched_blocked_reason
uses, can detect and skip them. This produces more meaningful caller
functions, like this:

           <...>-2867  ( 1068) [002] d.h4   124.209701: sched_blocked_reason: pid=329 iowait=0 caller=worker_thread+0x378/0x470
           <...>-2867  ( 1068) [002] d.s3   124.209763: sched_blocked_reason: pid=8454 iowait=1 caller=__filemap_fdatawait_range+0xa0/0x104
           <...>-2867  ( 1068) [002] d.s4   124.209803: sched_blocked_reason: pid=869 iowait=0 caller=worker_thread+0x378/0x470
 ScreenDecoratio-2364  ( 1867) [002] d.s3   124.209973: sched_blocked_reason: pid=8454 iowait=1 caller=f2fs_wait_on_page_writeback+0x84/0xcc
 ScreenDecoratio-2364  ( 1867) [002] d.s4   124.209986: sched_blocked_reason: pid=869 iowait=0 caller=worker_thread+0x378/0x470
           <...>-329   (  329) [000] d..3   124.210435: sched_blocked_reason: pid=538 iowait=0 caller=worker_thread+0x378/0x470
  kworker/u16:13-538   (  538) [007] d..3   124.210450: sched_blocked_reason: pid=6 iowait=0 caller=worker_thread+0x378/0x470

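For reference, the mechanism this change leans on is small (abridged below
from mainline include/linux/sched/debug.h and kernel/sched/core.c; exact
wording varies across kernel versions): __sched only moves a function's text
into the .sched.text section, and in_sched_functions() treats every address
in that section as scheduler internals to be skipped when reporting a wait
channel.

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* A PC inside .sched.text (or the lock sections) counts as scheduler
 * code, so the wchan walk keeps unwinding past it. */
int in_sched_functions(unsigned long addr)
{
	return in_lock_functions(addr) ||
		(addr >= (unsigned long)__sched_text_start &&
		 addr < (unsigned long)__sched_text_end);
}
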
Bug: 144713689
Change-Id: I30397400c5d056946bdfbc86c9ef5f4d7e6c98fe
Signed-off-by: Minchan Kim <minchan@google.com>
Signed-off-by: azrim <mirzaspc@gmail.com>
 4 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -467,9 +467,9 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 	return pgoff;
 }
 
-extern void __lock_page(struct page *page);
-extern int __lock_page_killable(struct page *page);
-extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+extern void __sched __lock_page(struct page *page);
+extern int __sched __lock_page_killable(struct page *page);
+extern int __sched __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 				unsigned int flags);
 extern void unlock_page(struct page *page);
 
@@ -482,7 +482,7 @@ static inline int trylock_page(struct page *page)
 /*
  * lock_page may only be called if we have the page's inode pinned.
  */
-static inline void lock_page(struct page *page)
+static inline __sched void lock_page(struct page *page)
 {
 	might_sleep();
 	if (!trylock_page(page))
@@ -494,7 +494,7 @@ static inline void lock_page(struct page *page)
  * signals. It returns 0 if it locked the page and -EINTR if it was
  * killed while waiting.
  */
-static inline int lock_page_killable(struct page *page)
+static inline __sched int lock_page_killable(struct page *page)
 {
 	might_sleep();
 	if (!trylock_page(page))
@@ -509,7 +509,7 @@ static inline int lock_page_killable(struct page *page)
  * Return value and mmap_sem implications depend on flags; see
  * __lock_page_or_retry().
  */
-static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
+static inline __sched int lock_page_or_retry(struct page *page, struct mm_struct *mm,
 				     unsigned int flags)
 {
 	might_sleep();
@@ -520,8 +520,8 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
  * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
  * and should not be used directly.
  */
-extern void wait_on_page_bit(struct page *page, int bit_nr);
-extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
+extern void __sched wait_on_page_bit(struct page *page, int bit_nr);
+extern int __sched wait_on_page_bit_killable(struct page *page, int bit_nr);
 
 /*
  * Wait for a page to be unlocked.
@@ -530,13 +530,13 @@ extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
  * ie with increased "page->count" so that the page won't
  * go away during the wait..
  */
-static inline void wait_on_page_locked(struct page *page)
+static inline __sched void wait_on_page_locked(struct page *page)
 {
 	if (PageLocked(page))
 		wait_on_page_bit(compound_head(page), PG_locked);
 }
 
-static inline int wait_on_page_locked_killable(struct page *page)
+static inline __sched int wait_on_page_locked_killable(struct page *page)
 {
 	if (!PageLocked(page))
 		return 0;
@@ -546,7 +546,7 @@ static inline int wait_on_page_locked_killable(struct page *page)
 /*
  * Wait for a page to complete writeback
  */
-static inline void wait_on_page_writeback(struct page *page)
+static inline __sched void wait_on_page_writeback(struct page *page)
 {
 	if (PageWriteback(page))
 		wait_on_page_bit(page, PG_writeback);

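To make the effect concrete, here is a hypothetical caller (demo_read_page
is made up for illustration): with the primitives above placed in
.sched.text, a task sleeping in lock_page() is reported with
caller=demo_read_page instead of __lock_page or wait_on_page_bit.

#include <linux/pagemap.h>

/* Hypothetical caller, for illustration only. */
static int demo_read_page(struct page *page)
{
	lock_page(page);	/* may sleep in __lock_page(), now in .sched.text */
	/* ... operate on the page while it is locked ... */
	unlock_page(page);
	return 0;
}
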
diff --git a/include/linux/wait.h b/include/linux/wait.h
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -7,6 +7,7 @@
 #include <linux/list.h>
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
+#include <linux/sched/debug.h>
 
 #include <asm/current.h>
 #include <uapi/linux/wait.h>
@@ -1013,9 +1014,9 @@ void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
 void prepare_to_wait_exclusive_lifo(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
 long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
 void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
-long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
-int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
-int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
+long __sched wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
+int __sched woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
+int __sched autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
 
 #define DEFINE_WAIT_FUNC(name, function)					\
 	struct wait_queue_entry name = {					\

diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -404,7 +404,7 @@ void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 }
 EXPORT_SYMBOL(finish_wait);
 
-int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
+int __sched autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
 {
 	int ret = default_wake_function(wq_entry, mode, sync, key);
 
@@ -439,7 +439,7 @@ static inline bool is_kthread_should_stop(void)
  *     } smp_mb(); // C
  *     remove_wait_queue(&wq_head, &wait);	wq_entry->flags |= WQ_FLAG_WOKEN;
  */
-long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
+long __sched wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
 {
 	/*
 	 * The below executes an smp_mb(), which matches with the full barrier
@@ -464,7 +464,7 @@ long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
 }
 EXPORT_SYMBOL(wait_woken);
 
-int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
+int __sched woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
 {
 	/* Pairs with the smp_store_mb() in wait_woken(). */
 	smp_mb(); /* C */

diff --git a/mm/filemap.c b/mm/filemap.c
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1001,7 +1001,7 @@ static void wake_up_page(struct page *page, int bit)
 	wake_up_page_bit(page, bit);
 }
 
-static inline int wait_on_page_bit_common(wait_queue_head_t *q,
+static inline __sched int wait_on_page_bit_common(wait_queue_head_t *q,
 		struct page *page, int bit_nr, int state, bool lock)
 {
 	struct wait_page_queue wait_page;
@@ -1073,14 +1073,14 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	return ret;
 }
 
-void wait_on_page_bit(struct page *page, int bit_nr)
+void __sched wait_on_page_bit(struct page *page, int bit_nr)
 {
 	wait_queue_head_t *q = page_waitqueue(page);
 	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
 
-int wait_on_page_bit_killable(struct page *page, int bit_nr)
+int __sched wait_on_page_bit_killable(struct page *page, int bit_nr)
 {
 	wait_queue_head_t *q = page_waitqueue(page);
 	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false);
@@ -1211,7 +1211,7 @@ EXPORT_SYMBOL_GPL(page_endio);
  * __lock_page - get a lock on the page, assuming we need to sleep to get it
  * @__page: the page to lock
  */
-void __lock_page(struct page *__page)
+void __sched __lock_page(struct page *__page)
 {
 	struct page *page = compound_head(__page);
 	wait_queue_head_t *q = page_waitqueue(page);
@@ -1219,7 +1219,7 @@ void __lock_page(struct page *__page)
 }
 EXPORT_SYMBOL(__lock_page);
 
-int __lock_page_killable(struct page *__page)
+int __sched __lock_page_killable(struct page *__page)
 {
 	struct page *page = compound_head(__page);
 	wait_queue_head_t *q = page_waitqueue(page);
@@ -1238,7 +1238,7 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
  * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
  * with the page locked and the mmap_sem unperturbed.
  */
-int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+int __sched __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 			 unsigned int flags)
 {
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
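
On the consumer side, a simplified sketch of get_wchan() (modeled on the
arm64 implementation; every architecture carries its own) shows why the
annotations above are sufficient: the stack walk reports the first frame
that is not in .sched.text, which after this patch is the primitive's caller.

/* Simplified sketch, not the verbatim arm64 code: unwind the blocked
 * task's stack and return the first PC outside scheduler/lock code. */
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long ret = 0;
	int count = 0;

	/* Start from the task's saved frame pointer and program counter. */
	frame.fp = thread_saved_fp(p);
	frame.pc = thread_saved_pc(p);

	do {
		if (unwind_frame(p, &frame))	/* walk one frame up */
			break;
		if (!in_sched_functions(frame.pc)) {
			ret = frame.pc;		/* first non-__sched caller */
			break;
		}
	} while (count++ < 16);

	return ret;
}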