mirror of
https://github.com/rd-stuffs/msm-4.14.git
synced 2025-02-20 11:45:48 +08:00
sched/headers: Move the PREEMPT_COUNT defines from <linux/sched.h> to <linux/preempt.h>
These defines are not really part of the scheduler's driver API, but are related to the preempt count - so move them to <linux/preempt.h>. Acked-by: Linus Torvalds <torvalds@linux-foundation.org> Cc: Mike Galbraith <efault@gmx.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
c7af7877ee
commit
d04b0ad37e
@@ -55,6 +55,27 @@
 /* We use the MSB mostly because its available */
 #define PREEMPT_NEED_RESCHED	0x80000000

+#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
+
+/*
+ * Disable preemption until the scheduler is running -- use an unconditional
+ * value so that it also works on !PREEMPT_COUNT kernels.
+ *
+ * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
+ */
+#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET
+
+/*
+ * Initial preempt_count value; reflects the preempt_count schedule invariant
+ * which states that during context switches:
+ *
+ *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
+ *
+ * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
+ * Note: See finish_task_switch().
+ */
+#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
+
 /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
 #include <asm/preempt.h>

@@ -265,27 +265,6 @@ struct task_cputime_atomic {
 		.sum_exec_runtime = ATOMIC64_INIT(0),		\
 	}

-#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
-
-/*
- * Disable preemption until the scheduler is running -- use an unconditional
- * value so that it also works on !PREEMPT_COUNT kernels.
- *
- * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
- */
-#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET
-
-/*
- * Initial preempt_count value; reflects the preempt_count schedule invariant
- * which states that during context switches:
- *
- *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
- *
- * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
- * Note: See finish_task_switch().
- */
-#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
-
 /**
  * struct thread_group_cputimer - thread group interval timer counts
  * @cputime_atomic: atomic thread group interval timers.
Loading…
x
Reference in New Issue
Block a user