#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED	(0)

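/*
 * The generic flavour keeps the preempt count in thread_info; the
 * helpers below read, expose and write that field directly.
 */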
static __always_inline int preempt_count(void)
{
	return current_thread_info()->preempt_count;
}

static __always_inline int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
	*preempt_count_ptr() = pc;
}

/*
 * must be macros to avoid header recursion hell
 */
#define task_preempt_count(p) \
	(task_thread_info(p)->preempt_count & ~PREEMPT_NEED_RESCHED)

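/*
 * A freshly forked task starts out with preemption disabled
 * (PREEMPT_DISABLED comes from <linux/sched.h>) until the scheduler
 * has taken it over; the idle tasks run with the count at
 * PREEMPT_ENABLED.
 */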
#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)

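/*
 * This generic version leaves the NEED_RESCHED bit in the thread_info
 * flags instead of folding it into the preempt count, so the three
 * hooks below are deliberate no-ops and the folded bit always tests
 * false. Architectures that do fold the bit (e.g. x86) override this
 * header with their own asm/preempt.h.
 */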
static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
	return false;
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	*preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
	*preempt_count_ptr() -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
	/*
	 * Because load-store architectures cannot do per-cpu atomic
	 * operations, we cannot use PREEMPT_NEED_RESCHED here; it might
	 * get lost.
	 */
	return !--*preempt_count_ptr() && tif_need_resched();
}

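/*
 * A sketch of the intended caller, simplified from the
 * preempt_enable() definition in <linux/preempt.h> (not the verbatim
 * macro):
 *
 *	#define preempt_enable() do { \
 *		barrier(); \
 *		if (unlikely(preempt_count_dec_and_test())) \
 *			__preempt_schedule(); \
 *	} while (0)
 */
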
/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(void)
{
	return unlikely(!preempt_count() && tif_need_resched());
}

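/*
 * At this point in the tree, should_resched() is what backs
 * cond_resched() and friends in kernel/sched/core.c.
 */
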
#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()

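/*
 * When context tracking is enabled, a preemption point must also keep
 * the tracked state consistent: preempt_schedule_context() (see
 * kernel/context_tracking.c) wraps preempt_schedule() in
 * exception_enter()/exception_exit().
 */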
#ifdef CONFIG_CONTEXT_TRACKING
extern asmlinkage void preempt_schedule_context(void);
#define __preempt_schedule_context() preempt_schedule_context()
#endif

#endif /* CONFIG_PREEMPT */

#endif /* __ASM_PREEMPT_H */