~ubuntu-branches/debian/experimental/linux-tools/experimental

Viewing changes to include/asm-generic/preempt.h

  • Committer: Package Import Robot
  • Author(s): Ben Hutchings
  • Date: 2014-02-02 16:57:49 UTC
  • mfrom: (1.1.10) (0.1.21 sid)
  • Revision ID: package-import@ubuntu.com-20140202165749-tw94o9t1t0a8txk6
Tags: 3.13-1~exp2
Merge changes from sid up to 3.12.6-3

#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED (0)

static __always_inline int preempt_count(void)
{
        return current_thread_info()->preempt_count;
}

static __always_inline int *preempt_count_ptr(void)
{
        return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
        *preempt_count_ptr() = pc;
}

/*
 * must be macros to avoid header recursion hell
 */
#define task_preempt_count(p) \
        (task_thread_info(p)->preempt_count & ~PREEMPT_NEED_RESCHED)

#define init_task_preempt_count(p) do { \
        task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
        task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)

static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
        return false;
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
        *preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
        *preempt_count_ptr() -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
        /*
         * Because load-store architectures cannot do per-cpu atomic
         * operations, we cannot use PREEMPT_NEED_RESCHED here; the flag
         * might get lost.
         */
        return !--*preempt_count_ptr() && tif_need_resched();
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(void)
{
        return unlikely(!preempt_count() && tif_need_resched());
}

#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()

#ifdef CONFIG_CONTEXT_TRACKING
extern asmlinkage void preempt_schedule_context(void);
#define __preempt_schedule_context() preempt_schedule_context()
#endif
#endif /* CONFIG_PREEMPT */

#endif /* __ASM_PREEMPT_H */
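
The bookkeeping above is easiest to see in a small stand-alone model. The sketch below is not kernel code: toy_thread_info, the toy_* helpers and the printf calls are invented stand-ins for current_thread_info(), the __preempt_count_* primitives and __preempt_schedule(), used only to illustrate how a nested preempt-disable count interacts with a pending need-resched flag. It assumes the same condition as __preempt_count_dec_and_test() in the header: reschedule only when the count drops to zero and tif_need_resched() is set.

/*
 * Stand-alone, user-space model of the generic preempt_count scheme.
 * Everything named toy_* is invented for this illustration; it is not
 * the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_thread_info {
        int preempt_count;              /* nesting depth, 0 == preemptible */
        bool need_resched;              /* models tif_need_resched() */
};

static struct toy_thread_info ti;

/* Mirrors __preempt_count_add(). */
static void toy_preempt_count_add(int val)
{
        ti.preempt_count += val;
}

/*
 * Mirrors __preempt_count_dec_and_test(): true only when the count
 * reaches zero and a reschedule is pending.
 */
static bool toy_preempt_count_dec_and_test(void)
{
        return !--ti.preempt_count && ti.need_resched;
}

static void toy_preempt_disable(void)
{
        toy_preempt_count_add(1);
}

static void toy_preempt_enable(void)
{
        if (toy_preempt_count_dec_and_test())
                printf("count hit 0 with need_resched set: reschedule here\n");
        else
                printf("still non-preemptible (count=%d) or nothing pending\n",
                       ti.preempt_count);
}

int main(void)
{
        toy_preempt_disable();          /* depth 1 */
        toy_preempt_disable();          /* depth 2 (nested) */
        ti.need_resched = true;         /* a wakeup arrives meanwhile */
        toy_preempt_enable();           /* depth 1: no reschedule yet */
        toy_preempt_enable();           /* depth 0: reschedule point */
        return 0;
}

Built as an ordinary C program, only the outermost toy_preempt_enable() reports the reschedule point, which is the behaviour __preempt_count_dec_and_test() and should_resched() encode for the generic, non-per-cpu case in this header.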