 	struct hlist_head preempt_notifiers;
 #endif
 
-	/*
-	 * fpu_counter contains the number of consecutive context switches
-	 * that the FPU is used. If this is over a threshold, the lazy fpu
-	 * saving becomes unlazy to save the trap. This is an unsigned char
-	 * so that after 256 times the counter wraps and the behavior turns
-	 * lazy again; this to deal with bursty apps that only use FPU for
-	 * a short time
-	 */
-	unsigned char fpu_counter;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
 #endif
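The block removed above is the lazy-FPU heuristic, which this diff drops from the generic task_struct (the counter lives on in arch-specific state). As a rough illustration of how a context-switch path consumes it, here is a hedged sketch against the old layout; the helper name and the threshold of 5 are invented for the example, x86's switch_fpu_prepare() merely applies a heuristic of this shape.

/*
 * Illustrative sketch only (not arch code): how a context-switch path
 * can consume the fpu_counter field shown in the removed lines above.
 */
static inline bool want_eager_fpu_restore(struct task_struct *next)
{
	/*
	 * After enough consecutive FPU-using context switches, restore
	 * the FPU state eagerly instead of taking the lazy trap.  Since
	 * fpu_counter is an unsigned char it wraps at 256, so a bursty
	 * task eventually falls back to lazy restore.
	 */
	return next->fpu_counter > 5;
}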
 #ifdef CONFIG_NUMA_BALANCING
 	int numa_scan_seq;
-	int numa_migrate_seq;
 	unsigned int numa_scan_period;
+	unsigned int numa_scan_period_max;
+	int numa_preferred_nid;
+	int numa_migrate_deferred;
+	unsigned long numa_migrate_retry;
 	u64 node_stamp;			/* migration stamp */
 	struct callback_head numa_work;
+
+	struct list_head numa_entry;
+	struct numa_group *numa_group;
+
+	/*
+	 * Exponential decaying average of faults on a per-node basis.
+	 * Scheduling placement decisions are made based on the these counts.
+	 * The values remain static for the duration of a PTE scan
+	 */
+	unsigned long *numa_faults;
+	unsigned long total_numa_faults;
+
+	/*
+	 * numa_faults_buffer records faults per node during the current
+	 * scan window. When the scan completes, the counts in numa_faults
+	 * decay and these values are copied.
+	 */
+	unsigned long *numa_faults_buffer;
+
+	/*
+	 * numa_faults_locality tracks if faults recorded during the last
+	 * scan window were remote/local. The task scan period is adapted
+	 * based on the locality of the faults with different weights
+	 * depending on whether they were shared or private faults
+	 */
+	unsigned long numa_faults_locality[2];
+
+	unsigned long numa_pages_migrated;
 #endif /* CONFIG_NUMA_BALANCING */
 
 	struct rcu_head rcu;
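The two arrays introduced above cooperate: numa_faults_buffer collects hinting faults for the scan window in progress, while numa_faults keeps the decaying per-node average that placement decisions read. Below is a minimal sketch of the fold step described in the comments, assuming a flat per-node index; the helper name is invented, and the in-tree version (task_numa_placement() in kernel/sched/fair.c) additionally splits private and shared faults.

/*
 * Illustrative sketch only: at the end of a scan window, decay the
 * per-node averages and fold in the faults recorded during the window.
 */
static void example_decay_numa_faults(struct task_struct *p, int nr_nodes)
{
	unsigned long total = 0;
	int nid;

	for (nid = 0; nid < nr_nodes; nid++) {
		/* Halve the old average, then add this window's count. */
		p->numa_faults[nid] >>= 1;
		p->numa_faults[nid] += p->numa_faults_buffer[nid];
		p->numa_faults_buffer[nid] = 0;

		total += p->numa_faults[nid];
	}
	p->total_numa_faults = total;
}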
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
+#define TNF_MIGRATED	0x01
+#define TNF_NO_GROUP	0x02
+#define TNF_SHARED	0x04
+#define TNF_FAULT_LOCAL	0x08
+
 #ifdef CONFIG_NUMA_BALANCING
-extern void task_numa_fault(int node, int pages, bool migrated);
+extern void task_numa_fault(int last_node, int node, int pages, int flags);
+extern pid_t task_numa_group_id(struct task_struct *p);
 extern void set_numabalancing_state(bool enabled);
+extern void task_numa_free(struct task_struct *p);
+
+extern unsigned int sysctl_numa_balancing_migrate_deferred;
 #else
-static inline void task_numa_fault(int node, int pages, bool migrated)
+static inline void task_numa_fault(int last_node, int node, int pages,
+				   int flags)
 {
 }
+static inline pid_t task_numa_group_id(struct task_struct *p)
+{
+	return 0;
+}
 static inline void set_numabalancing_state(bool enabled)
 {
 }
+static inline void task_numa_free(struct task_struct *p)
+{
+}
 #endif
 
 static inline struct pid *task_pid(struct task_struct *task)
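The TNF_* flags added above are how a NUMA hinting fault describes itself to the scheduler through the new task_numa_fault() signature. A hedged sketch of a caller follows; the function and its arguments are invented for illustration, the real callers being the hinting-fault handlers in mm/ (e.g. do_numa_page()).

/*
 * Illustrative sketch only: translate what a NUMA hinting fault
 * observed into TNF_* flags and report it to the scheduler.
 */
static void example_report_numa_fault(int last_node, int page_node,
				      int nr_pages, bool migrated, bool shared)
{
	int flags = 0;

	if (migrated)
		flags |= TNF_MIGRATED;		/* page was moved to this node */
	if (shared)
		flags |= TNF_SHARED;		/* mapping is shared with others */
	if (page_node == numa_node_id())
		flags |= TNF_FAULT_LOCAL;	/* fault hit a node-local page */

	task_numa_fault(last_node, page_node, nr_pages, flags);
}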
 	return task_thread_info(p)->status & TS_POLLING;
 }
-static inline void current_set_polling(void)
+static inline void __current_set_polling(void)
 {
 	current_thread_info()->status |= TS_POLLING;
 }
 
-static inline void current_clr_polling(void)
+static inline bool __must_check current_set_polling_and_test(void)
+{
+	__current_set_polling();
+
+	/*
+	 * Polling state must be visible before we test NEED_RESCHED,
+	 * paired by resched_task()
+	 */
+	smp_mb();
+
+	return unlikely(tif_need_resched());
+}
+
+static inline void __current_clr_polling(void)
 {
 	current_thread_info()->status &= ~TS_POLLING;
-	smp_mb__after_clear_bit();
 }
+
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+	__current_clr_polling();
+
+	/*
+	 * Polling state must be visible before we test NEED_RESCHED,
+	 * paired by resched_task()
+	 */
+	smp_mb();
+
+	return unlikely(tif_need_resched());
+}
 
 #elif defined(TIF_POLLING_NRFLAG)
 static inline int tsk_is_polling(struct task_struct *p)
 {
 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
 }
-static inline void current_set_polling(void)
+
+static inline void __current_set_polling(void)
 {
 	set_thread_flag(TIF_POLLING_NRFLAG);
 }
 
-static inline void current_clr_polling(void)
+static inline bool __must_check current_set_polling_and_test(void)
+{
+	__current_set_polling();
+
+	/*
+	 * Polling state must be visible before we test NEED_RESCHED,
+	 * paired by resched_task()
+	 *
+	 * XXX: assumes set/clear bit are identical barrier wise.
+	 */
+	smp_mb__after_clear_bit();
+
+	return unlikely(tif_need_resched());
+}
+
+static inline void __current_clr_polling(void)
 {
 	clear_thread_flag(TIF_POLLING_NRFLAG);
 }
+
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+	__current_clr_polling();
+
+	/*
+	 * Polling state must be visible before we test NEED_RESCHED,
+	 * paired by resched_task()
+	 */
+	smp_mb__after_clear_bit();
+
+	return unlikely(tif_need_resched());
+}
+
 #else
 static inline int tsk_is_polling(struct task_struct *p) { return 0; }
-static inline void current_set_polling(void) { }
-static inline void current_clr_polling(void) { }
+static inline void __current_set_polling(void) { }
+static inline void __current_clr_polling(void) { }
+
+static inline bool __must_check current_set_polling_and_test(void)
+{
+	return unlikely(tif_need_resched());
+}
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+	return unlikely(tif_need_resched());
+}
 #endif
+
+static __always_inline bool need_resched(void)
+{
+	return unlikely(tif_need_resched());
+}
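The *_and_test() helpers added above exist so a CPU can advertise polling and re-check NEED_RESCHED in one step: resched_task() on another CPU checks tsk_is_polling() and skips the reschedule IPI when the target is polling, so the flag must be set (and made visible by the barrier) before the test. A hedged sketch of the intended usage pattern; the function is invented for illustration, the real users being the idle loops.

/*
 * Illustrative sketch only: the usage pattern the helpers above are
 * designed for.  Returns true when the caller should reschedule.
 */
static bool example_poll_for_work(void)
{
	if (!current_set_polling_and_test()) {
		/*
		 * Spin until a remote CPU sets NEED_RESCHED for us; it
		 * skips the reschedule IPI because it sees us polling.
		 */
		while (!need_resched())
			cpu_relax();
	}

	/*
	 * Clear the polling flag; the _and_test variant re-checks
	 * NEED_RESCHED so a concurrent resched_task() is never lost.
	 */
	return current_clr_polling_and_test();
}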
 
 /*
  * Thread group CPU time accounting.
  */