--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -24,3 +24,6 @@
 struct address_space;
 
-#define USE_SPLIT_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
+		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
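Three macros now stand where one stood. USE_SPLIT_PTE_PTLOCKS is the old split-lock test renamed; USE_SPLIT_PMD_PTLOCKS additionally requires the architecture to opt in; and ALLOC_SPLIT_PTLOCKS asks whether spinlock_t still fits in one word of struct page, since debug options such as lockdep can grow it past that. Below is a minimal sketch of how helpers can dispatch on that last macro; the kernel's real ptlock_* helpers live in include/linux/mm.h and mm/memory.c, so treat the details here as illustrative rather than the actual implementation.

/*
 * Illustrative sketch only: pick between a spinlock embedded in
 * struct page and one allocated separately, depending on whether
 * spinlock_t still fits in a word (ALLOC_SPLIT_PTLOCKS).
 */
#if ALLOC_SPLIT_PTLOCKS
static bool ptlock_init(struct page *page)
{
	spinlock_t *ptl = kmalloc(sizeof(*ptl), GFP_KERNEL);

	if (!ptl)
		return false;
	spin_lock_init(ptl);
	page->ptl = ptl;		/* lock lives outside struct page */
	return true;
}

static spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else
static bool ptlock_init(struct page *page)
{
	spin_lock_init(&page->ptl);	/* lock embedded in struct page */
	return true;
}

static spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif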
@@ -29 +32 @@
  * Each physical page in the system has a struct page associated with
@@ -42,10 +45,13 @@
 	/* First double word block */
 	unsigned long flags;		/* Atomic flags, some possibly
 					 * updated asynchronously */
-	struct address_space *mapping;	/* If low bit clear, points to
-					 * inode address_space, or NULL.
-					 * If page mapped as anonymous
-					 * memory, low bit is set, and
-					 * it points to anon_vma object:
-					 * see PAGE_MAPPING_ANON below.
-					 */
+	union {
+		struct address_space *mapping;	/* If low bit clear, points to
+						 * inode address_space, or NULL.
+						 * If page mapped as anonymous
+						 * memory, low bit is set, and
+						 * it points to anon_vma object:
+						 * see PAGE_MAPPING_ANON below.
+						 */
+		void *s_mem;			/* slab first object */
+	};
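The mapping comment describes a pointer-tagging trick: both struct address_space and struct anon_vma are word-aligned, so the low bit of page->mapping is free to distinguish file-backed from anonymous pages. A sketch of the decoding under that assumption follows; the example_* names are hypothetical, the real helpers being PageAnon() and page_mapping().

#define PAGE_MAPPING_ANON	1

static inline int example_page_is_anon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static inline struct address_space *example_page_mapping(struct page *page)
{
	unsigned long mapping = (unsigned long)page->mapping;

	if (mapping & PAGE_MAPPING_ANON)
		return NULL;	/* really an anon_vma pointer, see above */
	return (struct address_space *)mapping;
}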
@@ -52,8 +59,8 @@
 	/* Second double word */
 	struct {
 		union {
 			pgoff_t index;		/* Our offset within mapping. */
-			void *freelist;		/* slub/slob first free object */
+			void *freelist;		/* sl[aou]b first free object */
 			bool pfmemalloc;	/* If set by the page allocator,
 						 * ALLOC_NO_WATERMARKS was set
 						 * and the low watermark was not
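pfmemalloc can overlay index and freelist because a page serves at most one of those roles at a time; a page flagged this way came from the emergency reserves and should only be used in the service of freeing memory. A hypothetical consumer, modeled loosely on how networking propagates the flag into its buffers so the socket layer can refuse ordinary traffic on reserve memory:

struct example_buf {
	struct page *page;
	bool pfmemalloc;
};

static void example_fill_buf(struct example_buf *buf, struct page *page)
{
	buf->page = page;
	buf->pfmemalloc = page->pfmemalloc;	/* propagate the reserve taint */
}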
@@ -131,2 +139,8 @@
 		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
+		struct rcu_head rcu_head;	/* Used by SLAB
+						 * when destroying via RCU
+						 */
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
+		pgtable_t pmd_huge_pte; /* protected by page->ptl */
+#endif
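pmd_huge_pte is THP's deposited page table, kept on hand so a huge PMD can always be split back into PTEs without allocating. With USE_SPLIT_PMD_PTLOCKS it migrates from mm_struct (see the hunk further down) into the struct page of the PMD page table, so the deposit list is covered by that page's ptl instead of the global page_table_lock. A sketch of a caller, where pmd_lockptr() resolves to whichever lock applies; the function name here is hypothetical:

static void example_install_huge_pmd(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmd, pgtable_t pgtable, pmd_t entry)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);	/* page->ptl or page_table_lock */

	spin_lock(ptl);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);	/* stash for a later split */
	set_pmd_at(mm, addr, pmd, entry);
	spin_unlock(ptl);
}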
@@ -135 +149 @@
 	/* Remainder is not double word aligned */
@@ -141,8 +155,12 @@
 						 * indicates order in the buddy
 						 * system if PG_buddy is set.
 						 */
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
+#if ALLOC_SPLIT_PTLOCKS
+		spinlock_t *ptl;
+#else
 		spinlock_t ptl;
+#endif
 #endif
 		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
 		struct page *first_page;	/* Compound tail pages */
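With the lock (or a pointer to it) living in the page-table page's own struct page, every PTE table gets an independent lock and faults on different tables stop contending. Callers never pick the lock themselves; the long-standing pte_offset_map_lock()/pte_unmap_unlock() pattern resolves to the per-page ptl when USE_SPLIT_PTE_PTLOCKS is on and to mm->page_table_lock otherwise. A sketch with a hypothetical function name:

static int example_touch_pte(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_present(*pte)) {
		pte_unmap_unlock(pte, ptl);
		return -EFAULT;
	}
	/* ... operate on the PTE under its (possibly per-page) lock ... */
	pte_unmap_unlock(pte, ptl);
	return 0;
}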
@@ -312,11 +330,11 @@
-#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
+#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
 #define SPLIT_RSS_COUNTING
 /* per-thread cached information, */
 struct task_rss_stat {
 	int events;	/* for synchronization threshold */
 	int count[NR_MM_COUNTERS];
 };
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
 
 struct mm_rss_stat {
 	atomic_long_t count[NR_MM_COUNTERS];
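task_rss_stat and mm_rss_stat together implement split RSS accounting: each thread counts its own events in plain ints and folds them into the shared atomic counters only after a threshold of events has accumulated, keeping the fault fast path free of atomics. A simplified sketch of that batching (the real logic and the 64-event TASK_RSS_EVENTS_THRESH live in mm/memory.c; the function name here is hypothetical):

#define TASK_RSS_EVENTS_THRESH	(64)

static void example_add_mm_counter(struct mm_struct *mm,
				   struct task_struct *task, int member)
{
	if (likely(task->mm == mm))
		task->rss_stat.count[member]++;	/* cheap, current task only */
	else
		atomic_long_inc(&mm->rss_stat.count[member]);

	if (unlikely(++task->rss_stat.events >= TASK_RSS_EVENTS_THRESH))
		sync_mm_rss(mm);		/* fold the cached deltas */
}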
@@ -340,5 +358,6 @@
 	atomic_t mm_users;			/* How many users with user space? */
 	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
+	atomic_long_t nr_ptes;			/* Page table pages */
 	int map_count;				/* number of VMAs */
 
 	spinlock_t page_table_lock;		/* Protects page tables and some counters */
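nr_ptes does not just move up; it changes type. As an atomic_long_t it can be updated from page-table allocation and free paths without taking page_table_lock, which would otherwise reintroduce exactly the bottleneck the split locks remove. The shape of the change at a typical call site (illustrative):

static void example_account_pte_page(struct mm_struct *mm)
{
	/*
	 * Before: the counter could only be touched under the
	 * global page_table_lock:
	 *
	 *	spin_lock(&mm->page_table_lock);
	 *	mm->nr_ptes++;
	 *	spin_unlock(&mm->page_table_lock);
	 */

	/* After: a lockless update alongside the split page-table locks. */
	atomic_long_inc(&mm->nr_ptes);
}

The old slot of the counter disappears in the next hunk.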
@@ -360,7 +379,6 @@
 	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
 	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
 	unsigned long def_flags;
-	unsigned long nr_ptes;		/* Page table pages */
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long start_brk, brk, start_stack;
 	unsigned long arg_start, arg_end, env_start, env_end;
@@ -406,7 +424,7 @@
 #ifdef CONFIG_MMU_NOTIFIER
 	struct mmu_notifier_mm *mmu_notifier_mm;
 #endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
 	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
 #endif
 #ifdef CONFIG_CPUMASK_OFFSTACK
@@ -421,18 +439,17 @@
 	unsigned long numa_next_scan;
 
-	/* numa_next_reset is when the PTE scanner period will be reset */
-	unsigned long numa_next_reset;
-
 	/* Restart point for scanning and setting pte_numa */
 	unsigned long numa_scan_offset;
 
 	/* numa_scan_seq prevents two threads setting pte_numa */
 	int numa_scan_seq;
-
-	/*
-	 * The first node a task was scheduled on. If a task runs on
-	 * a different node than Make PTE Scan Go Now.
-	 */
-	int first_nid;
 #endif
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+	/*
+	 * An operation with batched TLB flushing is going on. Anything that
+	 * can move process memory needs to flush the TLB when moving a
+	 * PROT_NONE or PROT_NUMA mapped page.
+	 */
+	bool tlb_flush_pending;
+#endif
 	struct uprobes_state uprobes_state;
@@ -441,10 +458,6 @@
-/* first nid will either be a valid NID or one of these values */
-#define NUMA_PTE_SCAN_INIT	-1
-#define NUMA_PTE_SCAN_ACTIVE	-2
-
 static inline void mm_init_cpumask(struct mm_struct *mm)
 {
 #ifdef CONFIG_CPUMASK_OFFSTACK
 	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
 #endif
 }
@@ -453,4 +466,4 @@
 static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 {
 	return mm->cpu_vm_mask_var;
 }
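For orientation, the NUMA-balancing fields that survive above are consumed by the periodic PTE scanner roughly as sketched below; this is heavily simplified from the task_numa_work() flow in kernel/sched/fair.c, and the function name is hypothetical. numa_next_reset and first_nid are gone because the scan-period-reset sysctl and the delayed-start heuristic they served were dropped.

static void example_numa_scan(struct mm_struct *mm, unsigned long chunk)
{
	unsigned long start;

	if (time_before(jiffies, mm->numa_next_scan))
		return;					/* not due yet */

	start = mm->numa_scan_offset;
	if (start == 0)
		mm->numa_scan_seq++;			/* a new full pass begins */

	/* ... mark VMAs in [start, start + chunk) prot_numa ... */

	mm->numa_scan_offset = start + chunk;		/* restart point next time */
}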
@@ -457,0 +471,40 @@
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+	mm->tlb_flush_pending = true;
+
+	/*
+	 * Guarantee that the tlb_flush_pending store does not leak into the
+	 * critical section updating the page tables
+	 */
+	smp_mb__before_spinlock();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
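These helpers define a small protocol around the new tlb_flush_pending field: a batched-flush writer sets the flag before it starts rewriting PTEs and clears it after the flush, while anyone moving pages reads the flag under the PTE lock and assumes stale TLB entries whenever it is set. The barrier comments above are what make the flag trustworthy outside page_table_lock. A hypothetical writer-side user, in the mold of an mprotect()-style range update:

static void example_change_protection(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	set_tlb_flush_pending(mm);

	/* ... clear/rewrite PTEs in [start, end) under their ptls ... */

	flush_tlb_mm(mm);		/* the batched flush */
	clear_tlb_flush_pending(mm);
}

On the read side, migration-style code calls mm_tlb_flush_pending() to decide whether an extra flush is needed before it is safe to move a page.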
@@ -458 +512 @@
 #endif /* _LINUX_MM_TYPES_H */