~ubuntu-branches/debian/experimental/linux-tools/experimental

Viewing changes to include/linux/mm_types.h

  • Committer: Package Import Robot
  • Author(s): Ben Hutchings
  • Date: 2014-02-02 16:57:49 UTC
  • mfrom: (1.1.10) (0.1.21 sid)
  • Revision ID: package-import@ubuntu.com-20140202165749-tw94o9t1t0a8txk6
Tags: 3.13-1~exp2
Merge changes from sid up to 3.12.6-3

@@ -23,7 +23,10 @@
 
 struct address_space;
 
-#define USE_SPLIT_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
+		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
 
 /*
  * Each physical page in the system has a struct page associated with
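The single USE_SPLIT_PTLOCKS switch is replaced by a PTE-level and a PMD-level variant, and the new ALLOC_SPLIT_PTLOCKS additionally decides whether the per-page lock still fits into one word of struct page or has to be allocated separately (for example when lock debugging grows spinlock_t). A minimal userspace sketch of that size test, with pthread_mutex_t standing in for spinlock_t; everything below is illustrative, not kernel code:

/* Illustrative userspace sketch, not kernel code: the idea behind
 * ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) is "embed the
 * per-page lock in struct page only if it is no bigger than one word;
 * otherwise allocate it separately".  pthread_mutex_t stands in for
 * spinlock_t here. */
#include <pthread.h>
#include <stdio.h>

int main(void)
{
	size_t lock_size = sizeof(pthread_mutex_t);  /* stand-in for SPINLOCK_SIZE */
	size_t word_size = sizeof(long);             /* BITS_PER_LONG / 8 */

	/* The kernel does this comparison in the preprocessor, because
	 * SPINLOCK_SIZE is a build-time generated constant; it is done at
	 * run time here purely for illustration. */
	if (lock_size > word_size)
		printf("lock (%zu bytes) > word (%zu bytes): allocate ptl separately\n",
		       lock_size, word_size);
	else
		printf("lock (%zu bytes) <= word (%zu bytes): embed ptl in the page\n",
		       lock_size, word_size);
	return 0;
}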
@@ -42,18 +45,22 @@
 	/* First double word block */
 	unsigned long flags;		/* Atomic flags, some possibly
 					 * updated asynchronously */
-	struct address_space *mapping;	/* If low bit clear, points to
-					 * inode address_space, or NULL.
-					 * If page mapped as anonymous
-					 * memory, low bit is set, and
-					 * it points to anon_vma object:
-					 * see PAGE_MAPPING_ANON below.
-					 */
+	union {
+		struct address_space *mapping;	/* If low bit clear, points to
+						 * inode address_space, or NULL.
+						 * If page mapped as anonymous
+						 * memory, low bit is set, and
+						 * it points to anon_vma object:
+						 * see PAGE_MAPPING_ANON below.
+						 */
+		void *s_mem;			/* slab first object */
+	};
+
 	/* Second double word */
 	struct {
 		union {
 			pgoff_t index;		/* Our offset within mapping. */
-			void *freelist;		/* slub/slob first free object */
+			void *freelist;		/* sl[aou]b first free object */
 			bool pfmemalloc;	/* If set by the page allocator,
 						 * ALLOC_NO_WATERMARKS was set
 						 * and the low watermark was not
@@ -109,6 +116,7 @@
 				};
 				atomic_t _count;		/* Usage count, see below. */
 			};
+			unsigned int active;	/* SLAB */
 		};
 	};
 
@@ -130,6 +138,12 @@
 
 		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
+		struct rcu_head rcu_head;	/* Used by SLAB
+						 * when destroying via RCU
+						 */
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
+		pgtable_t pmd_huge_pte; /* protected by page->ptl */
+#endif
 	};
 
 	/* Remainder is not double word aligned */
@@ -141,9 +155,13 @@
 						 * indicates order in the buddy
 						 * system if PG_buddy is set.
 						 */
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
+#if ALLOC_SPLIT_PTLOCKS
+		spinlock_t *ptl;
+#else
 		spinlock_t ptl;
 #endif
+#endif
 		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
 		struct page *first_page;	/* Compound tail pages */
 	};
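With split PTE locks, struct page now either embeds the lock (spinlock_t ptl) or, when ALLOC_SPLIT_PTLOCKS is true, only carries a pointer to a separately allocated lock. Callers are meant to reach the lock through an accessor so they never care which representation was compiled in. A hedged stand-in for that accessor pattern follows; fake_page, page_lockptr and page_lock_init are hypothetical names, and pthread_mutex_t again stands in for spinlock_t:

/* Illustrative stand-in, not kernel code: an accessor that hides whether the
 * per-page lock is embedded in the descriptor or allocated separately,
 * mirroring the spinlock_t ptl vs. spinlock_t *ptl choice above. */
#include <pthread.h>
#include <stdlib.h>

#define ALLOC_SPLIT_LOCK 1	/* pretend the lock is too big to embed */

struct fake_page {
#if ALLOC_SPLIT_LOCK
	pthread_mutex_t *ptl;	/* separately allocated lock */
#else
	pthread_mutex_t ptl;	/* lock embedded in the descriptor */
#endif
};

/* Callers always go through this; they never see which form is active. */
static pthread_mutex_t *page_lockptr(struct fake_page *page)
{
#if ALLOC_SPLIT_LOCK
	return page->ptl;
#else
	return &page->ptl;
#endif
}

static int page_lock_init(struct fake_page *page)
{
#if ALLOC_SPLIT_LOCK
	page->ptl = malloc(sizeof(*page->ptl));
	if (!page->ptl)
		return -1;
#endif
	return pthread_mutex_init(page_lockptr(page), NULL);
}

int main(void)
{
	struct fake_page page;

	if (page_lock_init(&page))
		return 1;
	pthread_mutex_lock(page_lockptr(&page));
	pthread_mutex_unlock(page_lockptr(&page));
	return 0;
}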
@@ -174,8 +192,8 @@
 	void *shadow;
 #endif
 
-#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
-	int _last_nid;
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+	int _last_cpupid;
 #endif
 }
 /*
@@ -309,14 +327,14 @@
 	NR_MM_COUNTERS
 };
 
-#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
+#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
 #define SPLIT_RSS_COUNTING
 /* per-thread cached information, */
 struct task_rss_stat {
 	int events;	/* for synchronization threshold */
 	int count[NR_MM_COUNTERS];
 };
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
 
 struct mm_rss_stat {
 	atomic_long_t count[NR_MM_COUNTERS];
@@ -339,6 +357,7 @@
 	pgd_t * pgd;
 	atomic_t mm_users;			/* How many users with user space? */
 	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
+	atomic_long_t nr_ptes;			/* Page table pages */
 	int map_count;				/* number of VMAs */
 
 	spinlock_t page_table_lock;		/* Protects page tables and some counters */
@@ -360,7 +379,6 @@
 	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
 	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
 	unsigned long def_flags;
-	unsigned long nr_ptes;		/* Page table pages */
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long start_brk, brk, start_stack;
 	unsigned long arg_start, arg_end, env_start, env_end;
@@ -406,7 +424,7 @@
 #ifdef CONFIG_MMU_NOTIFIER
 	struct mmu_notifier_mm *mmu_notifier_mm;
 #endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
 	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
 #endif
 #ifdef CONFIG_CPUMASK_OFFSTACK
@@ -420,28 +438,23 @@
 	 */
 	unsigned long numa_next_scan;
 
-	/* numa_next_reset is when the PTE scanner period will be reset */
-	unsigned long numa_next_reset;
-
 	/* Restart point for scanning and setting pte_numa */
 	unsigned long numa_scan_offset;
 
 	/* numa_scan_seq prevents two threads setting pte_numa */
 	int numa_scan_seq;
-
+#endif
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
 	/*
-	 * The first node a task was scheduled on. If a task runs on
-	 * a different node than Make PTE Scan Go Now.
+	 * An operation with batched TLB flushing is going on. Anything that
+	 * can move process memory needs to flush the TLB when moving a
+	 * PROT_NONE or PROT_NUMA mapped page.
 	 */
-	int first_nid;
+	bool tlb_flush_pending;
 #endif
 	struct uprobes_state uprobes_state;
 };
 
-/* first nid will either be a valid NID or one of these values */
-#define NUMA_PTE_SCAN_INIT	-1
-#define NUMA_PTE_SCAN_ACTIVE	-2
-
 static inline void mm_init_cpumask(struct mm_struct *mm)
 {
 #ifdef CONFIG_CPUMASK_OFFSTACK
@@ -455,4 +468,45 @@
 	return mm->cpu_vm_mask_var;
 }
 
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+	mm->tlb_flush_pending = true;
+
+	/*
+	 * Guarantee that the tlb_flush_pending store does not leak into the
+	 * critical section updating the page tables
+	 */
+	smp_mb__before_spinlock();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
+
 #endif /* _LINUX_MM_TYPES_H */
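The new tlb_flush_pending field and its helpers describe a small protocol: whoever batches PTE changes with a deferred TLB flush sets the flag before taking the page table lock, anything that might move the affected pages checks mm_tlb_flush_pending() under that lock and flushes itself if the flag is set, and the flag is cleared only after the deferred flush has happened. A rough userspace analogue of that ordering, with a mutex standing in for the page table lock and a stub flush; struct fake_mm and the function names are illustrative only:

/* Rough userspace analogue of the set/check/clear protocol above; plain
 * pthread primitives stand in for the page table lock and the barriers,
 * and flush_tlb() is a stub.  Illustrative only, not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_mm {
	pthread_mutex_t page_table_lock;
	atomic_bool tlb_flush_pending;
};

static void flush_tlb(struct fake_mm *mm)
{
	(void)mm;	/* stub: a real flush would invalidate stale entries */
}

/* Writer side: mark the batched flush as pending *before* touching PTEs. */
static void change_protection(struct fake_mm *mm)
{
	atomic_store(&mm->tlb_flush_pending, true);

	pthread_mutex_lock(&mm->page_table_lock);
	/* ... clear/modify PTEs here; stale TLB entries may still exist ... */
	pthread_mutex_unlock(&mm->page_table_lock);

	flush_tlb(mm);					/* the deferred flush */
	atomic_store(&mm->tlb_flush_pending, false);	/* only after flushing */
}

/* Reader side (e.g. something migrating pages): if a batched flush is
 * pending, it must not trust the TLB and has to flush itself first. */
static void maybe_move_page(struct fake_mm *mm)
{
	pthread_mutex_lock(&mm->page_table_lock);
	if (atomic_load(&mm->tlb_flush_pending))
		flush_tlb(mm);
	/* ... safe to move the page now ... */
	pthread_mutex_unlock(&mm->page_table_lock);
}

int main(void)
{
	struct fake_mm mm = {
		.page_table_lock = PTHREAD_MUTEX_INITIALIZER,
		.tlb_flush_pending = false,
	};

	change_protection(&mm);
	maybe_move_page(&mm);
	puts("done");
	return 0;
}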