/******************************************************************************
 * flushtlb.c
 *
 * TLB flushes are timestamped using a global virtual 'clock' which ticks
 * on any TLB flush on any processor.
 *
 * Copyright (c) 2003-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/sched.h>
#include <xen/softirq.h>
#include <asm/flushtlb.h>
#include <asm/page.h>

/* Debug builds: Wrap frequently to stress-test the wrap logic. */
#ifdef NDEBUG
#define WRAP_MASK (0xFFFFFFFFU)
#else
#define WRAP_MASK (0x000003FFU)
#endif

u32 tlbflush_clock = 1U;
DEFINE_PER_CPU(u32, tlbflush_time);

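/*
 * Illustrative sketch only (not part of the real interface): how the pair
 * (tlbflush_clock, per-CPU tlbflush_time) is meant to be consumed. A page
 * whose mapping was zapped at clock value 'zap_stamp' (a hypothetical name)
 * cannot be stale on a CPU that has flushed since then. The authoritative
 * test is NEED_FLUSH() in asm/flushtlb.h; this simplified version ignores
 * the subtleties of comparing stamps across a clock wrap.
 */
static inline int example_needs_flush(u32 cpu_stamp, u32 zap_stamp)
{
    /* A clock of 0 means a wrap/global shootdown is in progress: play safe. */
    if ( tlbflush_clock == 0 )
        return 1;
    /* Flush iff this CPU last flushed before the mapping was zapped. */
    return cpu_stamp < zap_stamp;
}
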
/*
 * pre_flush(): Increment the virtual TLB-flush clock. Returns new clock value.
 *
 * This must happen *before* we flush the TLB. If we do it after, we race other
 * CPUs invalidating PTEs. For example, a page invalidated after the flush
 * might get the old timestamp, but this CPU can speculatively fetch the
 * mapping into its TLB after the flush but before inc'ing the clock.
 */
static u32 pre_flush(void)
{
    u32 t, t1, t2;

    t = tlbflush_clock;
    do {
        t1 = t2 = t;
        /* Clock wrapped: someone else is leading a global TLB shootdown. */
        if ( unlikely(t1 == 0) )
            goto skip_clocktick;
        t2 = (t + 1) & WRAP_MASK;
    }
    while ( unlikely((t = cmpxchg(&tlbflush_clock, t1, t2)) != t1) );

    /* Clock wrapped: we will lead a global TLB shootdown. */
    if ( unlikely(t2 == 0) )
        raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ);

 skip_clocktick:
    return t2;
}

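/*
 * Worked example of the wrap, assuming the debug WRAP_MASK of 0x3FF: the
 * clock ticks 1, 2, ..., 0x3FF; the next pre_flush() computes
 * (0x3FF + 1) & 0x3FF == 0, raises the softirq above, and leads a global
 * shootdown. CPUs racing in while the clock reads 0 take the goto rather
 * than advancing the clock into the new period themselves.
 */
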
/*
 * post_flush(): Update this CPU's timestamp with specified clock value.
 *
 * Note that this happens *after* flushing the TLB, as otherwise we can race a
 * NEED_FLUSH() test on another CPU. (e.g., other CPU sees the updated CPU
 * stamp and so does not force a synchronous TLB flush, but the flush in this
 * function hasn't yet occurred and so the TLB might be stale). The ordering
 * would only actually matter if this function were interruptible, and
 * something that abuses the stale mapping could exist in an interrupt
 * handler. In fact neither of these is the case, so really we are being ultra
 * paranoid.
 */
68
static void post_flush(u32 t)
70
this_cpu(tlbflush_time) = t;
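/*
 * Usage pattern: every clock-visible flush below is bracketed as
 *
 *     t = pre_flush();    (tick the clock before flushing)
 *     ... architectural TLB flush ...
 *     post_flush(t);      (then publish this CPU's timestamp)
 *
 * which is exactly how write_cr3() and flush_area_local() use the pair.
 */
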
void write_cr3(unsigned long cr3)
{
    unsigned long flags;
    u32 t;

    /* This non-reentrant function is sometimes called in interrupt context. */
    local_irq_save(flags);

    t = pre_flush();

    hvm_flush_guest_tlbs();

#ifdef USER_MAPPINGS_ARE_GLOBAL
    {
        /*
         * A mov to CR3 does not flush global (CR4.PGE) mappings, so toggle
         * PGE off and back on around the CR3 write to flush those as well.
         */
        unsigned long cr4 = read_cr4();
        write_cr4(cr4 & ~X86_CR4_PGE);
        asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
        write_cr4(cr4);
    }
#else
    asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
#endif

    post_flush(t);

    local_irq_restore(flags);
}

void flush_area_local(const void *va, unsigned int flags)
{
    const struct cpuinfo_x86 *c = &current_cpu_data;
    unsigned int order = (flags - 1) & FLUSH_ORDER_MASK;
    unsigned long irqfl;

    /* This non-reentrant function is sometimes called in interrupt context. */
    local_irq_save(irqfl);

    if ( flags & (FLUSH_TLB|FLUSH_TLB_GLOBAL) )
    {
        if ( order == 0 )
        {
            /*
             * We don't INVLPG multi-page regions because the 2M/4M/1G
             * region may not have been mapped with a superpage. Also there
             * are various errata surrounding INVLPG usage on superpages, and
             * a full flush is in any case not *that* expensive.
             */
            asm volatile ( "invlpg %0"
                           : : "m" (*(const char *)(va)) : "memory" );
        }
        else
        {
            u32 t = pre_flush();

            hvm_flush_guest_tlbs();

#ifndef USER_MAPPINGS_ARE_GLOBAL
            if ( !(flags & FLUSH_TLB_GLOBAL) || !(read_cr4() & X86_CR4_PGE) )
            {
                asm volatile ( "mov %0, %%cr3"
                               : : "r" (read_cr3()) : "memory" );
            }
            else
#endif
            {
                unsigned long cr4 = read_cr4();
                write_cr4(cr4 & ~X86_CR4_PGE);
                barrier();
                write_cr4(cr4);
            }

            post_flush(t);
        }
    }

    if ( flags & FLUSH_CACHE )
    {
        unsigned long i, sz = 0;

        if ( order < (BITS_PER_LONG - PAGE_SHIFT - 1) )
            sz = 1UL << (order + PAGE_SHIFT);

        if ( c->x86_clflush_size && c->x86_cache_size && sz &&
             ((sz >> 10) < c->x86_cache_size) )
        {
            va = (const void *)((unsigned long)va & ~(sz - 1));
            for ( i = 0; i < sz; i += c->x86_clflush_size )
                asm volatile ( "clflush %0"
                               : : "m" (((const char *)va)[i]) );
        }
        else
        {
            wbinvd();
        }
    }

    local_irq_restore(irqfl);
}
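
/*
 * Illustrative usage sketch, assuming the FLUSH_* flag encoding from
 * asm/flushtlb.h in which FLUSH_ORDER(x) stores the order biased by one
 * (hence the '(flags - 1)' above):
 *
 *     flush_area_local(va, FLUSH_TLB | FLUSH_ORDER(0));
 *         -- INVLPG a single 4K page mapping
 *     flush_area_local(va, FLUSH_TLB_GLOBAL | FLUSH_ORDER(9));
 *         -- full flush, including global mappings, covering a 2M region
 *     flush_area_local(va, FLUSH_CACHE | FLUSH_ORDER(0));
 *         -- write back and invalidate the page's cache lines
 */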