@@ -179,7 +179,25 @@
 	panic("Unsafe to continue.");
 }
 
+void flush_remote_page(struct page *page, int order)
+{
+	int i, pages = (1 << order);
+	for (i = 0; i < pages; ++i, ++page) {
+		void *p = kmap_atomic(page);
+		int hfh = 0;
+		int home = page_home(page);
+#if CHIP_HAS_CBOX_HOME_MAP()
+		if (home == PAGE_HOME_HASH)
+			hfh = 1;
+		else
+#endif
+			BUG_ON(home < 0 || home >= NR_CPUS);
+		finv_buffer_remote(p, PAGE_SIZE, hfh);
+		kunmap_atomic(p);
+	}
+}
+
 void homecache_evict(const struct cpumask *mask)
 {
 	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
 }
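The added flush_remote_page() evicts an order-sized allocation from the caches that currently own it, one small page at a time: each page is mapped with kmap_atomic(), and finv_buffer_remote() is pointed either at the page's home cpu or, on chips with a coherence-box home map, told that the lines are hash-for-home distributed (hfh). A minimal caller sketch, assuming a hypothetical buffer that must be pushed out of the home caches before a device reads it; alloc_device_buffer() is illustrative, not part of the patch:

	/* Illustrative only: build a 4-page buffer and evict it from the
	 * home caches so a device sees what the CPU wrote. */
	static struct page *alloc_device_buffer(void)
	{
		struct page *page = alloc_pages(GFP_KERNEL, 2);
		if (!page)
			return NULL;
		/* ... CPU fills the buffer via page_address(page) ... */
		flush_remote_page(page, 2);	/* finv all 1 << 2 pages */
		return page;
	}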
@@ -186,26 +204,34 @@
 
-/* Return a mask of the cpus whose caches currently own these pages. */
-static void homecache_mask(struct page *page, int pages,
-			   struct cpumask *home_mask)
+/*
+ * Return a mask of the cpus whose caches currently own these pages.
+ * The return value is whether the pages are all coherently cached
+ * (i.e. none are immutable, incoherent, or uncached).
+ */
+static int homecache_mask(struct page *page, int pages,
+			  struct cpumask *home_mask)
 {
 	int i;
+	int cached_coherently = 1;
 	cpumask_clear(home_mask);
 	for (i = 0; i < pages; ++i) {
 		int home = page_home(&page[i]);
 		if (home == PAGE_HOME_IMMUTABLE ||
 		    home == PAGE_HOME_INCOHERENT) {
 			cpumask_copy(home_mask, cpu_possible_mask);
-			return;
+			return 0;
 		}
 #if CHIP_HAS_CBOX_HOME_MAP()
 		if (home == PAGE_HOME_HASH) {
 			cpumask_or(home_mask, home_mask, &hash_for_home_map);
 			continue;
 		}
 #endif
-		if (home == PAGE_HOME_UNCACHED)
+		if (home == PAGE_HOME_UNCACHED) {
+			cached_coherently = 0;
 			continue;
+		}
 		BUG_ON(home < 0 || home >= NR_CPUS);
 		cpumask_set_cpu(home, home_mask);
 	}
+	return cached_coherently;
 }
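homecache_mask() keeps its old job of collecting the owning cpus into home_mask, but now also reports whether every page in the range is coherently cached. A sketch of the kind of caller this enables; homecache_flush_range() and its policy are assumptions for illustration, not taken from the patch:

	/* Illustrative caller: flush the owning cpus and tell the caller
	 * whether the range was fully coherent (a DMA layer might use
	 * this to decide whether extra precautions are needed). */
	static int homecache_flush_range(struct page *page, int pages)
	{
		struct cpumask home_mask;
		int coherent = homecache_mask(page, pages, &home_mask);

		flush_remote(page_to_pfn(page), pages * PAGE_SIZE,
			     &home_mask, 0, 0, 0, NULL, NULL, 0);
		return coherent;
	}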
@@ -386,4 +412,4 @@
 		pte_t *ptep = virt_to_pte(NULL, kva);
 		pte_t pteval = *ptep;
 		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
-		*ptep = pte_set_home(pteval, home);
+		__set_pte(ptep, pte_set_home(pteval, home));
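The final hunk stops updating the PTE with a plain store. A PTE can be wider than a native word (64-bit PTEs on the 32-bit TILEPro), so `*ptep = ...` becomes two stores in an unspecified order, and a walker can observe a half-written entry that already looks present; __set_pte() orders the two halves to close that window. A sketch of the idea, assuming 64-bit PTEs with the present bit in the low word; example_set_pte() is illustrative, not the kernel's __set_pte():

	static inline void example_set_pte(pte_t *ptep, pte_t pte)
	{
		u32 *w = (u32 *)ptep;

		if (pte_present(pte)) {
			/* Making the PTE valid: install the high half
			 * first, then the word with the present bit. */
			w[1] = (u32)(pte_val(pte) >> 32);
			barrier();
			w[0] = (u32)pte_val(pte);
		} else {
			/* Tearing down: clear the present bit first. */
			w[0] = (u32)pte_val(pte);
			barrier();
			w[1] = (u32)(pte_val(pte) >> 32);
		}
	}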