#include <asm/kmap_types.h>

/*
 * The persistent-kmap window sits in the last PMD-sized slot below
 * PAGE_OFFSET, giving one page-table's worth (PTRS_PER_PTE) of entries.
 */
#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP		PTRS_PER_PTE
#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
/* Convert between a pkmap virtual address and its slot index. */
#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))

/* Protection bits used when establishing kmap mappings. */
#define kmap_prot		PAGE_KERNEL
/*
 * Flush the whole cache before recycling kmap entries: on a VIVT
 * cache, distinct virtual mappings of the same page hold separate
 * (possibly dirty) cache lines, so stale data must be flushed out.
 */
#define flush_cache_kmaps() \
	do { \
		if (cache_is_vivt()) \
			flush_cache_all(); \
	} while (0)
20
extern pte_t *pkmap_page_table;
22
extern void *kmap_high(struct page *page);
23
extern void kunmap_high(struct page *page);
/*
 * The reason for kmap_high_get() is to ensure that the currently kmap'd
 * page usage count does not decrease to zero while we're using its
 * existing virtual mapping in an atomic context. With a VIVT cache this
 * is essential to do, but with a VIPT cache this is only an optimization
 * so not to pay the price of establishing a second mapping if an existing
 * one can be used. However, on platforms without hardware TLB maintenance
 * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
 * the locking involved must also disable IRQs which is incompatible with
 * the IPI mechanism used by global TLB operations.
 */
#define ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
#undef ARCH_NEEDS_KMAP_HIGH_GET
/* VIVT + highmem needs kmap_high_get(), but SMP TLBv6 forbids it. */
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
#error "The sum of features in your kernel config cannot be supported together"
#endif
#endif
44
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
45
extern void *kmap_high_get(struct page *page);
47
static inline void *kmap_high_get(struct page *page)
/*
 * The following functions are already defined by <linux/highmem.h>
 * when CONFIG_HIGHMEM is not set.
 *
 * NOTE(review): these prototypes are expected to be wrapped in
 * #ifdef CONFIG_HIGHMEM; the matching #ifdef/#endif pair appears to
 * extend beyond this chunk — confirm against the full file.
 */
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *__kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(const void *ptr);