 	unsigned page_size_mask;
 };

-static void __init find_early_table_space(struct map_range *mr, unsigned long end,
-					  int use_pse, int use_gbpages)
+/*
+ * First calculate space needed for kernel direct mapping page tables to cover
+ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
+ * pages. Then find enough contiguous space for those page tables.
+ */
+static void __init find_early_table_space(struct map_range *mr, int nr_range)
 {
-	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
+	int i;
+	unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+	unsigned long start = 0, good_end;

-	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+	for (i = 0; i < nr_range; i++) {
+		unsigned long range, extra;
+
+		range = mr[i].end - mr[i].start;
+		puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
+
+		if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
+			extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
+			pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+		} else {
+			pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
+		}
+
+		if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
+			extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
+			ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		} else {
+			ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		}
+	}

 	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
-		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-	} else
-		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;

 	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
-		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
-		/* The first 2/4M doesn't use large pages. */
-		if (mr->start < PMD_SIZE)
-			extra += mr->end - mr->start;
-
-		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	} else
-		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

 	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

 #ifdef CONFIG_X86_32
	...
 	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);

 	printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
-		end - 1, pgt_buf_start << PAGE_SHIFT,
+		mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
 		(pgt_buf_top << PAGE_SHIFT) - 1);
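The effect of the new per-range calculation can be exercised with a small stand-alone sketch. It is not part of the patch: it mirrors the counting logic in the loop above as a user-space program, assuming x86-64 paging constants (PAGE_SHIFT 12, PMD_SHIFT 21, PUD_SHIFT 30) and 8-byte table entries; struct range, use_1g/use_2m and estimate_table_space() are illustrative stand-ins for the kernel's struct map_range, page_size_mask and find_early_table_space().

/* Stand-alone sketch, not kernel code: estimate page-table space for a set
 * of physical ranges the way the patched find_early_table_space() does.
 * x86-64 shifts and 8-byte entries are assumed; the names are illustrative. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define ENTRY_SIZE	8UL	/* size of one pud/pmd/pte entry on x86-64 */
#define ROUNDUP(x, a)	((((x) + (a) - 1) / (a)) * (a))

struct range {
	unsigned long start, end;	/* physical address range to map */
	int use_1g, use_2m;		/* stand-in for page_size_mask bits */
};

static unsigned long estimate_table_space(const struct range *r, int nr)
{
	unsigned long puds = 0, pmds = 0, ptes = 0, tables;
	int i;

	for (i = 0; i < nr; i++) {
		unsigned long range = r[i].end - r[i].start, extra;

		/* PUD entries to cover the range in 1GB units, rounded up. */
		puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;

		/* PMD entries only for the remainder not covered by 1GB pages. */
		if (r[i].use_1g) {
			extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
			pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
		} else {
			pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
		}

		/* PTE entries only for the remainder not covered by 2MB pages. */
		if (r[i].use_2m) {
			extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
			ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
		} else {
			ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
		}
	}

	/* Each level's entries are packed into page-aligned tables. */
	tables = ROUNDUP(puds * ENTRY_SIZE, PAGE_SIZE);
	tables += ROUNDUP(pmds * ENTRY_SIZE, PAGE_SIZE);
	tables += ROUNDUP(ptes * ENTRY_SIZE, PAGE_SIZE);
	return tables;
}

int main(void)
{
	/* One 0-4GB range, mapped with 2MB pages, no 1GB pages. */
	struct range mr[] = { { 0x0UL, 0x100000000UL, 0, 1 } };

	printf("tables: %lu bytes\n", estimate_table_space(mr, 1));
	return 0;
}

For that single 0-4GB range the sketch prints 20480 bytes: one page of PUD entries plus four pages of PMD entries and no PTE pages, the same totals the roundup() sums in the function produce.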
 	 * nodes are discovered.
 	 */
 	if (!after_bootmem)
-		find_early_table_space(&mr[0], end, use_pse, use_gbpages);
+		find_early_table_space(mr, nr_range);

 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,