#ifndef _ASM_X86_PGTABLE_32_H
#define _ASM_X86_PGTABLE_32_H

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mm_struct;
struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];

static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);

extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
/*
 * The Linux x86 paging architecture is 'compile-time dual-mode': it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE	(1UL << PMD_SHIFT)
# define PMD_MASK	(~(PMD_SIZE - 1))
#else
# include <asm/pgtable-2level-defs.h>
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8 * 1024 * 1024)
#define VMALLOC_START	((unsigned long)high_memory + VMALLOC_OFFSET)
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif

#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1))	\
		    & PMD_MASK)

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE - 2 * PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START - 2 * PAGE_SIZE)
#endif

#define MAXMEM	(VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
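
/*
 * Illustrative sketch, not part of the original header (the helper name
 * is hypothetical): check whether a kernel virtual address lies in the
 * vmalloc area laid out above. high_memory, used by VMALLOC_START, is
 * declared in <linux/mm.h>.
 */
static inline int example_addr_in_vmalloc(unsigned long addr)
{
	return addr >= VMALLOC_START && addr < VMALLOC_END;
}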

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE, ...)'.
 */
#undef TEST_ACCESS_OK

/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];

#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))

/* To avoid harmful races, pmd_none(x) should check only the lower word when PAE is enabled */
#define pmd_none(x)	(!(unsigned long)pmd_val((x)))
#define pmd_present(x)	(pmd_val((x)) & _PAGE_PRESENT)
#define pmd_bad(x)	((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

/*
 * Macro to mark a page protection value as "uncacheable".
 * On processors which do not support it, this is a no-op.
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))	\
	 : (prot))
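
/*
 * Typical usage (illustrative sketch): a driver mapping MMIO into
 * userspace would strengthen the vma protections before remapping:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 */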

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
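
/*
 * Illustrative usage (sketch): build a pte mapping @page with normal
 * kernel permissions; PAGE_KERNEL comes from the shared pgtable headers.
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 */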

static inline int pud_large(pud_t pud) { return 0; }
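
/*
 * The pud level is folded away on 32-bit x86 (see the comment at the top
 * of this file), so a large pud can never exist and pud_large() above is
 * hard-wired to 0.
 */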

/*
 * The pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This macro returns the index of the entry in the pmd page which would
 * control the given virtual address.
 */
#define pmd_index(address)				\
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

/*
 * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * This macro returns the index of the entry in the pte page which would
 * control the given virtual address.
 */
#define pte_index(address)					\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
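
/*
 * Illustrative sketch, not part of the original header (the helper name
 * is hypothetical): split a virtual address into the indices that select
 * its pmd and pte entries, using the macros above.
 */
static inline void example_pgtable_indices(unsigned long address,
					   unsigned int *pmd_idx,
					   unsigned int *pte_idx)
{
	*pmd_idx = pmd_index(address);	/* entry within the pmd page */
	*pte_idx = pte_index(address);	/* entry within the pte page */
}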

#define pte_offset_kernel(dir, address)					\
	((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address)))

#define pmd_page(pmd)	(pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))

#define pmd_page_vaddr(pmd)					\
	((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))
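
/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * resolve the kernel pte for @address from its pmd entry. The caller is
 * assumed to have rejected the entry with pmd_none()/pmd_bad() first.
 */
static inline pte_t *example_walk_to_pte(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}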

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address)					\
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) +		\
	 pte_index((address)))
#define pte_offset_map_nested(dir, address)				\
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) +		\
	 pte_index((address)))
#define pte_unmap(pte)		kunmap_atomic((pte), KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic((pte), KM_PTE1)
#else
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)
#endif
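
/*
 * Typical access pattern (illustrative): with CONFIG_HIGHPTE the pte page
 * may live in highmem, so it must be temporarily mapped around each use:
 *
 *	pte_t *pte = pte_offset_map(pmd, address);
 *	entry = *pte;
 *	pte_unmap(pte);
 */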

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)
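
/*
 * Illustrative usage (sketch): tear down a temporary kernel mapping by
 * clearing the pte that maps @vaddr and flushing the stale TLB entry:
 *
 *	kpte_clear_flush(ptep, vaddr);
 */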

/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

#endif /* !__ASSEMBLY__ */

/*
 * kern_addr_valid() is (1) for FLATMEM and (0) for
 * SPARSEMEM and DISCONTIGMEM
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(kaddr)	(1)
#else
#define kern_addr_valid(kaddr)	(0)
#endif

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)
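
/*
 * Illustrative driver usage (hypothetical mmap handler; "foo_mmap" and
 * "pfn" are assumptions, sketch only):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *					  vma->vm_end - vma->vm_start,
 *					  vma->vm_page_prot);
 *	}
 */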

#endif /* _ASM_X86_PGTABLE_32_H */