#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include <xen/features.h>

/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY	(~0UL)
#define FOREIGN_FRAME_BIT	(1UL<<31)
#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)

/* Maximum amount of memory we can handle in a domain in pages */
#define MAX_DOMAIN_PAGES						\
	((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))

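/*
 * Accessors for the phys-to-machine (p2m) table, which maps the guest's
 * pseudo-physical frame numbers (PFNs) to the machine frame numbers (MFNs)
 * that actually back them.
 */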
extern unsigned long get_phys_to_machine(unsigned long pfn);
extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn);

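/*
 * Translate a PFN to an MFN.  Auto-translated guests already see machine
 * frames, so the PFN is returned unchanged; otherwise the p2m entry is
 * looked up and the FOREIGN_FRAME marker bit is masked off.
 */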
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return pfn;

	return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT;
}

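/* Does this PFN have a usable entry in the p2m table? */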
static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 1;

	return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
}

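/*
 * Translate an MFN back to a PFN via the machine-to-phys (m2p) table,
 * which the hypervisor maintains and maps into the guest.
 */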
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return mfn;

#if 0
	if (unlikely((mfn >> machine_to_phys_order) != 0))
		return max_mapnr;
#endif

	pfn = 0;
	/*
	 * The array access can fail (e.g., device space beyond end of RAM).
	 * In such cases it doesn't matter what we return (we return garbage),
	 * but we must handle the fault without crashing!
	 */
	__get_user(pfn, &machine_to_phys_mapping[mfn]);

	return pfn;
}

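/*
 * Full-address conversions: translate the page frame while preserving the
 * offset within the page.
 */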
static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
	unsigned offset = phys.paddr & ~PAGE_MASK;
	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
	unsigned offset = machine.maddr & ~PAGE_MASK;
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN)) == MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
{
	extern unsigned long max_mapnr;
	unsigned long pfn = mfn_to_pfn(mfn);
	if ((pfn < max_mapnr)
	    && !xen_feature(XENFEAT_auto_translated_physmap)
	    && (get_phys_to_machine(pfn) != mfn))
		return max_mapnr; /* force !pfn_valid() */
	/* XXX fixme; not true with sparsemem */
	return pfn;
}

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
#define virt_to_mfn(v)		(pfn_to_mfn(PFN_DOWN(__pa(v))))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))

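/*
 * Illustrative use only (not part of this header's API): code that hands
 * the machine address of a directly mapped kernel buffer to the hypervisor
 * would typically do something like
 *
 *	unsigned long mfn = virt_to_mfn(buf);            (machine frame)
 *	phys_addr_t maddr = virt_to_machine(buf).maddr;  (full machine address)
 *
 * where 'buf', 'mfn' and 'maddr' are hypothetical local variables.
 */
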
static inline unsigned long pte_mfn(pte_t pte)
{
	return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
			(pgprot_val(pgprot) & __supported_pte_mask);

	return pte;
}

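/*
 * "_ma" (machine address) variants: construct and read raw page-table
 * entries that already hold machine frame numbers, with no pfn <-> mfn
 * translation applied.
 */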
static inline pteval_t pte_val_ma(pte_t pte)
{
	return pte.pte;
}

static inline pte_t __pte_ma(pteval_t x)
{
	return (pte_t) { .pte = x };
}

#define pmd_val_ma(v) ((v).pmd)
#ifdef __PAGETABLE_PUD_FOLDED
#define pud_val_ma(v) ((v).pgd.pgd)
#else
#define pud_val_ma(v) ((v).pud)
#endif
#define __pmd_ma(x)	((pmd_t) { (x) } )

#define pgd_val_ma(x)	((x).pgd)

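/*
 * arbitrary_virt_to_machine() translates any kernel virtual address
 * (including vmalloc/fixmap addresses) by walking the page tables; the
 * make_lowmem_page_*() helpers switch a directly mapped page between
 * read-only and read-write, e.g. for pages used as page tables.
 */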
xmaddr_t arbitrary_virt_to_machine(void *address);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

#endif /* _ASM_X86_XEN_PAGE_H */