|
1 #ifndef _LINUX_HUGETLB_H |
|
2 #define _LINUX_HUGETLB_H |
|
3 |
|
4 #include <linux/fs.h> |
|
5 |
|
6 #ifdef CONFIG_HUGETLB_PAGE |
|
7 |
|
8 #include <linux/mempolicy.h> |
|
9 #include <linux/shm.h> |
|
10 #include <asm/tlbflush.h> |
|
11 |
|
12 struct ctl_table; |
|
13 |
|
14 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) |
|
15 { |
|
16 return vma->vm_flags & VM_HUGETLB; |
|
17 } |
|
18 |
|
19 void reset_vma_resv_huge_pages(struct vm_area_struct *vma); |
|
20 int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); |
|
21 int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); |
|
22 int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); |
|
23 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); |
|
24 int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int, int); |
|
25 void unmap_hugepage_range(struct vm_area_struct *, |
|
26 unsigned long, unsigned long, struct page *); |
|
27 void __unmap_hugepage_range(struct vm_area_struct *, |
|
28 unsigned long, unsigned long, struct page *); |
|
29 int hugetlb_prefault(struct address_space *, struct vm_area_struct *); |
|
30 void hugetlb_report_meminfo(struct seq_file *); |
|
31 int hugetlb_report_node_meminfo(int, char *); |
|
32 unsigned long hugetlb_total_pages(void); |
|
33 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
|
34 unsigned long address, int write_access); |
|
35 int hugetlb_reserve_pages(struct inode *inode, long from, long to, |
|
36 struct vm_area_struct *vma); |
|
37 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); |
|
38 |
|
39 extern unsigned long hugepages_treat_as_movable; |
|
40 extern const unsigned long hugetlb_zero, hugetlb_infinity; |
|
41 extern int sysctl_hugetlb_shm_group; |
|
42 extern struct list_head huge_boot_pages; |
|
43 |
|
44 /* arch callbacks */ |
|
45 |
|
46 pte_t *huge_pte_alloc(struct mm_struct *mm, |
|
47 unsigned long addr, unsigned long sz); |
|
48 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr); |
|
49 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); |
|
50 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, |
|
51 int write); |
|
52 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, |
|
53 pmd_t *pmd, int write); |
|
54 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, |
|
55 pud_t *pud, int write); |
|
56 int pmd_huge(pmd_t pmd); |
|
57 int pud_huge(pud_t pmd); |
|
58 void hugetlb_change_protection(struct vm_area_struct *vma, |
|
59 unsigned long address, unsigned long end, pgprot_t newprot); |
|
60 |
|
#else /* !CONFIG_HUGETLB_PAGE */

/*
 * Stubs for kernels built without hugetlb support, so callers do not
 * need #ifdefs.  Queries report "no huge pages"; paths that should be
 * unreachable without hugetlb support trigger BUG().
 */
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return 0;
}

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

/* Unreachable without hugetlb support: BUG() if ever called. */
#define follow_hugetlb_page(m,v,p,vs,a,b,i,w) ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
#define unmap_hugepage_range(vma, start, end, page) BUG()
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf) 0
#define follow_huge_pmd(mm, addr, pmd, write) NULL
#define follow_huge_pud(mm, addr, pud, write) NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define pud_huge(x) 0
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })

/* No-op: nothing to change when there are no huge page mappings. */
#define hugetlb_change_protection(vma, address, end, newprot)

#ifndef HPAGE_MASK
#define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */
#define HPAGE_SIZE PAGE_SIZE
#endif

#endif /* !CONFIG_HUGETLB_PAGE */
|
103 |
|
#ifdef CONFIG_HUGETLBFS
/*
 * Mount options parsed for a hugetlbfs superblock.
 */
struct hugetlbfs_config {
	uid_t uid;		/* owner of the mount root */
	gid_t gid;		/* group of the mount root */
	umode_t mode;		/* permissions of the mount root */
	long nr_blocks;		/* max huge-page blocks for this mount */
	long nr_inodes;		/* max inodes for this mount */
	struct hstate *hstate;	/* huge page size this mount serves */
};
|
113 |
|
/*
 * Per-superblock accounting state for a hugetlbfs mount.
 */
struct hugetlbfs_sb_info {
	long max_blocks; /* blocks allowed */
	long free_blocks; /* blocks free */
	long max_inodes; /* inodes allowed */
	long free_inodes; /* inodes free */
	spinlock_t stat_lock;	/* protects the counters above */
	struct hstate *hstate;	/* huge page size this mount serves */
};
|
122 |
|
123 |
|
/*
 * hugetlbfs in-memory inode: NUMA shared policy plus the embedded VFS
 * inode (recovered from a struct inode via HUGETLBFS_I()).
 */
struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};
|
128 |
|
129 static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) |
|
130 { |
|
131 return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); |
|
132 } |
|
133 |
|
134 static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) |
|
135 { |
|
136 return sb->s_fs_info; |
|
137 } |
|
138 |
|
extern const struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
/* Create an unlinked hugetlbfs file of the given size (SHM_HUGETLB etc.). */
struct file *hugetlb_file_setup(const char *name, size_t);
/* Charge/uncharge 'delta' blocks against the mount's block quota. */
int hugetlb_get_quota(struct address_space *mapping, long delta);
void hugetlb_put_quota(struct address_space *mapping, long delta);
|
144 |
|
145 static inline int is_file_hugepages(struct file *file) |
|
146 { |
|
147 if (file->f_op == &hugetlbfs_file_operations) |
|
148 return 1; |
|
149 if (is_file_shm_hugepages(file)) |
|
150 return 1; |
|
151 |
|
152 return 0; |
|
153 } |
|
154 |
|
/*
 * set_file_hugepages - mark a file as hugetlbfs-backed by installing the
 * hugetlbfs file operations (makes is_file_hugepages() return true).
 */
static inline void set_file_hugepages(struct file *file)
{
	file->f_op = &hugetlbfs_file_operations;
}
|
#else /* !CONFIG_HUGETLBFS */

/* Without hugetlbfs: no file is huge, and creating one cannot succeed. */
#define is_file_hugepages(file) 0
#define set_file_hugepages(file) BUG()
#define hugetlb_file_setup(name,size) ERR_PTR(-ENOSYS)

#endif /* !CONFIG_HUGETLBFS */
|
166 |
|
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
/* Arch-specific hugepage-aligned get_unmapped_area implementation. */
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
|
172 |
|
#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int hugetlb_next_nid;	/* NUMA node to try next for pool allocation
				 * — NOTE(review): inferred from name; confirm */
	unsigned int order;	/* huge page size is PAGE_SIZE << order */
	unsigned long mask;	/* address mask for this size (cf. HPAGE_MASK) */
	unsigned long max_huge_pages;		/* configured pool size */
	unsigned long nr_huge_pages;		/* pages currently in the pool */
	unsigned long free_huge_pages;		/* unallocated pool pages */
	unsigned long resv_huge_pages;		/* pages reserved for mappings */
	unsigned long surplus_huge_pages;	/* overcommitted pages in use */
	unsigned long nr_overcommit_huge_pages;	/* overcommit limit */
	/* per-node free lists and counters */
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
	char name[HSTATE_NAME_LEN];	/* e.g. for sysfs/boot reporting */
};
|
193 |
|
/*
 * A huge page allocated from bootmem, queued on huge_boot_pages until it
 * can be handed to its hstate's pool.
 */
struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};
|
198 |
|
/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

/* Register a huge page size (PAGE_SIZE << order) during boot. */
void __init hugetlb_add_hstate(unsigned order);
/* Look up the hstate for an exact page size in bytes, or NULL. */
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1	/* arches may override with more sizes */
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

/* The hstate used when no explicit size is requested. */
#define default_hstate (hstates[default_hstate_idx])
|
213 |
|
214 static inline struct hstate *hstate_inode(struct inode *i) |
|
215 { |
|
216 struct hugetlbfs_sb_info *hsb; |
|
217 hsb = HUGETLBFS_SB(i->i_sb); |
|
218 return hsb->hstate; |
|
219 } |
|
220 |
|
221 static inline struct hstate *hstate_file(struct file *f) |
|
222 { |
|
223 return hstate_inode(f->f_dentry->d_inode); |
|
224 } |
|
225 |
|
226 static inline struct hstate *hstate_vma(struct vm_area_struct *vma) |
|
227 { |
|
228 return hstate_file(vma->vm_file); |
|
229 } |
|
230 |
|
231 static inline unsigned long huge_page_size(struct hstate *h) |
|
232 { |
|
233 return (unsigned long)PAGE_SIZE << h->order; |
|
234 } |
|
235 |
|
/*
 * huge_page_mask - address mask that strips the offset within one huge
 * page of this hstate (analogous to PAGE_MASK).
 */
static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}
|
240 |
|
/*
 * huge_page_order - allocation order of one huge page of this hstate
 * (huge page size == PAGE_SIZE << order).
 */
static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}
|
245 |
|
246 static inline unsigned huge_page_shift(struct hstate *h) |
|
247 { |
|
248 return h->order + PAGE_SHIFT; |
|
249 } |
|
250 |
|
251 static inline unsigned int pages_per_huge_page(struct hstate *h) |
|
252 { |
|
253 return 1 << h->order; |
|
254 } |
|
255 |
|
/*
 * blocks_per_huge_page - number of 512-byte sectors in one huge page
 * (huge_page_size / 512, expressed as a shift).
 */
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) >> 9;
}
|
260 |
|
261 #include <asm/hugetlb.h> |
|
262 |
|
263 static inline struct hstate *page_hstate(struct page *page) |
|
264 { |
|
265 return size_to_hstate(PAGE_SIZE << compound_order(page)); |
|
266 } |
|
267 |
|
#else
/* !CONFIG_HUGETLB_PAGE: dummy hstate so generic code still compiles;
 * all size helpers collapse to the base page size. */
struct hstate {};
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#endif
|
283 |
|
284 #endif /* _LINUX_HUGETLB_H */ |