/* CPU virtualization extensions handling
 *
 * This should carry the code for handling CPU virtualization extensions
 * that needs to live in the kernel core.
 *
 * Author: Eduardo Habkost <ehabkost@redhat.com>
 *
 * Copyright (C) 2008, Red Hat Inc.
 *
 * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#ifndef _ASM_X86_VIRTEX_H
#define _ASM_X86_VIRTEX_H

#include <asm/processor.h>
#include <asm/system.h>

#include <asm/vmx.h>
#include <asm/svm.h>
/*
 * VMX functions:
 */
/** Check whether the current CPU supports Intel VT (VMX)
 *
 * Returns 1 if CPUID.1:ECX reports the VMX bit, 0 otherwise.
 */
static inline int cpu_has_vmx(void)
{
	unsigned long ecx = cpuid_ecx(1);

	/* CPUID.1:ECX.VMX[bit 5] -> VT.  A plain shift/mask is used
	 * instead of test_bit(), which is meant for bitmap arrays, not
	 * a single stack-local word; the 0/1 result is unchanged. */
	return (ecx >> 5) & 1;
}
/** Disable VMX on the current CPU
 *
 * vmxoff causes an undefined-opcode exception if vmxon was not run
 * on the CPU previously. Only call this function if you know VMX
 * is enabled.
 */
static inline void cpu_vmxoff(void)
{
	/* Order matters: leave VMX root operation with VMXOFF first,
	 * then clear CR4.VMXE.  CR4.VMXE must remain set while the CPU
	 * is in VMX operation. */
	asm volatile (ASM_VMX_VMXOFF : : : "cc");
	write_cr4(read_cr4() & ~X86_CR4_VMXE);
}
static inline int cpu_vmx_enabled(void)
|
|
48 |
{
|
|
49 |
return read_cr4() & X86_CR4_VMXE;
|
|
50 |
}
|
|
51 |
|
|
52 |
/** Disable VMX if it is enabled on the current CPU
 *
 * You shouldn't call this if cpu_has_vmx() returns 0.
 */
static inline void __cpu_emergency_vmxoff(void)
{
	if (!cpu_vmx_enabled())
		return;
	cpu_vmxoff();
}
/** Disable VMX if it is supported and enabled on the current CPU */
static inline void cpu_emergency_vmxoff(void)
{
	if (!cpu_has_vmx())
		return;
	__cpu_emergency_vmxoff();
}
/*
 * SVM functions:
 */
/** Check if the CPU has SVM support
|
|
78 |
*
|
|
79 |
* You can use the 'msg' arg to get a message describing the problem,
|
|
80 |
* if the function returns zero. Simply pass NULL if you are not interested
|
|
81 |
* on the messages; gcc should take care of not generating code for
|
|
82 |
* the messages on this case.
|
|
83 |
*/
|
|
84 |
static inline int cpu_has_svm(const char **msg)
|
|
85 |
{
|
|
86 |
uint32_t eax, ebx, ecx, edx;
|
|
87 |
|
|
88 |
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
|
|
89 |
if (msg)
|
|
90 |
*msg = "not amd";
|
|
91 |
return 0;
|
|
92 |
}
|
|
93 |
|
|
94 |
cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
|
|
95 |
if (eax < SVM_CPUID_FUNC) {
|
|
96 |
if (msg)
|
|
97 |
*msg = "can't execute cpuid_8000000a";
|
|
98 |
return 0;
|
|
99 |
}
|
|
100 |
|
|
101 |
cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
|
|
102 |
if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
|
|
103 |
if (msg)
|
|
104 |
*msg = "svm not available";
|
|
105 |
return 0;
|
|
106 |
}
|
|
107 |
return 1;
|
|
108 |
}
|
|
109 |
|
|
110 |
|
|
111 |
/** Disable SVM on the current CPU
 *
 * You should call this only if cpu_has_svm() returned true.
 */
static inline void cpu_svm_disable(void)
{
	uint64_t efer;

	/* Drop the host save area pointer before turning SVM off. */
	wrmsrl(MSR_VM_HSAVE_PA, 0);
	/* Read-modify-write EFER so only the SVME bit is cleared and all
	 * other EFER bits are preserved. */
	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
}
/** Makes sure SVM is disabled, if it is supported on the CPU
|
|
125 |
*/
|
|
126 |
static inline void cpu_emergency_svm_disable(void)
|
|
127 |
{
|
|
128 |
if (cpu_has_svm(NULL))
|
|
129 |
cpu_svm_disable();
|
|
130 |
}
|
|
#endif /* _ASM_X86_VIRTEX_H */