p***@xen.org
2018-11-02 11:22:14 UTC
commit 6cb27e417e57c2f4d689fa19971f20f75e9c0708
Author: Alexandru Isaila <***@bitdefender.com>
AuthorDate: Fri Nov 2 12:16:32 2018 +0100
Commit: Jan Beulich <***@suse.com>
CommitDate: Fri Nov 2 12:16:32 2018 +0100
x86/hvm: clean up may_defer from hvm_* helpers
The may_defer parameter was left with the older bool_t type. This patch
changes the type to plain C99 bool (a short standalone sketch of the
difference follows the diff below).
Signed-off-by: Alexandru Isaila <***@bitdefender.com>
Acked-by: Razvan Cojocaru <***@bitdefender.com>
Reviewed-by: Wei Liu <***@citrix.com>
Acked-by: Brian Woods <***@amd.com>
Reviewed-by: Kevin Tian <***@intel.com>
Acked-by: Paul Durrant <***@citrix.com>
---
xen/arch/x86/hvm/emulate.c | 8 ++++----
xen/arch/x86/hvm/hvm.c | 14 +++++++-------
xen/arch/x86/hvm/svm/nestedsvm.c | 14 +++++++-------
xen/arch/x86/hvm/svm/svm.c | 2 +-
xen/arch/x86/hvm/vm_event.c | 8 ++++----
xen/arch/x86/hvm/vmx/vmx.c | 4 ++--
xen/arch/x86/hvm/vmx/vvmx.c | 16 ++++++++--------
xen/include/asm-x86/hvm/support.h | 6 +++---
8 files changed, 36 insertions(+), 36 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index cd1d9a7c57..9e7deaa6cd 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2024,7 +2024,7 @@ static int hvmemul_write_cr(
switch ( reg )
{
case 0:
- rc = hvm_set_cr0(val, 1);
+ rc = hvm_set_cr0(val, true);
break;
case 2:
@@ -2033,11 +2033,11 @@ static int hvmemul_write_cr(
break;
case 3:
- rc = hvm_set_cr3(val, 1);
+ rc = hvm_set_cr3(val, true);
break;
case 4:
- rc = hvm_set_cr4(val, 1);
+ rc = hvm_set_cr4(val, true);
break;
default:
@@ -2092,7 +2092,7 @@ static int hvmemul_write_msr(
uint64_t val,
struct x86_emulate_ctxt *ctxt)
{
- int rc = hvm_msr_write_intercept(reg, val, 1);
+ int rc = hvm_msr_write_intercept(reg, val, true);
if ( rc == X86EMUL_EXCEPTION )
x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0e9d316b40..7be9cf4454 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2038,15 +2038,15 @@ int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
switch ( cr )
{
case 0:
- rc = hvm_set_cr0(val, 1);
+ rc = hvm_set_cr0(val, true);
break;
case 3:
- rc = hvm_set_cr3(val, 1);
+ rc = hvm_set_cr3(val, true);
break;
case 4:
- rc = hvm_set_cr4(val, 1);
+ rc = hvm_set_cr4(val, true);
break;
case 8:
@@ -2142,7 +2142,7 @@ static void hvm_update_cr(struct vcpu *v, unsigned int cr, unsigned long value)
hvm_update_guest_cr(v, cr);
}
-int hvm_set_cr0(unsigned long value, bool_t may_defer)
+int hvm_set_cr0(unsigned long value, bool may_defer)
{
struct vcpu *v = current;
struct domain *d = v->domain;
@@ -2260,7 +2260,7 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
return X86EMUL_OKAY;
}
-int hvm_set_cr3(unsigned long value, bool_t may_defer)
+int hvm_set_cr3(unsigned long value, bool may_defer)
{
struct vcpu *v = current;
struct page_info *page;
@@ -2314,7 +2314,7 @@ int hvm_set_cr3(unsigned long value, bool_t may_defer)
return X86EMUL_UNHANDLEABLE;
}
-int hvm_set_cr4(unsigned long value, bool_t may_defer)
+int hvm_set_cr4(unsigned long value, bool may_defer)
{
struct vcpu *v = current;
unsigned long old_cr;
@@ -2981,7 +2981,7 @@ void hvm_task_switch(
if ( task_switch_load_seg(x86_seg_ldtr, tss.ldt, new_cpl, 0) )
goto out;
- rc = hvm_set_cr3(tss.cr3, 1);
+ rc = hvm_set_cr3(tss.cr3, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if ( rc != X86EMUL_OKAY )
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 78a1016e94..088b3fd562 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -285,7 +285,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
/* CR4 */
v->arch.hvm.guest_cr[4] = n1vmcb->_cr4;
- rc = hvm_set_cr4(n1vmcb->_cr4, 1);
+ rc = hvm_set_cr4(n1vmcb->_cr4, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
@@ -296,7 +296,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
svm->ns_cr0, v->arch.hvm.guest_cr[0]);
v->arch.hvm.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE;
n1vmcb->rflags &= ~X86_EFLAGS_VM;
- rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, 1);
+ rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
@@ -324,7 +324,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
v->arch.guest_table = pagetable_null();
/* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
}
- rc = hvm_set_cr3(n1vmcb->_cr3, 1);
+ rc = hvm_set_cr3(n1vmcb->_cr3, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
@@ -556,7 +556,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
/* CR4 */
v->arch.hvm.guest_cr[4] = ns_vmcb->_cr4;
- rc = hvm_set_cr4(ns_vmcb->_cr4, 1);
+ rc = hvm_set_cr4(ns_vmcb->_cr4, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
@@ -566,7 +566,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
svm->ns_cr0 = v->arch.hvm.guest_cr[0];
cr0 = nestedsvm_fpu_vmentry(svm->ns_cr0, ns_vmcb, n1vmcb, n2vmcb);
v->arch.hvm.guest_cr[0] = ns_vmcb->_cr0;
- rc = hvm_set_cr0(cr0, 1);
+ rc = hvm_set_cr0(cr0, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
@@ -584,7 +584,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
/* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
- rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
+ rc = hvm_set_cr3(ns_vmcb->_cr3, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
@@ -598,7 +598,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
* we assume it intercepts page faults.
*/
/* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
- rc = hvm_set_cr3(ns_vmcb->_cr3, 1);
+ rc = hvm_set_cr3(ns_vmcb->_cr3, true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
if (rc != X86EMUL_OKAY)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index be48ca72c5..5d00256aaa 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2333,7 +2333,7 @@ static void svm_do_msr_access(struct cpu_user_regs *regs)
msr_split(regs, msr_content);
}
else
- rc = hvm_msr_write_intercept(regs->ecx, msr_fold(regs), 1);
+ rc = hvm_msr_write_intercept(regs->ecx, msr_fold(regs), true);
if ( rc == X86EMUL_OKAY )
__update_guest_eip(regs, inst_len);
diff --git a/xen/arch/x86/hvm/vm_event.c b/xen/arch/x86/hvm/vm_event.c
index 28d08a6630..0df8ab40e6 100644
--- a/xen/arch/x86/hvm/vm_event.c
+++ b/xen/arch/x86/hvm/vm_event.c
@@ -94,7 +94,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
if ( unlikely(w->do_write.cr0) )
{
- if ( hvm_set_cr0(w->cr0, 0) == X86EMUL_EXCEPTION )
+ if ( hvm_set_cr0(w->cr0, false) == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
w->do_write.cr0 = 0;
@@ -102,7 +102,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
if ( unlikely(w->do_write.cr4) )
{
- if ( hvm_set_cr4(w->cr4, 0) == X86EMUL_EXCEPTION )
+ if ( hvm_set_cr4(w->cr4, false) == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
w->do_write.cr4 = 0;
@@ -110,7 +110,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
if ( unlikely(w->do_write.cr3) )
{
- if ( hvm_set_cr3(w->cr3, 0) == X86EMUL_EXCEPTION )
+ if ( hvm_set_cr3(w->cr3, false) == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
w->do_write.cr3 = 0;
@@ -118,7 +118,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
if ( unlikely(w->do_write.msr) )
{
- if ( hvm_msr_write_intercept(w->msr, w->value, 0) ==
+ if ( hvm_msr_write_intercept(w->msr, w->value, false) ==
X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index c9406d02c1..e065f8bbdb 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2664,7 +2664,7 @@ static int vmx_cr_access(cr_access_qual_t qual)
(X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS));
HVMTRACE_LONG_1D(LMSW, value);
- if ( (rc = hvm_set_cr0(value, 1)) == X86EMUL_EXCEPTION )
+ if ( (rc = hvm_set_cr0(value, true)) == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
return rc;
@@ -4000,7 +4000,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
}
case EXIT_REASON_MSR_WRITE:
- switch ( hvm_msr_write_intercept(regs->ecx, msr_fold(regs), 1) )
+ switch ( hvm_msr_write_intercept(regs->ecx, msr_fold(regs), true) )
{
case X86EMUL_OKAY:
update_guest_eip(); /* Safe: WRMSR */
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 8e2e8c266a..dfd08e2d0a 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1017,15 +1017,15 @@ static void load_shadow_guest_state(struct vcpu *v)
nvcpu->guest_cr[0] = get_vvmcs(v, CR0_READ_SHADOW);
nvcpu->guest_cr[4] = get_vvmcs(v, CR4_READ_SHADOW);
- rc = hvm_set_cr0(get_vvmcs(v, GUEST_CR0), 1);
+ rc = hvm_set_cr0(get_vvmcs(v, GUEST_CR0), true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
- rc = hvm_set_cr4(get_vvmcs(v, GUEST_CR4), 1);
+ rc = hvm_set_cr4(get_vvmcs(v, GUEST_CR4), true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
- rc = hvm_set_cr3(get_vvmcs(v, GUEST_CR3), 1);
+ rc = hvm_set_cr3(get_vvmcs(v, GUEST_CR3), true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
@@ -1035,7 +1035,7 @@ static void load_shadow_guest_state(struct vcpu *v)
if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL )
{
rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
- get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
+ get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), false);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
}
@@ -1223,15 +1223,15 @@ static void load_vvmcs_host_state(struct vcpu *v)
__vmwrite(vmcs_h2g_field[i].guest_field, r);
}
- rc = hvm_set_cr0(get_vvmcs(v, HOST_CR0), 1);
+ rc = hvm_set_cr0(get_vvmcs(v, HOST_CR0), true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
- rc = hvm_set_cr4(get_vvmcs(v, HOST_CR4), 1);
+ rc = hvm_set_cr4(get_vvmcs(v, HOST_CR4), true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
- rc = hvm_set_cr3(get_vvmcs(v, HOST_CR3), 1);
+ rc = hvm_set_cr3(get_vvmcs(v, HOST_CR3), true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
@@ -1241,7 +1241,7 @@ static void load_vvmcs_host_state(struct vcpu *v)
if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
{
rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
- get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
+ get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), true);
if ( rc == X86EMUL_EXCEPTION )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
}
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 7222939a6a..e989aa7349 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -134,9 +134,9 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);
* returned.
*/
int hvm_set_efer(uint64_t value);
-int hvm_set_cr0(unsigned long value, bool_t may_defer);
-int hvm_set_cr3(unsigned long value, bool_t may_defer);
-int hvm_set_cr4(unsigned long value, bool_t may_defer);
+int hvm_set_cr0(unsigned long value, bool may_defer);
+int hvm_set_cr3(unsigned long value, bool may_defer);
+int hvm_set_cr4(unsigned long value, bool may_defer);
int hvm_descriptor_access_intercept(uint64_t exit_info,
uint64_t vmx_exit_qualification,
unsigned int descriptor, bool is_write);
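
For context on why the bool_t -> bool conversion is more than cosmetic:
C99 _Bool normalizes any assigned non-zero value to 1, whereas a
char-based typedef stores the raw value, so comparisons against 1 can
misfire. Below is a minimal standalone sketch, not part of the patch;
the bool_t definition here is an assumption mirroring Xen's historical
typedef of char.

#include <stdbool.h>
#include <stdio.h>

typedef char bool_t;            /* assumed historical definition */

int main(void)
{
    bool_t old_style = 0x40;    /* stores 0x40: truthy, but != 1 */
    bool   new_style = 0x40;    /* _Bool normalizes non-zero to 1 */

    /* Direct comparison against 1 behaves differently for the two: */
    printf("old_style == 1: %d\n", old_style == 1);  /* prints 0 */
    printf("new_style == 1: %d\n", new_style == 1);  /* prints 1 */

    return 0;
}

With bool, a caller passing e.g. a masked flags value still yields a
well-defined 0/1, which is one reason the Xen tree has been converting
bool_t users such as may_defer over to bool.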
--
generated by git-patchbot for /home/xen/git/xen.git#staging