#include "vtstart.h"
#include "asm64.h"
#include "vmxstruct.h"
#include "mem.h"
LONG gvt_uSubvertedCPUs = 0;
static KMUTEX MY_HvmMutex;
BOOLEAN g_vt_SubvertedCPU = FALSE;
_TEPTPPML4T EPT_Ptable;
/********************************************************************
Check whether the current processor supports Intel VT-x (VMX).

Procedure:
  1. CPUID leaf 0: EAX returns the highest supported basic leaf and
     EBX/EDX/ECX return the vendor id string.
  2. Verify the vendor is "GenuineIntel"
     (EBX = "Genu" = 0x756e6547, EDX = "ineI" = 0x49656e69,
      ECX = "ntel" = 0x6c65746e).
  3. CPUID leaf 1: ECX bit 5 is the VMX feature flag.

Returns TRUE when CPUID reports VMX support, FALSE otherwise.
********************************************************************/
BOOLEAN NTAPI VmxIsImplemented(
)
{
    ULONG32 eax, ebx, ecx, edx;

    // Leaf 0: highest basic leaf + vendor id.
    GetCpuIdInfo(0, &eax, &ebx, &ecx, &edx);
    if (eax < 1)
    {
        // Leaf 1 (feature flags) is unavailable, so VMX cannot be probed.
        // (Old message incorrectly said "Extended CPUID functions"; the
        // check is on the basic leaf range.)
        DbgPrint("vmx:VmxIsImplemented(): CPUID basic leaf 1 not supported\n");
        return FALSE;
    }
    // Vendor must be "GenuineIntel" — VMX is the Intel-only VT encoding.
    if (!(ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69))
    {
        DbgPrint("vmx:VmxIsImplemented(): Not an INTEL processor\n");
        return FALSE;
    }
    // Intel CPUs report VMX in CPUID.1:ECX bit 5.
    GetCpuIdInfo(0x1, &eax, &ebx, &ecx, &edx);
    return (BOOLEAN)(CmIsBitSet(ecx, 5));
}
// Enable Intel VT (VMX) kernel debugging on every logical processor.
//
// Returns STATUS_SUCCESS only when every CPU was successfully subverted;
// on any failure the partial state is rolled back via StopVirtualTechnology()
// and a failure status is returned.
NTSTATUS NTAPI StartVirtualTechnology()
{
    CCHAR cProcessorNumber;
    NTSTATUS Status, CallbackStatus;

    // Refuse to initialize twice.
    if (g_vt_SubvertedCPU)
    {
        DbgPrint("vmx:已经开启过VT\n");
        return STATUS_UNSUCCESSFUL;
    }
    // Bail out early if the hardware does not report VMX support.
    if (!VmxIsImplemented())
    {
        return STATUS_UNSUCCESSFUL;
    }
    DbgPrint("vmx:在每隔核心上开启VT HvmSwallowBluepill(): Going to subvert %d processor%s\n",
    KeNumberProcessors, KeNumberProcessors == 1 ? "" : "s");
    // Set up the private memory manager and the EPT page tables before any
    // CPU enters VMX operation.
    MmInitManager();
    MmInitEptPageTable();
    KeInitializeMutex(&MY_HvmMutex, 0);
    //
    // Acquire MY_HvmMutex so only one subvert/liberate sequence runs at a time.
    //
    KeWaitForSingleObject(&MY_HvmMutex, Executive, KernelMode, FALSE, NULL);
    //
    // Walk every logical processor.
    //
    for (cProcessorNumber = 0; cProcessorNumber < KeNumberProcessors; cProcessorNumber++)
    {
        DbgPrint("vmx:HvmSwallowBluepill(): CPU数量 #%d\n", cProcessorNumber);
        //
        // Deliver CmSubvert to this processor.
        // CmDeliverToProcessor pins the current thread to the target CPU via
        // KeSetSystemAffinityThread and raises IRQL to DISPATCH_LEVEL.
        // CmSubvert saves all registers (except segment registers) on the
        // stack and then calls HvmSubvertCpu.
        //
        Status = CmDeliverToProcessor(cProcessorNumber, CmSubvert, NULL, &CallbackStatus);
        //
        // Did the delivery mechanism itself succeed?
        //
        if (!NT_SUCCESS(Status))
        {
            DbgPrint("vmx:StartVirtualTechnology() CmDeliverToProcessor failed with status 0x%08hX\n", Status);
            KeReleaseMutex(&MY_HvmMutex, FALSE);
            // NOTE(review): StopVirtualTechnology() returns immediately while
            // g_vt_SubvertedCPU is FALSE, so CPUs subverted in earlier
            // iterations may NOT be liberated here — confirm this is intended.
            StopVirtualTechnology();
            return Status;
        }
        //
        // Did HvmSubvertCpu succeed on the target CPU?
        //
        if (!NT_SUCCESS(CallbackStatus))
        {
            DbgPrint("vmx:StartVirtualTechnology() HvmSubvertCpu failed with status 0x%08hX\n", CallbackStatus);
            KeReleaseMutex(&MY_HvmMutex, FALSE);
            // NOTE(review): same rollback caveat as above.
            StopVirtualTechnology();
            return CallbackStatus;
        }
    }
    KeReleaseMutex(&MY_HvmMutex, FALSE);
    //
    // If not every core was subverted, undo the changes.
    // (gvt_uSubvertedCPUs is presumably incremented by the per-CPU subvert
    // path, which is outside this view — verify.)
    //
    if (KeNumberProcessors != gvt_uSubvertedCPUs)
    {
        g_vt_SubvertedCPU = FALSE;
        DbgPrint("vmx:没有对每个核都侵染成功撤销更改 %d %d\n", KeNumberProcessors, gvt_uSubvertedCPUs);
        StopVirtualTechnology();
        return STATUS_UNSUCCESSFUL;
    }
    g_vt_SubvertedCPU = TRUE;
    return STATUS_SUCCESS;
}
/*
 * Run CallbackProc(CallbackParam) on the processor identified by
 * cProcessorNumber, at DISPATCH_LEVEL.
 *
 * The calling thread is temporarily pinned to the target CPU with
 * KeSetSystemAffinityThread, IRQL is raised to DISPATCH_LEVEL for the
 * duration of the callback, then both are restored.
 *
 * Returns STATUS_INVALID_PARAMETER when CallbackProc is NULL, otherwise
 * STATUS_SUCCESS (delivery status). The callback's own NTSTATUS is
 * reported through *pCallbackStatus when pCallbackStatus is non-NULL.
 */
NTSTATUS NTAPI CmDeliverToProcessor(
    CCHAR cProcessorNumber,
    PCALLBACK_PROC CallbackProc,
    PVOID CallbackParam,
    PNTSTATUS pCallbackStatus
)
{
    NTSTATUS CallbackStatus;
    KIRQL OldIrql;

    if (!CallbackProc)
        return STATUS_INVALID_PARAMETER;

    // Pre-set a failure status in case the callback never runs.
    if (pCallbackStatus)
        *pCallbackStatus = STATUS_UNSUCCESSFUL;

    // BUGFIX: shift a KAFFINITY (pointer-sized), not a 32-bit int.
    // "1 << n" is undefined behavior for n >= 31 and computes a truncated
    // affinity mask on 64-bit systems with more than 31 logical processors.
    KeSetSystemAffinityThread((KAFFINITY)1 << cProcessorNumber);

    // Raise IRQL to DISPATCH_LEVEL so the callback is not preempted or
    // migrated while it manipulates per-CPU VMX state.
    OldIrql = KeRaiseIrqlToDpcLevel();
    CallbackStatus = CallbackProc(CallbackParam);
    KeLowerIrql(OldIrql);
    KeRevertToUserAffinityThread();

    // Report the status of the callback that ran on the target core.
    if (pCallbackStatus)
        *pCallbackStatus = CallbackStatus;
    return STATUS_SUCCESS;
}
/*
 * Disable VT kernel debugging: ask every processor to leave guest mode,
 * then tear down the memory manager. Failures on individual CPUs are
 * logged but do not abort the sweep.
 */
NTSTATUS NTAPI StopVirtualTechnology()
{
    CCHAR cpu;
    NTSTATUS deliverStatus;
    NTSTATUS cpuStatus;

    /* Nothing to do unless VT was actually brought up. */
    if (!g_vt_SubvertedCPU)
        return STATUS_UNSUCCESSFUL;

    DbgPrint("vmx:HvmSpitOutBluepill(): Going to liberate %d processor%s\n",
        KeNumberProcessors, (KeNumberProcessors == 1) ? "" : "s");

    /* Serialize against any concurrent subvert/liberate sequence. */
    KeWaitForSingleObject(&MY_HvmMutex, Executive, KernelMode, FALSE, NULL);

    /* Visit every logical processor in turn. */
    for (cpu = 0; cpu < KeNumberProcessors; ++cpu)
    {
        DbgPrint("vmx:HvmSpitOutBluepill(): Liberating processor #%d\n", cpu);

        /* Deliver LiberateCpu to this core so it exits guest mode. */
        deliverStatus = CmDeliverToProcessor(cpu, LiberateCpu, NULL, &cpuStatus);

        /* Log (but tolerate) a failed delivery... */
        if (!NT_SUCCESS(deliverStatus)) {
            DbgPrint("vmx:HvmSpitOutBluepill(): CmDeliverToProcessor() failed with status 0x%08hX\n", deliverStatus);
        }
        /* ...and a failed per-CPU liberation. */
        if (!NT_SUCCESS(cpuStatus)) {
            DbgPrint("vmx:HvmSpitOutBluepill(): HvmLiberateCpu() failed with status 0x%08hX\n", cpuStatus);
        }
    }

    DbgPrint("vmx:HvmSpitOutBluepill(): Finished at irql %d\n", KeGetCurrentIrql());
    KeReleaseMutex(&MY_HvmMutex, FALSE);
    MmShutdownManager();
    return STATUS_SUCCESS;
}
//===========================================================================
// Liberate the current CPU. Runs once on every processor (delivered via
// CmDeliverToProcessor) and asks the hypervisor to unload so the CPU
// leaves guest mode.
//
// Param  - unused callback parameter.
// Returns STATUS_SUCCESS on unload (or after disabling VMX when startup
// had already failed); STATUS_UNSUCCESSFUL when not at DISPATCH_LEVEL.
//===========================================================================
NTSTATUS NTAPI LiberateCpu(PVOID Param)
{
    NTSTATUS Status;
    ULONG64 Efer;

    //
    // IRQL check: must be running at DISPATCH_LEVEL, pinned to the target CPU.
    //
    if (KeGetCurrentIrql() != DISPATCH_LEVEL)
    {
        return STATUS_UNSUCCESSFUL;
    }
    //
    // MSR_EFER indicates whether hardware virtualization mode is active.
    // BUGFIX: Efer is a ULONG64, so it must be printed with a 64-bit
    // conversion ("%I64X"); the previous "%X" silently dropped the upper
    // 32 bits (varargs type mismatch).
    //
    Efer = MsrRead(MSR_EFER);
    DbgPrint("vmx:Ddvp-> Reading MSR_EFER on entry: 0x%I64X\n", Efer);

    // If global startup failed, there is no hypervisor to call into:
    // just clear CR4.VMXE on this core.
    if (g_vt_SubvertedCPU == FALSE)
    {
        DbgPrint("vmx:开启失败关闭CR4\n");
        VmxDisable();
        return STATUS_SUCCESS;
    }
    //
    // Issue the NBP_HYPERCALL_UNLOAD hypercall asking the hypervisor to
    // unload from this processor.
    //
    if (!NT_SUCCESS(Status = MakeHyperExitCall()))
    {
        // BUGFIX: "%08hX" truncated the 32-bit NTSTATUS to 16 bits; "%08X"
        // prints the full status code.
        DbgPrint("vmx:Ddvp-> MakeHyperExitCall() failed on processor #%d, status 0x%08X\n",
            KeGetCurrentProcessorNumber(), Status);
        return Status;
    }
    Efer = MsrRead(MSR_EFER);
    DbgPrint("vmx:Ddvp-> Reading MSR_EFER on exit: 0x%I64X\n", Efer);
    return STATUS_SUCCESS;
}
// Launch the prepared VMCS on the current processor.
//
// pCpu - per-CPU control block whose VMCS was set up by SetupVMX.
//
// On success __vmx_vmlaunch() does NOT return — execution continues as the
// guest — so reaching any code after it means the launch failed. The VMX
// failure kind is then classified per the Intel SDM: VMfailInvalid (no
// current VMCS) vs VMfailValid (error code in the VM-instruction-error
// VMCS field). Always returns STATUS_UNSUCCESSFUL when it returns at all.
NTSTATUS Virtualize(PCPU pCpu)
{
    // Optimistically mark the CPU as subverted; a successful vmlaunch never
    // comes back to clear it, every failure path below resets it.
    g_vt_SubvertedCPU = TRUE;
    // (Removed an unused local that captured KeGetCurrentProcessorNumber()
    // without ever reading it, and commented-out rsp debug code.)
    DbgPrint("vmx:CPU: 0x%p \n", pCpu);
    __vmx_vmlaunch();
    /* never returns if successful */
    // NOTE(review): RegGetRflags() is printed with "%x"; if it returns a
    // 64-bit value this truncates — confirm its return type and widen the
    // specifier if needed.
    DbgPrint("vmx:rflags after _VmLaunch: 0x%x\n", RegGetRflags());
    if (_VmFailInvalid())
    {
        // CF set: there was no current VMCS to launch.
        g_vt_SubvertedCPU = FALSE;
        DbgPrint("vmx:no current VMCS\n");
        return STATUS_UNSUCCESSFUL;
    }
    if (_VmFailValid())
    {
        // ZF set: launch failed with a diagnostic in VM_INSTRUCTION_ERROR.
        g_vt_SubvertedCPU = FALSE;
        DbgPrint("vmx:vmlaunch failed\n");
        DbgPrint("vmx:_ReadVMCS: 0x%llx\n", ReadVMCS(VM_INSTRUCTION_ERROR));
        return STATUS_UNSUCCESSFUL;
    }
    // Neither failure flag set yet vmlaunch returned: still a failure.
    g_vt_SubvertedCPU = FALSE;
    return STATUS_UNSUCCESSFUL;
}
NTSTATUS HvmSubvertCpu(PVOID GuestRsp)
{
NTSTATUS Status;
PVOID HostKernelStackBase;
PCPU pCpu;
Status = CheckIfVMXIsEnabled();
if (!NT_SUCCESS(Status))
return STATUS_UNSUCCESSFUL;
//HostKernelStackBase = ExAllocatePoolWithTag(NonPagedPool, 16 * 0x1000, 0x42424242);
HostKernelStackBase = MmAllocatePages(16, NULL);
RtlZeroMemory(HostKernelStackBase, 16 * 0x1000);
if (!HostKernelStackBase)
{
DbgPrint("vmx:can't allocate host kernel stack\n");
return STATUS_INSUFFICIENT_RESOURCES;
}
pCpu = (PCPU)((PCHAR)HostKernelStackBase + 16 * 0x1000 - 8 - sizeof(CPU));
pCpu->pHostRsp = HostKernelStackBase;
pCpu->Self = pCpu;
pCpu->State = STATE_RUNNING;
pCpu->Mailbox = IPI_RUNNING;
Status = SetupVMX(pCpu);
// 当前处理器数量
pCpu->CpuIndex
评论10