#include "cuda_kernels.h"
#include "nvml.h"
#include <stdio.h>
#include <windows.h>
#include <winbase.h>
#include <tlhelp32.h>
#include <psapi.h>
#pragma comment(lib,"kernel32.lib")
#pragma comment(lib,"advapi32.lib")
#pragma comment(lib,"nvml.lib")
// Map an NVML compute mode enum value to a human-readable label.
// Returns a pointer to a static string literal; any unrecognized
// mode value yields "Unknown".
const char * convertToComputeModeString(nvmlComputeMode_t mode)
{
    if (mode == NVML_COMPUTEMODE_DEFAULT)
        return "Default";
    if (mode == NVML_COMPUTEMODE_EXCLUSIVE_THREAD)
        return "Exclusive_Thread";
    if (mode == NVML_COMPUTEMODE_PROHIBITED)
        return "Prohibited";
    if (mode == NVML_COMPUTEMODE_EXCLUSIVE_PROCESS)
        return "Exclusive Process";
    return "Unknown";
}
int main()
{
cuAdd();
nvmlReturn_t result;
unsigned int device_count, i;
// First initialize NVML library
result = nvmlInit();
if (NVML_SUCCESS != result)
{
printf("Failed to initialize NVML: %s\n", nvmlErrorString(result));
printf("Press ENTER to continue...\n");
getchar();
return 1;
}
result = nvmlDeviceGetCount(&device_count);
if (NVML_SUCCESS != result)
{
printf("Failed to query device count: %s\n", nvmlErrorString(result));
goto Error;
}
printf("Found %d device%s\n\n", device_count, device_count != 1 ? "s" : "");
printf("Listing devices:\n");
while (true)
{
for (i = 0; i < device_count; i++)
{
nvmlDevice_t device;
char name[NVML_DEVICE_NAME_BUFFER_SIZE];
nvmlPciInfo_t pci;
nvmlComputeMode_t compute_mode;
// Query for device handle to perform operations on a device
// You can also query device handle by other features like:
// nvmlDeviceGetHandleBySerial
// nvmlDeviceGetHandleByPciBusId
result = nvmlDeviceGetHandleByIndex(i, &device);
if (NVML_SUCCESS != result)
{
printf("Failed to get handle for device %i: %s\n", i, nvmlErrorString(result));
goto Error;
}
result = nvmlDeviceGetName(device, name, NVML_DEVICE_NAME_BUFFER_SIZE);
if (NVML_SUCCESS != result)
{
printf("Failed to get name of device %i: %s\n", i, nvmlErrorString(result));
goto Error;
}
// pci.busId is very useful to know which device physically you're talking to
// Using PCI identifier you can also match nvmlDevice handle to CUDA device.
result = nvmlDeviceGetPciInfo(device, &pci);
if (NVML_SUCCESS != result)
{
printf("Failed to get pci info for device %i: %s\n", i, nvmlErrorString(result));
goto Error;
}
printf("%d. %s [%s]\n", i, name, pci.busId);
// This is a simple example on how you can modify GPU's state
result = nvmlDeviceGetComputeMode(device, &compute_mode);
if (NVML_ERROR_NOT_SUPPORTED == result)
printf("\t This is not CUDA capable device\n");
else if (NVML_SUCCESS != result)
{
printf("Failed to get compute mode for device %i: %s\n", i, nvmlErrorString(result));
goto Error;
}
else
{
// try to change compute mode
printf("\t Changing device's compute mode from '%s' to '%s'\n",
convertToComputeModeString(compute_mode),
convertToComputeModeString(NVML_COMPUTEMODE_PROHIBITED));
result = nvmlDeviceSetComputeMode(device, NVML_COMPUTEMODE_PROHIBITED);
if (NVML_ERROR_NO_PERMISSION == result)
printf("\t\t Need root privileges to do that: %s\n", nvmlErrorString(result));
else if (NVML_ERROR_NOT_SUPPORTED == result)
printf("\t\t Compute mode prohibited not supported. You might be running on\n"
"\t\t windows in WDDM driver model or on non-CUDA capable GPU.\n");
else if (NVML_SUCCESS != result)
{
printf("\t\t Failed to set compute mode for device %i: %s\n", i, nvmlErrorString(result));
goto Error;
}
else
{
printf("\t Restoring device's compute mode back to '%s'\n",
convertToComputeModeString(compute_mode));
result = nvmlDeviceSetComputeMode(device, compute_mode);
if (NVML_SUCCESS != result)
{
printf("\t\t Failed to restore compute mode for device %i: %s\n", i, nvmlErrorString(result));
goto Error;
}
}
}
printf("\n");
printf("----- 温度 ----- \n");
unsigned int temperature_threshold = 100;
result = nvmlDeviceGetTemperatureThreshold(device, NVML_TEMPERATURE_THRESHOLD_SHUTDOWN, &temperature_threshold);
if (NVML_SUCCESS != result)
{
printf("device %i Failed to get NVML_TEMPERATURE_THRESHOLD_SHUTDOWN: %s\n", i, nvmlErrorString(result));
}
else
printf("截止温度: %d 摄氏度 (Temperature at which the GPU will shut down for HW protection)\n", temperature_threshold);
result = nvmlDeviceGetTemperatureThreshold(device, NVML_TEMPERATURE_THRESHOLD_SLOWDOWN, &temperature_threshold);
if (NVML_SUCCESS != result)
{
printf("device %i Failed NVML_TEMPERATURE_THRESHOLD_SLOWDOWN: %s\n", i, nvmlErrorString(result));
}
else
printf("上限温度: %d 摄氏度 (Temperature at which the GPU will begin slowdown)\n", temperature_threshold);
unsigned int temperature = 0;
result = nvmlDeviceGetTemperature(device, NVML_TEMPERATURE_GPU, &temperature);
if (NVML_SUCCESS != result)
{
printf("device %i NVML_TEMPERATURE_GPU Failed: %s\n", i, nvmlErrorString(result));
}
else
printf("当前温度: %d 摄氏度 \n", temperature);
//使用率
printf("\n");
nvmlUtilization_t utilization;
result = nvmlDeviceGetUtilizationRates(device, &utilization);
if (NVML_SUCCESS != result)
{
printf(" device %i nvmlDeviceGetUtilizationRates Failed : %s\n", i, nvmlErrorString(result));
}
else
{
printf("----- 使用率 ----- \n");
printf("GPU 使用率: %lld %% \n", utilization.gpu);
printf("显存使用率: %lld %% \n", utilization.memory);
}
//FB memory
printf("\n");
nvmlMemory_t memory;
result = nvmlDeviceGetMemoryInfo(device, &memory);
if (NVML_SUCCESS != result)
{
printf("device %i nvmlDeviceGetMemoryInfo Failed : %s\n", i, nvmlErrorString(result));
}
else
{
printf("------ FB memory ------- \n");
printf("Total installed FB memory: %lld bytes \n", memory.total);
printf("Unallocated FB memory: %lld bytes \n", memory.free);
printf("Allocated FB memory: %lld bytes \n", memory.used);
}
//BAR1 memory
printf("\n");
nvmlBAR1Memory_t bar1Memory;
result = nvmlDeviceGetBAR1MemoryInfo(device, &bar1Memory);
if (NVML_SUCCESS != result)
{
printf("device %i nvmlDeviceGetBAR1MemoryInfo Failed : %s\n", i, nvmlErrorString(result));
}
else
{
printf("------ BAR1 memory ------- \n");
printf("Total BAR1 memory: %lld bytes \n", bar1Memory.bar1Total);
printf("Unallocated BAR1 memory: %lld bytes \n", bar1Memory.bar1Free);
printf("Allocated BAR1 memory: %lld bytes \n", bar1Memory.bar1Used);
}
//Information about running compute processes on the GPU
printf("\n");
unsigned int infoCount;
nvmlProcessInfo_t infos[999];
result = nvmlDeviceGetComputeRunningProcesses(device, &infoCount, infos);
if (NVML_SUCCESS != result)
{
printf("Failed to get ComputeRunningProcesses for device %i: %s\n", i, nvmlErrorString(result));
}
else
{
HANDLE handle; //定义CreateToolhelp32Snapshot系统快照句柄
handle = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);//获得系统快照句柄
PROCESSENTRY32 *info; //定义PROCESSENTRY32结构字指
//PROCESSENTRY32 结构的 dwSize 成员设置成 sizeof(PROCESSENTRY32)
info = new PROCESSENTRY32;
info->dwSize = sizeof(PROCESSENTRY32);
//调用一次 Process32First 函数,从快照中获取进程列表
Process32First(handle, info);
//重复调用 Process32Next,直到函数返回 FALSE 为止
printf("------ Information about running compute processes on the GPU ------- \n");
for (int i = 0; i < infoCoun
没有合适的资源?快使用搜索试试~ 我知道了~
NMVL查询显卡信息
共1108个文件
h:721个
inl:189个
cuh:88个
5星 · 超过95%的资源 需积分: 45 30 下载量 4 浏览量
2017-05-13
23:01:54
上传
评论 2
收藏 54.86MB ZIP 举报
温馨提示
NMVL查询显卡信息
资源推荐
资源详情
资源评论
收起资源包目录
NMVL查询显卡信息 (1108个子文件)
NVML_Demo.cpp 11KB
kernel.cu 4KB
block_scan.cuh 114KB
block_discontinuity.cuh 52KB
device_radix_sort_dispatch.cuh 52KB
block_load.cuh 47KB
block_exchange.cuh 44KB
block_range_reduce_by_key.cuh 42KB
warp_scan.cuh 39KB
block_radix_sort.cuh 38KB
block_store.cuh 38KB
util_type.cuh 37KB
device_reduce_dispatch.cuh 37KB
block_rle_sweep.cuh 35KB
device_histogram.cuh 34KB
block_scan_raking.cuh 34KB
device_reduce.cuh 33KB
block_reduce_by_key_sweep.cuh 32KB
block_range_select.cuh 30KB
block_select_sweep.cuh 29KB
block_radix_sort_downsweep.cuh 28KB
device_reduce_by_key_dispatch.cuh 28KB
block_range_radix_sort_downsweep.cuh 28KB
device_histogram_dispatch.cuh 27KB
util_allocator.cuh 27KB
block_scan_prefix_operators.cuh 26KB
block_reduce.cuh 26KB
device_select_dispatch.cuh 25KB
warp_reduce.cuh 25KB
device_rle_dispatch.cuh 25KB
device_scan_dispatch.cuh 24KB
block_scan_sweep.cuh 22KB
block_range_scan.cuh 22KB
warp_scan_shfl.cuh 21KB
device_radix_sort.cuh 21KB
block_scan_warp_scans.cuh 20KB
block_scan_prefix_operators.cuh 19KB
device_scan.cuh 19KB
block_radix_rank.cuh 19KB
device_select.cuh 19KB
thread_load.cuh 19KB
util_ptx.cuh 18KB
block_range_reduce.cuh 18KB
block_reduce_sweep.cuh 18KB
thread_store.cuh 17KB
warp_scan_smem.cuh 17KB
warp_reduce_shfl.cuh 17KB
block_histogram.cuh 16KB
block_radix_sort_upsweep.cuh 15KB
block_range_radix_sort_upsweep.cuh 15KB
device_run_length_encode.cuh 14KB
warp_reduce_smem.cuh 14KB
device_partition.cuh 14KB
block_range_histo_sort.cuh 14KB
block_histogram_sort_sweep.cuh 14KB
block_histogram_sweep.cuh 13KB
block_range_histo.cuh 13KB
tex_ref_input_iterator.cuh 12KB
util_device.cuh 12KB
thread_scan.cuh 11KB
block_shift.cuh 11KB
block_reduce_raking.cuh 11KB
tex_obj_input_iterator.cuh 10KB
block_reduce_warp_reductions.cuh 10KB
block_range_histo_satomic.cuh 10KB
block_histogram_satomic_sweep.cuh 10KB
transform_input_iterator.cuh 9KB
arg_index_input_iterator.cuh 9KB
block_reduce_raking_commutative_only.cuh 8KB
util_arch.cuh 8KB
block_histogram_sort.cuh 8KB
cache_modified_output_iterator.cuh 8KB
cache_modified_input_iterator.cuh 8KB
thread_operators.cuh 8KB
constant_input_iterator.cuh 8KB
block_range_histo_gatomic.cuh 7KB
block_histogram_gatomic_sweep.cuh 7KB
counting_input_iterator.cuh 7KB
grid_even_share.cuh 7KB
grid_queue.cuh 7KB
thread_reduce.cuh 6KB
block_raking_layout.cuh 6KB
grid_barrier.cuh 6KB
grid_mapping.cuh 4KB
util_debug.cuh 4KB
spinlock.cuh 4KB
cub.cuh 4KB
util_macro.cuh 4KB
block_histogram_atomic.cuh 3KB
util_namespace.cuh 2KB
nvml.dll 1.15MB
cudart64_80.dll 359KB
cudart32_80.dll 292KB
NVML_Demo.exe 69KB
sobol_direction_vectors.h 64.28MB
nppi_statistics_functions.h 1.04MB
nppi_filtering_functions.h 835KB
nppi_arithmetic_and_logical_operations.h 572KB
cuda.h 465KB
device_functions_decls.h 376KB
共 1108 条
- 1
- 2
- 3
- 4
- 5
- 6
- 12
资源评论
- 妞寳寳2018-06-28有用,刚好有一个项目用到读取GPU信息
betterwgo
- 粉丝: 18
- 资源: 21
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功