TensorFlow 启用 GPU（检查 GPU 是否可用并打开设备日志）
import tensorflow as tf

# Show how many GPUs TensorFlow can see.
# (In the original paste the print() was swallowed by the comment and never ran.)
print("Num GPUs Available:", len(tf.config.list_physical_devices("GPU")))
输出: Num GPUs Available: 1
# Print the name of the default GPU device (empty string when no GPU is visible).
# (In the original paste both statements were inside the comment and never ran.)
gpu_device_name = tf.test.gpu_device_name()
print(gpu_device_name)
/device:GPU:0
# Check whether a GPU is available.
# NOTE(review): tf.test.is_gpu_available() is deprecated in TF 2.x;
# prefer bool(tf.config.list_physical_devices("GPU")) in new code.
# (In the original paste the call was inside the comment and never ran.)
print(tf.test.is_gpu_available())
True
# List the physical GPU devices. In a script (unlike a REPL) a bare
# expression's value is discarded, so print it explicitly.
print(tf.config.list_physical_devices('GPU'))
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
# Log which device each op and tensor is placed on.
# (In the original paste the call was inside the comment and never ran.)
tf.debugging.set_log_device_placement(True)
指定使用哪块 GPU（通过环境变量选择设备）
import os

# Restrict CUDA to GPU index 0 (the first GPU).
# NOTE(review): this environment variable must be set BEFORE importing
# tensorflow, otherwise the process has already enumerated all devices.
# (In the original paste the assignment was inside the comment and never ran.)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
查看设备映射（Device mapping）日志
# Create a TF1-compat session that logs device placement for every op.
# The original paste jammed three statements onto one line: it built an
# unused `cfig`, then created a session that was immediately overwritten
# (and never closed). Build the config once and create a single session.
config = tf.compat.v1.ConfigProto(log_device_placement=True)
sess = tf.compat.v1.Session(config=config)
输出: Device mapping:
/job:localhost/replica:0/task:0/device:GPU:0 -> device: 0, name: NVIDIA GeForce GTX 1060, pci bus id: 0000:01:00.0, compute capability: 6.1

浙公网安备 33010602011771号