Python:有没有办法在 GPU 上运行 TensorFlow 代码?
这是我的代码,我想让它使用 GPU 运行。目前它只在 CPU 上运行:
# Load a frozen FaceNet model (TF1-style .pb file) into a dedicated graph
# and open a session on it.
# NOTE(review): "elf.graph" in the original paste is a truncated
# "self.graph" — restored here.  Indentation was also lost in the paste;
# the GraphDef parsing must happen inside the graph's context manager.
self.graph = tf.Graph()
with self.graph.as_default():
    # Parse the serialized GraphDef from the frozen model file.
    self.face_graph = tf.GraphDef()
    # Context manager ensures the file handle is closed (the original
    # never closed `fid`).
    with tf.gfile.GFile(self.facenet_model, 'rb') as fid:
        serialized_graph = fid.read()
    self.face_graph.ParseFromString(serialized_graph)
    tf.import_graph_def(self.face_graph, name='')
# Session bound to the imported graph; input and output tensors are
# looked up by their names in the frozen graph.
self.facenet_sess = tf.Session(graph=self.graph)
self.images_placeholder = self.graph.get_tensor_by_name("input:0")
self.embeddings = self.graph.get_tensor_by_name("embeddings:0")
把它用 tf.device 包起来:

with tf.device('/GPU:0'):
    ...
TensorFlow 的官方网站上有一个完整的指南。首先确认 TF 能看到您的 GPU:
import tensorflow as tf

# Count the physical GPUs visible to TensorFlow; 0 means everything
# will run on the CPU.
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
print("Num GPUs Available: ", len(gpu_devices))
查看您的 GPU 的设备名称:
from tensorflow.python.client import device_lib
# List every local device (CPU/GPU) with its name, e.g. "/device:GPU:0".
# Bare expression: the result is displayed when run in a REPL/notebook.
device_lib.list_local_devices()
然后进行手动的设备放置(manual placement):
# Log which device each op is placed on, so placement can be verified.
tf.debugging.set_log_device_placement(True)

gpus = tf.config.experimental.list_logical_devices('GPU')
if gpus:
    # Replicate your computation on multiple GPUs.
    # NOTE(review): the body indentation below was destroyed in the
    # original paste; restored per the TensorFlow GPU guide.
    c = []
    for gpu in gpus:
        with tf.device(gpu.name):
            a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
            b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
            c.append(tf.matmul(a, b))

    # Sum the per-GPU results on the CPU.
    with tf.device('/CPU:0'):
        matmul_sum = tf.add_n(c)

    print(matmul_sum)
TensorFlow 的官方网站上有一个完整的指南。首先确认 TF 能看到您的 GPU:
import tensorflow as tf

# Count the physical GPUs visible to TensorFlow; 0 means everything
# will run on the CPU.
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
print("Num GPUs Available: ", len(gpu_devices))
查看您的 GPU 的设备名称:
from tensorflow.python.client import device_lib
# List every local device (CPU/GPU) with its name, e.g. "/device:GPU:0".
# Bare expression: the result is displayed when run in a REPL/notebook.
device_lib.list_local_devices()
然后进行手动的设备放置(manual placement):
# Log which device each op is placed on, so placement can be verified.
tf.debugging.set_log_device_placement(True)

gpus = tf.config.experimental.list_logical_devices('GPU')
if gpus:
    # Replicate your computation on multiple GPUs.
    # NOTE(review): the body indentation below was destroyed in the
    # original paste; restored per the TensorFlow GPU guide.
    c = []
    for gpu in gpus:
        with tf.device(gpu.name):
            a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
            b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
            c.append(tf.matmul(a, b))

    # Sum the per-GPU results on the CPU.
    with tf.device('/CPU:0'):
        matmul_sum = tf.add_n(c)

    print(matmul_sum)