Python: In torch.distributed, how do you correctly average gradients across different GPUs?


In torch.distributed, how do you correctly average the gradients on different GPUs?

With some modification, the following code can successfully use two GPUs (this can be checked with nvidia-smi).

But one thing is hard to understand: is "average_gradients(model)" below really the correct way to average the gradients of the two models on the two GPUs? As in the code below, the two "model = Net()" instances run by the two processes are two models on two different GPUs, yet the line "average_gradients(model)" looks as if it only "averages" the gradients of the model on one GPU, not of the two models on the two GPUs.

The question is: is the code below really the correct way to average the gradients across the two GPUs? If it is, how should the code be read and understood? If not, what is the correct way to average the gradients of the two models below?

import os
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from math import ceil
from random import Random
from torch.multiprocessing import Process
from torchvision import datasets, transforms

os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"


class Partition(object):
    """ Dataset-like object, but only access a subset of it. """

    def __init__(self, data, index):
        self.data = data
        self.index = index

    def __len__(self):
        return len(self.index)

    def __getitem__(self, index):
        data_idx = self.index[index]
        return self.data[data_idx]


class DataPartitioner(object):
    """ Partitions a dataset into different chuncks. """

    def __init__(self, data, sizes=[0.7, 0.2, 0.1], seed=1234):
        self.data = data
        self.partitions = []
        rng = Random()
        rng.seed(seed)
        data_len = len(data)
        indexes = [x for x in range(0, data_len)]
        rng.shuffle(indexes)
        for frac in sizes:
            part_len = int(frac * data_len)
            self.partitions.append(indexes[0:part_len])
            indexes = indexes[part_len:]

    def use(self, partition):
        return Partition(self.data, self.partitions[partition])


class Net(nn.Module):
    """ Network architecture. """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


def partition_dataset():
    """ Partitioning MNIST """
    dataset = datasets.MNIST(
        './data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ]))
    size = dist.get_world_size()
    bsz = int(256 / float(size))
    partition_sizes = [1.0 / size for _ in range(size)]
    partition = DataPartitioner(dataset, partition_sizes)
    partition = partition.use(dist.get_rank())
    train_set = torch.utils.data.DataLoader(
        partition, batch_size=bsz, shuffle=True)
    return train_set, bsz


def average_gradients(model):
    """ Gradient averaging. """
    size = float(dist.get_world_size())
    for param in model.parameters():
        dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)
        param.grad.data /= size


def run(rank, size):
    """ Distributed Synchronous SGD Example """
    # print("107 size = ", size)
    # print("dist.get_world_size() = ", dist.get_world_size())  ## 2
    torch.manual_seed(1234)
    train_set, bsz = partition_dataset()

    device = torch.device("cuda:{}".format(rank))

    model = Net()
    model = model.to(device)

    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
    num_batches = ceil(len(train_set.dataset) / float(bsz))
    for epoch in range(10):
        epoch_loss = 0.0
        for data, target in train_set:
            # data, target = Variable(data), Variable(target)
            # data, target = Variable(data.cuda(rank)), Variable(target.cuda(rank))
            data, target = data.to(device), target.to(device)

            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)
            epoch_loss += loss.item()
            loss.backward()
            average_gradients(model)
            optimizer.step()
        print('Rank ', dist.get_rank(), ', epoch ', epoch, ': ',
              epoch_loss / num_batches)
        # if epoch == 4:
        #     from utils import module_utils
        #     module_utils.save_model()


def init_processes(rank, size, fn, backend='gloo'):
    """ Initialize the distributed environment. """
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group(backend, rank=rank, world_size=size)
    fn(rank, size)


if __name__ == "__main__":
    size = 2
    processes = []
    for rank in range(size):
        p = Process(target=init_processes, args=(rank, size, run))
        p.start()
        processes.append(p)

    for p in processes:
        p.join()
My solution is to use DistributedDataParallel rather than DataParallel, as below.

The code

for param in self.model.parameters():
    torch.distributed.all_reduce(param.grad.data)
works successfully.
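Note that all_reduce uses SUM as its default reduction, so after the call every rank holds the summed gradients. If a true average is wanted, divide by the world size afterwards, exactly as average_gradients in the question does. A minimal sketch of the same loop with averaging:

    world_size = float(torch.distributed.get_world_size())
    for param in self.model.parameters():
        torch.distributed.all_reduce(param.grad.data)  # element-wise sum across ranks
        param.grad.data /= world_size                  # turn the sum into an average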

# Imports for the code below; Net and partition_dataset are as defined in the
# question, and module_utils is the author's own helper module.
import math
import time

import torch
import torch.distributed as dist
import torch.nn.functional as F
import torch.optim as optim


class DDPOptimizer:
    def __init__(self, model, torch_optim=None, learning_rate=None):
        """
        :param model:
        :param torch_optim: like torch.optim.Adam(parameters, lr=learning_rate, eps=1e-9)
            or optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
        :param learning_rate: overrides the optimizer's default lr if given
        """
        if torch_optim is None:
            torch_optim = torch.optim.Adam(model.parameters(), lr=3e-4, eps=1e-9)

        if learning_rate is not None:
            torch_optim.defaults["lr"] = learning_rate

        self.model = model
        self.optimizer = torch_optim

    def optimize(self, loss):
        self.optimizer.zero_grad()
        loss.backward()
        for param in self.model.parameters():
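            # all_reduce (default op: SUM) sums param.grad element-wise across all ranks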
            torch.distributed.all_reduce(param.grad.data)

        self.optimizer.step()
    pass

def run():
    """ Distributed Synchronous SGD Example """

    module_utils.initialize_torch_distributed()
    start = time.time()

    train_set, bsz = partition_dataset()
    model = Net()

    local_rank = torch.distributed.get_rank()
    device = torch.device("cuda", local_rank)
    model = model.to(device)

    sgd = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
    optimizer = DDPOptimizer(model, torch_optim=sgd)

    # optimizer = NoamOptimizerDistributed(100, 1, 10, model)

    num_batches = math.ceil(len(train_set.dataset) / float(bsz))

    epoch, end_epoch = 1, 10

    while epoch <= end_epoch:
        epoch_loss = 0.0
        for data, target in train_set:
            data, target = data.to(device), target.to(device)

            output = model(data)
            loss = F.nll_loss(output, target)
            epoch_loss += loss.item()
            optimizer.optimize(loss)

        print('Rank ', dist.get_rank(), ', epoch ', epoch, ': ', epoch_loss / num_batches)
        # if epoch % 6 == 0:
        #     if local_rank == 0:
        #         module_utils.save_model(model, "a.pt")
        epoch += 1

    print("Time take to train: ", time.time() - start)