
Python: Reducing the batch size in PyTorch (tags: python, image-processing, deep-learning, pytorch)


I am new to PyTorch programming. I am getting an error that says CUDA is out of memory, so I have to reduce the batch size. Can someone tell me how to do that in the Python code? I also do not know what my current batch size is.

P.S. I am trying to run the super-resolution example from Deep Image Prior. Here is the code.

The error occurs when running the optimization. It says:

RuntimeError: CUDA out of memory


The batch size depends on the model. Usually it is the first dimension of your input tensor. Your model uses names that are different from the ones I have used before, and some of them are generic terms, so I am not sure about your model's topology or usage.
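
For illustration (this sketch is not from the original answer; the tensor x is just a placeholder), the batch size can be read off and reduced on a plain PyTorch input tensor like this:

import torch

# Dummy input batch laid out as (batch_size, channels, height, width)
x = torch.randn(16, 3, 256, 256)

print(x.shape[0])  # 16 -> the first dimension is usually the batch size

# Reducing the batch simply means feeding fewer samples per forward pass,
# for example by splitting the large batch into smaller chunks:
for chunk in torch.split(x, 4, dim=0):  # chunks with batch size 4
    print(chunk.shape)  # torch.Size([4, 3, 256, 256])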

You should post the code. Remember to put it in a code section, which you can find under the {} symbol in the editor toolbar. We do not know which framework you are using, but there is usually a keyword argument that specifies the batch size; in Keras, for example, it is batch_size (a PyTorch sketch of the same idea follows after these comments).
Oh OK. I will edit it. Thanks.
I suggest adding more specific tags to attract people who can help you. The "python" tag is fine, but it is a rather broad term. Maybe pytorch, sklearn, or whichever similar library you are using?
Yes. Thank you very much.
Please indent the code properly, and include the code of the optimize function.
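
Picking up the batch_size comment above: since the question is about PyTorch, the closest equivalent (a hypothetical sketch, not code from the post) is the batch_size argument of torch.utils.data.DataLoader:

import torch
from torch.utils.data import DataLoader, TensorDataset

# Hypothetical dataset, just to show where batch_size is set in PyTorch
dataset = TensorDataset(torch.randn(100, 3, 64, 64), torch.zeros(100))

# Lowering batch_size reduces how much data is pushed through the GPU per step
loader = DataLoader(dataset, batch_size=8, shuffle=True)

images, labels = next(iter(loader))
print(images.shape)  # torch.Size([8, 3, 64, 64])

The code posted with the question follows.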
from __future__ import print_function
import matplotlib.pyplot as plt
%matplotlib inline

import argparse
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

import numpy as np
from models import *

import torch
import torch.optim
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader


import warnings
warnings.filterwarnings("ignore")

from skimage.measure import compare_psnr
from models.downsampler import Downsampler

from utils.sr_utils import *

torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
dtype = torch.cuda.FloatTensor

imsize = -1
factor = 16 # 8
enforse_div32 = 'CROP' # we usually need the dimensions to be divisible by a power of two (32 in this case)
PLOT = True
path_to_image = '/home/smitha/deep-image-prior/tnew.tif'
imgs = load_LR_HR_imgs_sr(path_to_image , imsize, factor, enforse_div32)
imgs['bicubic_np'], imgs['sharp_np'], imgs['nearest_np'] = get_baselines(imgs['LR_pil'], imgs['HR_pil'])
if PLOT:
    plot_image_grid([imgs['HR_np'], imgs['bicubic_np'], imgs['sharp_np'], imgs['nearest_np']], 4,12);
    print ('PSNR bicubic: %.4f   PSNR nearest: %.4f' %  (
                                    compare_psnr(imgs['HR_np'], imgs['bicubic_np']), 
                                    compare_psnr(imgs['HR_np'], imgs['nearest_np'])))
input_depth = 8
INPUT =     'noise'
pad   =     'reflection'
OPT_OVER =  'net'
KERNEL_TYPE='lanczos2'
LR = 5
tv_weight = 0.0
OPTIMIZER = 'adam'
if factor == 16: 
    num_iter = 10
    reg_noise_std = 0.01
elif factor == 8:
    num_iter = 40
    reg_noise_std = 0.05
else:
    assert False, 'We did not experiment with other factors'
net_input = get_noise(input_depth, INPUT, (imgs['HR_pil'].size[1],  imgs['HR_pil'].size[0])).type(dtype).detach()
NET_TYPE = 'skip' # UNet, ResNet
net = get_net(input_depth, 'skip', pad,
          skip_n33d=128, 
          skip_n33u=128, 
          skip_n11=4, 
          num_scales=5,
          upsample_mode='bilinear').type(dtype)
mse = torch.nn.MSELoss().type(dtype)

img_LR_var = np_to_torch(imgs['LR_np']).type(dtype)

downsampler = Downsampler(n_planes=3, factor=factor,  kernel_type=KERNEL_TYPE, phase=0.5, preserve_size=True).type(dtype) 
def closure():
    global i, net_input
    if reg_noise_std > 0:
        net_input = net_input_saved + (noise.normal_() * reg_noise_std)


    out_HR = net(net_input)
    out_LR = downsampler(out_HR)

    total_loss = mse(out_LR, img_LR_var) 

    if tv_weight > 0:
        total_loss += tv_weight * tv_loss(out_HR)

    total_loss.backward()

    # Log
    psnr_LR = compare_psnr(imgs['LR_np'], torch_to_np(out_LR))
    psnr_HR = compare_psnr(imgs['HR_np'], torch_to_np(out_HR))
    print ('Iteration %05d    PSNR_LR %.3f   PSNR_HR %.3f' % (i, psnr_LR, psnr_HR), '\r', end='')

    # History
    psnr_history.append([psnr_LR, psnr_HR])

    if PLOT and i % 100 == 0:
        out_HR_np = torch_to_np(out_HR)
        plot_image_grid([imgs['HR_np'], imgs['bicubic_np'], np.clip(out_HR_np, 0, 1)], factor=13, nrow=3)

    i += 1

    return total_loss   

psnr_history = [] 
volatile=True  # unused leftover; 'volatile' was a flag on the old torch.autograd.Variable API and has no effect here
net_input_saved = net_input.detach().clone()
noise = net_input.clone()
i = 0
p = get_params(OPT_OVER, net, net_input)
optimize(OPTIMIZER, p, closure, LR, num_iter)
out_HR_np = np.clip(torch_to_np(net(net_input)), 0, 1)
result_deep_prior = put_in_center(out_HR_np, imgs['orig_np'].shape[1:])
plot_image_grid([imgs['HR_np'],
             imgs['bicubic_np'],
             out_HR_np], factor=4, nrow=1);
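
For context on the question itself: in this script the network is fed a single noise image, so the batch size is already 1. A rough, untested sketch (assuming the variable names from the code above are in scope) of where the memory actually goes and which knobs could be turned down:

# Hedged sketch, assuming the variables from the script above are defined.
print(net_input.shape)  # e.g. torch.Size([1, 8, H, W]); the first entry is the batch size, already 1

# With a batch of 1, "CUDA out of memory" usually comes from the image size or
# the network width, so possible (untested) adjustments would be, for example:
#   imsize = 512        # resize the input image instead of keeping imsize = -1
#   factor = 8          # a smaller super-resolution factor
#   net = get_net(input_depth, 'skip', pad,
#                 skip_n33d=64, skip_n33u=64,  # fewer channels per scale
#                 skip_n11=4, num_scales=4,
#                 upsample_mode='bilinear').type(dtype)

# Releasing cached GPU memory between runs can also help:
torch.cuda.empty_cache()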