Python: Problems when re-implementing matrix factorization in PyTorch

Tags: python, pytorch, mxnet, reproducible-research, matrix-factorization

I tried to implement matrix factorization in PyTorch. The original model is written in mxnet; here I try to use the same idea in PyTorch.

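For reference, this is roughly what the original Gluon model looks like, as a minimal sketch based on the d2l.ai recommender-systems chapter (names and details may differ slightly from the exact original):

from mxnet import gluon, np, npx
npx.set_np()

class MFGluon(gluon.nn.Block):
    def __init__(self, num_factors, num_users, num_items, **kwargs):
        super().__init__(**kwargs)
        self.P = gluon.nn.Embedding(input_dim=num_users, output_dim=num_factors)
        self.Q = gluon.nn.Embedding(input_dim=num_items, output_dim=num_factors)
        self.user_bias = gluon.nn.Embedding(num_users, 1)
        self.item_bias = gluon.nn.Embedding(num_items, 1)

    def forward(self, user_id, item_id):
        P_u = self.P(user_id)
        Q_i = self.Q(item_id)
        b_u = self.user_bias(user_id)
        b_i = self.item_bias(item_id)
        # per-example dot product plus user and item bias terms
        outputs = (P_u * Q_i).sum(axis=1) + np.squeeze(b_u) + np.squeeze(b_i)
        return outputs.flatten()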
Here is my code, which can be run directly in Colab:

import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader

import collections
from collections import defaultdict
from IPython import display
import math
from matplotlib import pyplot as plt
import os
import pandas as pd
import random
import re
import shutil
import sys
import tarfile
import time
import requests
import zipfile
import hashlib



# ============ data download helpers, copied from the original code without changes
DATA_HUB= {}

# Defined in file: ./chapter_multilayer-perceptrons/kaggle-house-price.md
def download(name, cache_dir=os.path.join('..', 'data')):
    """Download a file inserted into DATA_HUB, return the local filename."""
    assert name in DATA_HUB, f"{name} does not exist in {DATA_HUB}."
    url, sha1_hash = DATA_HUB[name]
    os.makedirs(cache_dir, exist_ok=True)
    fname = os.path.join(cache_dir, url.split('/')[-1])
    if os.path.exists(fname):
        sha1 = hashlib.sha1()
        with open(fname, 'rb') as f:
            while True:
                data = f.read(1048576)
                if not data:
                    break
                sha1.update(data)
        if sha1.hexdigest() == sha1_hash:
            return fname  # Hit cache
    print(f'Downloading {fname} from {url}...')
    r = requests.get(url, stream=True, verify=True)
    with open(fname, 'wb') as f:
        f.write(r.content)
    return fname




# Defined in file: ./chapter_multilayer-perceptrons/kaggle-house-price.md
def download_extract(name, folder=None):
    """Download and extract a zip/tar file."""
    fname = download(name)
    base_dir = os.path.dirname(fname)
    data_dir, ext = os.path.splitext(fname)
    if ext == '.zip':
        fp = zipfile.ZipFile(fname, 'r')
    elif ext in ('.tar', '.gz'):
        fp = tarfile.open(fname, 'r')
    else:
        assert False, 'Only zip/tar files can be extracted.'
    fp.extractall(base_dir)
    return os.path.join(base_dir, folder) if folder else data_dir


#1. obtain dataset
DATA_HUB['ml-100k'] = ('http://files.grouplens.org/datasets/movielens/ml-100k.zip',
    'cd4dcac4241c8a4ad7badc7ca635da8a69dddb83')


def read_data_ml100k():
    data_dir = download_extract('ml-100k')
    names = ['user_id', 'item_id', 'rating', 'timestamp']
    data = pd.read_csv(os.path.join(data_dir, 'u.data'), sep='\t', names=names,
                       engine='python')
    num_users = data.user_id.unique().shape[0]
    num_items = data.item_id.unique().shape[0]
    return data, num_users, num_items


# 2. Split data
#@save
def split_data_ml100k(data, num_users, num_items,
                      split_mode='random', test_ratio=0.1):
    """Split the dataset in random mode or seq-aware mode."""
    if split_mode == 'seq-aware':
        train_items, test_items, train_list = {}, {}, []
        for line in data.itertuples():
            u, i, rating, time = line[1], line[2], line[3], line[4]
            train_items.setdefault(u, []).append((u, i, rating, time))
            if u not in test_items or test_items[u][-1] < time:
                test_items[u] = (i, rating, time)
        for u in range(1, num_users + 1):
            train_list.extend(sorted(train_items[u], key=lambda k: k[3]))
        test_data = [(key, *value) for key, value in test_items.items()]
        train_data = [item for item in train_list if item not in test_data]
        train_data = pd.DataFrame(train_data)
        test_data = pd.DataFrame(test_data)
    else:
        mask = [True if x == 1 else False for x in np.random.uniform(
            0, 1, (len(data))) < 1 - test_ratio]
        neg_mask = [not x for x in mask]
        train_data, test_data = data[mask], data[neg_mask]
    return train_data, test_data

#@save
def load_data_ml100k(data, num_users, num_items, feedback='explicit'):
    users, items, scores = [], [], []
    inter = np.zeros((num_items, num_users)) if feedback == 'explicit' else {}
    for line in data.itertuples():
        user_index, item_index = int(line[1] - 1), int(line[2] - 1)
        score = int(line[3]) if feedback == 'explicit' else 1
        users.append(user_index)
        items.append(item_index)
        scores.append(score)
        if feedback == 'implicit':
            inter.setdefault(user_index, []).append(item_index)
        else:
            inter[item_index, user_index] = score
    return users, items, scores, inter


#@save
def split_and_load_ml100k(split_mode='seq-aware', feedback='explicit',
                          test_ratio=0.1, batch_size=256):
    data, num_users, num_items = read_data_ml100k()
    train_data, test_data = split_data_ml100k(data, num_users, num_items, split_mode, test_ratio)
    train_u, train_i, train_r, _ = load_data_ml100k(train_data, num_users, num_items, feedback)
    test_u, test_i, test_r, _ = load_data_ml100k(test_data, num_users, num_items, feedback)

    # Create Dataset
    train_set = MyData(np.array(train_u), np.array(train_i), np.array(train_r))
    test_set = MyData(np.array(test_u), np.array(test_i), np.array(test_r))

    # Create Dataloader
    train_iter = DataLoader(train_set, shuffle=True, batch_size=batch_size)
    test_iter = DataLoader(test_set, batch_size=batch_size)

    return num_users, num_items, train_iter, test_iter


class MyData(Dataset):
  def __init__(self, user, item, score):
    self.user = torch.tensor(user)
    self.item = torch.tensor(item)
    self.score = torch.tensor(score)
  
  def __len__(self):
    return len(self.user)
  
  def __getitem__(self, idx):
    return self.user[idx], self.item[idx], self.score[idx]


# create a nn class (just-for-fun choice :-) 
class RMSELoss(nn.Module):
    def __init__(self, eps=1e-6):
        '''Be careful with NaN, which appears when the MSE is exactly 0; adding self.eps avoids it.'''
        super().__init__()
        self.mse = nn.MSELoss()
        self.eps = eps
        
    def forward(self,yhat,y):
        loss = torch.sqrt(self.mse(yhat,y) + self.eps)
        return loss



class MF(nn.Module):
    def __init__(self, num_factors, num_users, num_items, **kwargs):
        super(MF, self).__init__(**kwargs)
        self.P = nn.Embedding(num_embeddings=num_users, embedding_dim=num_factors)
        self.Q = nn.Embedding(num_embeddings=num_items, embedding_dim=num_factors)
        self.user_bias = nn.Embedding(num_users, 1)
        self.item_bias = nn.Embedding(num_items, 1)

    def forward(self, user_id, item_id):
        P_u = self.P(user_id)
        Q_i = self.Q(item_id)
        

        b_u = self.user_bias(user_id)
        b_i = self.item_bias(item_id)

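        # note: .sum() with no dim argument reduces over the whole batch;
        # a per-example dot product would be (P_u * Q_i).sum(dim=1)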
        outputs = (P_u * Q_i).sum() + b_u.squeeze() + b_i.squeeze()
        return outputs
        



# train
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyper parameters
num_epochs = 50
batch_size = 512
lr = 0.001


num_users, num_items, train_iter, test_iter = split_and_load_ml100k(test_ratio=0.1, batch_size=batch_size)

model = MF(30, num_users, num_items).to(device)

# Loss and Optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
criterion = RMSELoss()

# Train the Model
train_rmse = []
test_rmse = []
for epoch in range(num_epochs):
    train_loss = 0
    num_train = 0
    model.train()
    for users, items, scores in train_iter:
        users = users.to(device)
        items = items.to(device)
        scores = scores.float().to(device)

        # Forward pass
        outputs = model(users, items)
        loss = criterion(outputs, scores)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        num_train += scores.shape[0]
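        # note: loss.item() is the RMSE of one whole batch, while num_train counts individual samples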
        
    train_rmse.append(train_loss / num_train)    

    model.eval()
    test_loss = 0
    num_test = 0
    with torch.no_grad():
        for users, items, scores in test_iter:
            users = users.to(device)
            items = items.to(device)
            scores = scores.float().to(device)

            outputs = model(users, items)
            loss = criterion(outputs, scores)
            
            test_loss += loss.item()
            num_test += scores.shape[0]
    
    test_rmse.append(test_loss / num_test)


# plot
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')

x = list(range(num_epochs))
fig = plt.figure()
ax = plt.axes()

plt.plot(x, train_rmse, label='train_rmse');
plt.plot(x, test_rmse, label='test_rmse');

leg = ax.legend();

The remaining fragments suggest two changes to the training setup: use Adam with weight decay (which matches the mxnet original) instead of plain SGD, and count batches rather than individual samples so that train_loss / num_train becomes the mean per-batch RMSE.

optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=wd)

num_train += 1
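
A minimal sketch of how these two changes could slot into the training loop above. The weight-decay value wd is an assumption here, since the fragment does not define it:

wd = 1e-5  # assumed value; not given in the fragment above
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=wd)

for epoch in range(num_epochs):
    train_loss, num_train = 0, 0
    model.train()
    for users, items, scores in train_iter:
        users, items = users.to(device), items.to(device)
        scores = scores.float().to(device)

        outputs = model(users, items)
        loss = criterion(outputs, scores)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        num_train += 1  # count batches, so the epoch value below is the mean per-batch RMSE
    train_rmse.append(train_loss / num_train)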