Python 如何正确分割数据并创建dok矩阵?

Python 如何正确分割数据并创建 dok 矩阵?(标签:python、dataframe、matrix、deep-learning、neural-network)我的模型本来可以正常运行,并把提前停止(early stopping)定义为回调。后来模型中断了,我让它在没有提前停止的情况下运行,loss 和 val_loss 之间的差距越来越大(见下图),这表明出现了过拟合。我检查了模型,问题出在数据拆分上,因为在没有验证数据的情况下运行时没有报错。我的数据集由若干次购买组成,一个用户可以购买多个商品。用户购买的商品如下所示 purchaseid itemid 0 0 3 1 0 8 2

我的模型本来可以正常运行,并把提前停止(early stopping)定义为回调。后来模型中断了,我让它在没有提前停止的情况下运行,loss 和 val_loss 之间的差距越来越大(见下图),这表明出现了过拟合。我检查了模型,问题出在数据拆分上,因为在没有验证数据的情况下运行时没有报错

我的数据集由几次购买组成。用户可以购买多个项目。用户购买的产品如下所示

            purchaseid  itemid
0           0           3
1           0           8
2           0           2
我想以 80:20 的比例分割数据帧。然而,同一次购买的所有记录应该始终保持在一起,而不是被随意拆开,所以我自己写了一个方法

比如我想要什么

   purchaseid  itemid
0           0       3
1           0       8
2           0       2
3           1      10
我想要的不是

   purchaseid  itemid
0           0       3
1           0       8

3           1      10
数据帧

# Toy purchase data: each purchaseid groups the itemids bought in one purchase
# (10 distinct purchases, itemids 0..13).
d = {'purchaseid': [0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 8, 9, 9, 9, 9],
     'itemid': [ 3, 8, 2, 10, 3, 10, 4, 12, 3, 12, 3, 4, 8, 6, 3, 0, 5, 12, 9, 9, 13, 1, 7, 11, 11]}
df = pd.DataFrame(data=d)

   purchaseid  itemid
0           0       3
1           0       8
2           0       2
3           1      10
4           2       3
PERCENTAGE_SPLIT = 20
NUM_NEGATIVES = 4
def splitter(df):
  """Split df into two frames by whole purchases.

  Roughly PERCENTAGE_SPLIT percent of the unique purchaseids (rounded)
  are drawn at random into the second frame; every row belonging to a
  purchase stays in the same frame, so no purchase is ever split.

  Returns [df_reduced, df_]: the larger "train" frame first, the
  randomly sampled "validation" frame second.
  """
  # (dropped the dead `df_ = pd.DataFrame()` initialiser -- df_ was
  # always reassigned before first use)
  sum_purchase = df['purchaseid'].nunique()
  amount = round((sum_purchase / 100) * PERCENTAGE_SPLIT)

  # sample whole purchase ids, then route complete rows by membership
  random_list = random.sample(df['purchaseid'].unique().tolist(), amount)
  df_ = df.loc[df['purchaseid'].isin(random_list)]
  df_reduced = df.loc[~df['purchaseid'].isin(random_list)]
  return [df_reduced, df_]

def generate_matrix(df_main, dataframe, name):
  """Build a sparse purchase x item indicator matrix.

  Rows are indexed by purchaseid and columns by itemid; the shape is
  taken from df_main (the full dataset) so the train and validation
  matrices share one shape. `name` is kept for interface compatibility
  but is unused.
  """
  # BUG FIX: the row dimension must span the purchaseid index space,
  # not df_main.shape[0] -- that counted every data row (25 here) and
  # inflated the user dimension fed to the model. Likewise the column
  # dimension must be max itemid + 1: with len(unique) the matrix is
  # too small whenever itemids are not contiguous, so
  # mat[purchaseid, itemid] = 1.0 raises IndexError. It only worked on
  # this sample because itemids happen to be exactly 0..13.
  n_rows = int(df_main['purchaseid'].max()) + 1
  n_cols = int(df_main['itemid'].max()) + 1
  mat = sp.dok_matrix((n_rows, n_cols), dtype=np.float32)
  for purchaseid, itemid in zip(dataframe['purchaseid'], dataframe['itemid']):
    mat[purchaseid, itemid] = 1.0

  return mat

# Split whole purchases ~80:20, then deep-copy each part so later
# mutations cannot leak between the train and validation frames.
dfs = splitter(df)
df_tr = dfs[0].copy(deep=True)
df_val = dfs[1].copy(deep=True)

# Both matrices are sized from the full df so they share one shape.
train_mat = generate_matrix(df, df_tr, 'train')
val_mat = generate_matrix(df, df_val, 'val')
.
.
.
def get_train_samples(train_mat, num_negatives):
    """Turn a dok purchase-item matrix into training triples.

    Every stored (u, i) entry yields one positive sample plus
    num_negatives randomly drawn negatives (items u did not buy).

    Returns (user_input, item_input, labels) as parallel lists, where
    labels is 1 for positives and 0 for negatives.
    """
    user_input, item_input, labels = [], [], []
    _, num_item = train_mat.shape  # user count is not needed for sampling
    for (u, i) in train_mat.keys():
        # positive instance
        user_input.append(u)
        item_input.append(i)
        labels.append(1)
        # negative instances: resample until we hit an unpurchased item.
        # NOTE(review): this loops forever if user u bought every item --
        # fine for sparse purchase data, but worth a guard on dense input.
        for _ in range(num_negatives):
            j = np.random.randint(num_item)
            while (u, j) in train_mat.keys():
                j = np.random.randint(num_item)
            user_input.append(u)
            item_input.append(j)
            labels.append(0)
    return user_input, item_input, labels

# Model dimensions are taken from the training matrix shape.
# NOTE(review): these are only correct if generate_matrix sized the
# matrix as (num distinct purchaseids, num distinct itemids) -- verify.
num_users, num_items = train_mat.shape

model = get_model(num_users, num_items, ...)

# Positive + sampled-negative triples for both splits.
user_input, item_input, labels = get_train_samples(train_mat, NUM_NEGATIVES)
val_user_input, val_item_input, val_labels = get_train_samples(val_mat, NUM_NEGATIVES)

hist = model.fit([np.array(user_input), np.array(item_input)], np.array(labels),
                 validation_data=([np.array(val_user_input), np.array(val_item_input)], np.array(val_labels)))
我不知道问题是出在数据的拆分上,还是出在 dok 矩阵的创建上。有人能看出这里是否有 bug 吗?拆分本身没有报错——我用几个数据集测试过,始终得到了预期的结果。那么问题可能出在 dok 矩阵上,还是出在
get_train_samples()

代码

# Toy purchase data: each purchaseid groups the itemids bought in one purchase
# (10 distinct purchases, itemids 0..13).
d = {'purchaseid': [0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 8, 9, 9, 9, 9],
     'itemid': [ 3, 8, 2, 10, 3, 10, 4, 12, 3, 12, 3, 4, 8, 6, 3, 0, 5, 12, 9, 9, 13, 1, 7, 11, 11]}
df = pd.DataFrame(data=d)

   purchaseid  itemid
0           0       3
1           0       8
2           0       2
3           1      10
4           2       3
PERCENTAGE_SPLIT = 20
NUM_NEGATIVES = 4
def splitter(df):
  """Split df into two frames by whole purchases.

  Roughly PERCENTAGE_SPLIT percent of the unique purchaseids (rounded)
  are drawn at random into the second frame; every row belonging to a
  purchase stays in the same frame, so no purchase is ever split.

  Returns [df_reduced, df_]: the larger "train" frame first, the
  randomly sampled "validation" frame second.
  """
  # (dropped the dead `df_ = pd.DataFrame()` initialiser -- df_ was
  # always reassigned before first use)
  sum_purchase = df['purchaseid'].nunique()
  amount = round((sum_purchase / 100) * PERCENTAGE_SPLIT)

  # sample whole purchase ids, then route complete rows by membership
  random_list = random.sample(df['purchaseid'].unique().tolist(), amount)
  df_ = df.loc[df['purchaseid'].isin(random_list)]
  df_reduced = df.loc[~df['purchaseid'].isin(random_list)]
  return [df_reduced, df_]

def generate_matrix(df_main, dataframe, name):
  """Build a sparse purchase x item indicator matrix.

  Rows are indexed by purchaseid and columns by itemid; the shape is
  taken from df_main (the full dataset) so the train and validation
  matrices share one shape. `name` is kept for interface compatibility
  but is unused.
  """
  # BUG FIX: the row dimension must span the purchaseid index space,
  # not df_main.shape[0] -- that counted every data row (25 here) and
  # inflated the user dimension fed to the model. Likewise the column
  # dimension must be max itemid + 1: with len(unique) the matrix is
  # too small whenever itemids are not contiguous, so
  # mat[purchaseid, itemid] = 1.0 raises IndexError. It only worked on
  # this sample because itemids happen to be exactly 0..13.
  n_rows = int(df_main['purchaseid'].max()) + 1
  n_cols = int(df_main['itemid'].max()) + 1
  mat = sp.dok_matrix((n_rows, n_cols), dtype=np.float32)
  for purchaseid, itemid in zip(dataframe['purchaseid'], dataframe['itemid']):
    mat[purchaseid, itemid] = 1.0

  return mat

# Split whole purchases ~80:20, then deep-copy each part so later
# mutations cannot leak between the train and validation frames.
dfs = splitter(df)
df_tr = dfs[0].copy(deep=True)
df_val = dfs[1].copy(deep=True)

# Both matrices are sized from the full df so they share one shape.
train_mat = generate_matrix(df, df_tr, 'train')
val_mat = generate_matrix(df, df_val, 'val')
.
.
.
def get_train_samples(train_mat, num_negatives):
    """Turn a dok purchase-item matrix into training triples.

    Every stored (u, i) entry yields one positive sample plus
    num_negatives randomly drawn negatives (items u did not buy).

    Returns (user_input, item_input, labels) as parallel lists, where
    labels is 1 for positives and 0 for negatives.
    """
    user_input, item_input, labels = [], [], []
    _, num_item = train_mat.shape  # user count is not needed for sampling
    for (u, i) in train_mat.keys():
        # positive instance
        user_input.append(u)
        item_input.append(i)
        labels.append(1)
        # negative instances: resample until we hit an unpurchased item.
        # NOTE(review): this loops forever if user u bought every item --
        # fine for sparse purchase data, but worth a guard on dense input.
        for _ in range(num_negatives):
            j = np.random.randint(num_item)
            while (u, j) in train_mat.keys():
                j = np.random.randint(num_item)
            user_input.append(u)
            item_input.append(j)
            labels.append(0)
    return user_input, item_input, labels

# Model dimensions are taken from the training matrix shape.
# NOTE(review): these are only correct if generate_matrix sized the
# matrix as (num distinct purchaseids, num distinct itemids) -- verify.
num_users, num_items = train_mat.shape

model = get_model(num_users, num_items, ...)

# Positive + sampled-negative triples for both splits.
user_input, item_input, labels = get_train_samples(train_mat, NUM_NEGATIVES)
val_user_input, val_item_input, val_labels = get_train_samples(val_mat, NUM_NEGATIVES)

hist = model.fit([np.array(user_input), np.array(item_input)], np.array(labels),
                 validation_data=([np.array(val_user_input), np.array(val_item_input)], np.array(val_labels)))
结果

# Toy purchase data: each purchaseid groups the itemids bought in one purchase
# (10 distinct purchases, itemids 0..13).
d = {'purchaseid': [0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 8, 9, 9, 9, 9],
     'itemid': [ 3, 8, 2, 10, 3, 10, 4, 12, 3, 12, 3, 4, 8, 6, 3, 0, 5, 12, 9, 9, 13, 1, 7, 11, 11]}
df = pd.DataFrame(data=d)

   purchaseid  itemid
0           0       3
1           0       8
2           0       2
3           1      10
4           2       3
PERCENTAGE_SPLIT = 20
NUM_NEGATIVES = 4
def splitter(df):
  """Split df into two frames by whole purchases.

  Roughly PERCENTAGE_SPLIT percent of the unique purchaseids (rounded)
  are drawn at random into the second frame; every row belonging to a
  purchase stays in the same frame, so no purchase is ever split.

  Returns [df_reduced, df_]: the larger "train" frame first, the
  randomly sampled "validation" frame second.
  """
  # (dropped the dead `df_ = pd.DataFrame()` initialiser -- df_ was
  # always reassigned before first use)
  sum_purchase = df['purchaseid'].nunique()
  amount = round((sum_purchase / 100) * PERCENTAGE_SPLIT)

  # sample whole purchase ids, then route complete rows by membership
  random_list = random.sample(df['purchaseid'].unique().tolist(), amount)
  df_ = df.loc[df['purchaseid'].isin(random_list)]
  df_reduced = df.loc[~df['purchaseid'].isin(random_list)]
  return [df_reduced, df_]

def generate_matrix(df_main, dataframe, name):
  """Build a sparse purchase x item indicator matrix.

  Rows are indexed by purchaseid and columns by itemid; the shape is
  taken from df_main (the full dataset) so the train and validation
  matrices share one shape. `name` is kept for interface compatibility
  but is unused.
  """
  # BUG FIX: the row dimension must span the purchaseid index space,
  # not df_main.shape[0] -- that counted every data row (25 here) and
  # inflated the user dimension fed to the model. Likewise the column
  # dimension must be max itemid + 1: with len(unique) the matrix is
  # too small whenever itemids are not contiguous, so
  # mat[purchaseid, itemid] = 1.0 raises IndexError. It only worked on
  # this sample because itemids happen to be exactly 0..13.
  n_rows = int(df_main['purchaseid'].max()) + 1
  n_cols = int(df_main['itemid'].max()) + 1
  mat = sp.dok_matrix((n_rows, n_cols), dtype=np.float32)
  for purchaseid, itemid in zip(dataframe['purchaseid'], dataframe['itemid']):
    mat[purchaseid, itemid] = 1.0

  return mat

# Split whole purchases ~80:20, then deep-copy each part so later
# mutations cannot leak between the train and validation frames.
dfs = splitter(df)
df_tr = dfs[0].copy(deep=True)
df_val = dfs[1].copy(deep=True)

# Both matrices are sized from the full df so they share one shape.
train_mat = generate_matrix(df, df_tr, 'train')
val_mat = generate_matrix(df, df_val, 'val')
.
.
.
def get_train_samples(train_mat, num_negatives):
    """Turn a dok purchase-item matrix into training triples.

    Every stored (u, i) entry yields one positive sample plus
    num_negatives randomly drawn negatives (items u did not buy).

    Returns (user_input, item_input, labels) as parallel lists, where
    labels is 1 for positives and 0 for negatives.
    """
    user_input, item_input, labels = [], [], []
    _, num_item = train_mat.shape  # user count is not needed for sampling
    for (u, i) in train_mat.keys():
        # positive instance
        user_input.append(u)
        item_input.append(i)
        labels.append(1)
        # negative instances: resample until we hit an unpurchased item.
        # NOTE(review): this loops forever if user u bought every item --
        # fine for sparse purchase data, but worth a guard on dense input.
        for _ in range(num_negatives):
            j = np.random.randint(num_item)
            while (u, j) in train_mat.keys():
                j = np.random.randint(num_item)
            user_input.append(u)
            item_input.append(j)
            labels.append(0)
    return user_input, item_input, labels

# Model dimensions are taken from the training matrix shape.
# NOTE(review): these are only correct if generate_matrix sized the
# matrix as (num distinct purchaseids, num distinct itemids) -- verify.
num_users, num_items = train_mat.shape

model = get_model(num_users, num_items, ...)

# Positive + sampled-negative triples for both splits.
user_input, item_input, labels = get_train_samples(train_mat, NUM_NEGATIVES)
val_user_input, val_item_input, val_labels = get_train_samples(val_mat, NUM_NEGATIVES)

hist = model.fit([np.array(user_input), np.array(item_input)], np.array(labels),
                 validation_data=([np.array(val_user_input), np.array(val_item_input)], np.array(val_labels)))