
Faster way to create a new data frame with a loop in R


Using df, I am creating a new data frame (final.df) that has one row for every date between the startdate and enddate of each claim in df.

df <- data.frame(claimid = c("123A", 
                             "125B", 
                             "151C", 
                             "124A", 
                             "325C"),
                 startdate = as.Date(c("2018-01-01", 
                                       "2017-05-20",
                                       "2017-12-15",
                                       "2017-11-05",
                                       "2018-02-06")),
                 enddate = as.Date(c("2018-01-06", 
                                     "2017-06-21",
                                     "2018-01-02",
                                     "2017-11-15",
                                     "2018-02-18")))

You can use gather from tidyr to convert the data from wide to long format, and then pad from padr to create the new date rows between the start and end dates. The group = "claimid" argument specifies the grouping variable:

library(dplyr)
library(tidyr)
library(padr)

df %>%
  gather(var, date, -claimid) %>%
  pad(group = "claimid") %>%
  select(-var)
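
For clarity: gather() first reshapes df into two rows per claim (one carrying the startdate, one carrying the enddate), and pad() then fills in the missing daily rows within each claimid group. The intermediate long data looks like this:

df %>% gather(var, date, -claimid)

#    claimid       var       date
# 1     123A startdate 2018-01-01
# 2     125B startdate 2017-05-20
# ...
# 6     123A   enddate 2018-01-06
# 7     125B   enddate 2017-06-21
# ...
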
Or use data.table for efficiency:

library(data.table)
setDT(df)[,.(date = seq(startdate, enddate, "days")), claimid]
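
Here seq() is evaluated once per claimid group inside j, and data.table binds the per-group results internally, so there is no repeated rbind of small data frames; that is largely why this approach is fast.
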
Result:

   claimid       date
1     123A 2018-01-01
2     123A 2018-01-02
3     123A 2018-01-03
4     123A 2018-01-04
5     123A 2018-01-05
6     123A 2018-01-06
7     124A 2017-11-05
8     124A 2017-11-06
9     124A 2017-11-07
10    124A 2017-11-08
11    124A 2017-11-09
12    124A 2017-11-10
13    124A 2017-11-11
14    124A 2017-11-12
15    124A 2017-11-13
16    124A 2017-11-14
17    124A 2017-11-15
18    125B 2017-05-20
19    125B 2017-05-21
20    125B 2017-05-22
...
Benchmark:

Setup functions:

library(tidyverse)
library(padr)
library(data.table)

# OP's function
claim_level <- function(a) {
  specific_row <- df[a, ]
  dates <- seq(specific_row$startdate, specific_row$enddate, by="days")
  day_level <- function(b) {
    day <- dates[b]
    data.frame(claimid = specific_row$claimid, date = day)
  }
  do.call("rbind", lapply(c(1:length(dates)), function(b) day_level(b))) 
}

OP_f = function(){
  do.call("rbind", lapply(c(1:nrow(df)), function(a) claim_level(a))) 
}

# useR's tidyverse + padr
f1 = function(){
  df %>%
    gather(var, date, -claimid) %>%
    pad(interval = "day", group = "claimid") %>%
    select(-var)
}

# useR's data.table
DT = df
setDT(DT)

f2 = function(){
  DT[,.(date = seq(startdate, enddate, "days")), claimid]
}

# Moody_Mudskipper's Base R
f3 = function(){
  do.call(rbind,
          Map(function(claimid, startdate, enddate)
            data.frame(claimid, date=as.Date(startdate:enddate, origin = "1970-01-01")),
            df$claimid, df$startdate, df$enddate))
}

# Moody_Mudskipper's tidyverse
f4 = function(){
  df %>% 
    group_by(claimid) %>% 
    mutate(date = list(as.Date(startdate:enddate, origin = "1970-01-01"))) %>%
    select(1, 4) %>% 
    unnest %>%
    ungroup
}

# MKR's tidyr expand
f5 = function(){
  df %>% 
    group_by(claimid) %>%
    expand(date = seq(startdate, enddate, by="day"))
}
Benchmark results:

library(microbenchmark)
microbenchmark(OP_f(), f1(), f2(), f3(), f4(), f5())

Unit: milliseconds
   expr       min        lq      mean    median        uq        max neval
 OP_f() 26.421534 27.697194 30.342682 28.981143 31.537396  58.071238   100
   f1() 36.133364 38.179196 40.749812 39.870931 41.367655  58.428888   100
   f2()  1.005843  1.261449  1.450633  1.383232  1.559689   4.058900   100
   f3()  2.373679  2.534148  2.786888  2.633035  2.797452   6.941421   100
   f4() 22.659097 23.341435 25.275457 24.111411 26.499893  40.840061   100
   f5() 46.445622 48.148606 52.565480 51.185478 52.845829 176.912276   100

data.table is the winner in terms of speed, and @Moody_Mudskipper's base R solution comes in second. Although padr::pad and tidyr::expand appear to be the most convenient, they are also the slowest (even slower than the OP's original approach).
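
If speed matters for the real data set, the ranking above is worth re-checking on an input closer to its actual size, since five claims is too small to be representative. A minimal sketch for generating a larger test set (the claim count and date ranges below are arbitrary assumptions, not taken from the question):

set.seed(42)
n <- 1e4                                    # arbitrary number of claims
big_df <- data.frame(
  claimid   = sprintf("C%05d", seq_len(n)),
  startdate = as.Date("2017-01-01") + sample(0:364, n, replace = TRUE)
)
big_df$enddate <- big_df$startdate + sample(1:30, n, replace = TRUE)

# Point the benchmarked functions at the larger data before re-running, e.g.
# df <- big_df; DT <- as.data.table(big_df)
# microbenchmark(OP_f(), f1(), f2(), f3(), f4(), f5(), times = 10)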

In base R:

do.call(rbind,
Map(function(claimid, startdate, enddate)
  data.frame(claimid, date=as.Date(startdate:enddate, origin = "1970-01-01")),
    df$claimid, df$startdate, df$enddate))

# claimid       date
# 1    123A 2018-01-01
# 2    123A 2018-01-02
# 3    123A 2018-01-03
# 4    123A 2018-01-04
# 5    123A 2018-01-05
# 6    123A 2018-01-06
#...
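
The startdate:enddate trick works because R stores Date objects as the number of days since 1970-01-01, so the colon operator produces an integer sequence that as.Date() converts back into dates. A small illustration (not part of the original answer):

d1 <- as.Date("2018-01-01")
d2 <- as.Date("2018-01-03")
as.integer(d1)                          # 17532 (days since 1970-01-01)
d1:d2                                   # 17532 17533 17534
as.Date(d1:d2, origin = "1970-01-01")   # "2018-01-01" "2018-01-02" "2018-01-03"
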
And using only the tidyverse:

library(tidyverse) # for `dplyr` and `tidyr`
df %>% 
  group_by(claimid) %>% 
  mutate(dates = list(as.Date(startdate:enddate, origin = "1970-01-01"))) %>%
  select(1, 4) %>% 
  unnest %>%
  ungroup

# # A tibble: 82 x 2
#   claimid      dates
#    <fctr>     <date>
# 1    123A 2018-01-01
# 2    123A 2018-01-02
# 3    123A 2018-01-03
# 4    123A 2018-01-04
# 5    123A 2018-01-05
# 6    123A 2018-01-06
# 7    125B 2017-05-20
# 8    125B 2017-05-21
# 9    125B 2017-05-22
# 10   125B 2017-05-23
# # ... with 72 more rows
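
Note that newer tidyr releases (1.0 and later, as far as I know) warn when unnest() is called without naming the list-column, so spelling it out keeps the pipeline quiet. The same pipeline with the column named explicitly:

df %>%
  group_by(claimid) %>%
  mutate(dates = list(as.Date(startdate:enddate, origin = "1970-01-01"))) %>%
  select(claimid, dates) %>%
  unnest(dates) %>%    # name the list-column explicitly
  ungroup()
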

One option is to use the tidyr::expand function to expand the rows between startdate and enddate:

library(tidyverse)
df %>% group_by(claimid) %>%
  expand(date = seq(startdate, enddate, by="day")) %>%
  as.data.frame()

#    claimid       date
# 1     123A 2018-01-01
# 2     123A 2018-01-02
# 3     123A 2018-01-03
# 4     123A 2018-01-04
# 5     123A 2018-01-05
# 6     123A 2018-01-06
# 7     124A 2017-11-05
# 8     124A 2017-11-06
# 9     124A 2017-11-07
# 10    124A 2017-11-08
# 11    124A 2017-11-09
# 12    124A 2017-11-10
#
#  70 more rows

Comments:

With your tidyverse approach I get the following error: "Error: Each column must either be a list of vectors or a list of data frames [dates]". It doesn't seem to want to unnest the dates column.

On your real data, or with the example data?

With the example data.

Which version of tidyverse, and of the other packages?

@bshelt141 I can't reproduce the error with tidyverse 1.2.1. Try restarting your session and running the code again.

If you care about speed, you could try library(data.table); setDT(df)[, .(date = seq(startdate, enddate, by = "day")), by = claimid]

Possible duplicate of:

It might be worth considering the performance of tidyr::expand.

@MKR Added it to the benchmark. Unfortunately it appears to be even slower than padr. It is handy, though, if you only have tidyverse loaded.

Thanks @useR, that's handy to know. At least we now have information about its performance :-). I found expand easy to use.