Warning: file_get_contents(/data/phpspider/zhask/data//catemap/4/r/74.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
R:将具有多个URL的文本转换为datatable中可单独单击的URL_R_Shiny - Fatal编程技术网

R:将具有多个URL的文本转换为datatable中可单独单击的URL

R:将具有多个URL的文本转换为datatable中可单独单击的URL,r,shiny,R,Shiny,我有一个包含URL的文本数据框架,如下所示: df=data.frame(Text=c("Great weather today at the course, early tee off https://www.uspgatour.co.uk","Pizzas are my favorite here https://www.dinospizza.com and here https://www.mariospizza.com")) 我试图从每个文本中提取URL,并将它们存储在一个新列URL

我有一个包含URL的文本数据框架,如下所示:

# Two example tweets; the second one contains two URLs in the same text.
df <- data.frame(
  Text = c(
    "Great weather today at the course, early tee off https://www.uspgatour.co.uk",
    "Pizzas are my favorite here https://www.dinospizza.com and here https://www.mariospizza.com"
  )
)
我试图从每个文本中提取URL,并将它们存储在一个新列
URL

# Pull every URL out of each Text entry; with extract = TRUE, rm_url()
# returns a list column holding one character vector of URLs per row.
library(qdapRegex)
df$URL <- rm_url(df$Text, extract = TRUE)
对于仅包含一个URL的第一行,它已将其提取并存储在列中

但是,在包含两个URL的行中,结果存储为:

c("https://www.dinospizza.com", "https://www.mariospizza.com")

与单个URL不同，上面的结果在datatable中是不可点击的，因为Shiny将其视为单个URL，即使它实际上是两个URL的组合

我正在寻找一种分割每个URL的方法,以便将每个URL视为一个单独的链接,可以单击同一行。

下面是我在
server.R
中的代码部分,它将文本转换为可单击的URL

# Convert table to final

# Reactive that builds the summary table for page 1: filter the combined
# results by probability threshold, segment and date range, keep the 1000
# most recent rows per (global segment, dimension), then prettify columns
# for display.
# NOTE(review): results_combined, clean_text_proper, relabel_globalsegments
# and relabel_subsegments are assumed to be defined elsewhere in the app.
global_summary <- reactive({

  global_summarised <- results_combined %>%
    filter(SVM_PROB_QOL >= input$inp_pg1qolproba &
             globalsegment == input$inp_pg1segment &
             Date >= input$inp_pg1daterange[1] &
             Date <= input$inp_pg1daterange[2]) %>%
    select(SVM_LABEL_QOL, SVM_LABEL_DIMENSION, globalsegment, Segment,
           Account, Date, text, Type, URL) %>%
    filter(!is.na(SVM_LABEL_QOL) & SVM_LABEL_QOL == 'QoL' & !duplicated(text)) %>% # precautionary
    group_by(globalsegment, SVM_LABEL_DIMENSION) %>%
    top_n(1000, Date) %>%
    arrange(desc(Date))

  # Some cleaning up using pre-defined functions
  global_summarised$text <- clean_text_proper(global_summarised$text)
  global_summarised <- relabel_globalsegments(global_summarised)
  global_summarised <- relabel_subsegments(global_summarised)
  names(global_summarised) <- c("Classified", "Dimension", "Global Segment",
                                "Sub-Segment", "Client", "Created",
                                "QoL Tweet", "Tweet Type", "URL")

  # Make URLs clickable.
  # FIX: rm_url(..., extract = TRUE) yields a LIST column, so one cell may
  # hold several URLs. The old ifelse/paste0 pasted the whole vector into a
  # single fused (unclickable) anchor. Build one <a> tag per URL and join
  # them with commas so each link is individually clickable; behavior for a
  # single URL or an all-NA cell is unchanged (same markup, "" for NA).
  global_summarised$URL <- sapply(global_summarised$URL, function(u) {
    u <- u[!is.na(u)]
    if (length(u) == 0) {
      return("")
    }
    paste0("<a href='", u, "'>", u, "</a>", collapse = ",")
  })

  global_summarised
})

# And then render the table from the reactive expression.
# escape = FALSE so the <a> markup in the URL column is rendered as HTML.
output$global_summarised_table <- renderDataTable({
  tbl <- global_summary()
  datatable(tbl, options = list(pageLength = 5), escape = FALSE)
})
#将表格转换为最终表格
全局汇总=无功({
全局汇总=结果汇总%>%
过滤器(SVM_PROB_QOL>=输入$inp_pg1qolproba&globalsegment==输入$inp_pg1segment&Date>=输入$inp_pg1daterange[1]&Date%
选择(支持向量机标签、支持向量机标签、维度、全局段、段、科目、日期、文本、类型、URL)%>%
过滤器(!is.na(SVM_-LABEL_-QOL)&SVM_-LABEL_-QOL=='QOL'&重复(文本))%>%\
分组依据(全局分段,支持向量机标签维度)%>%
顶部(1000,日期)%>%
安排(说明(日期))
#使用预定义函数进行某些清理
全局_摘要$text=干净_文本_正确(全局_摘要$text)
全局汇总=重新标记全局分段(全局汇总)
全局汇总=重新标记子段(全局汇总)
名称(全球汇总)=c(“分类”、“维度”、“全球细分”、“子细分”、“客户”、“已创建”、“QoL推文”、“推文类型”、“URL”)
#使URL可点击
我想你可以

library(DT)
# Example: one row whose URL cell is a character vector of two links
# (I() keeps it as a list column instead of splitting into rows).
df <- data.frame(URL = I(list(c("https://www.dinospizza.com", "https://www.mariospizza.com"))))
# Turn each URL in a cell into its own <a> tag, then join with commas so
# every link in the row is individually clickable in the rendered table.
# vapply (not sapply) guarantees exactly one character result per row.
df$URL <- vapply(
  df$URL,
  function(x) paste(sprintf('<a href="%1$s">%1$s</a>', x), collapse = ","),
  character(1)
)
# escape = FALSE lets DT render the anchor HTML instead of showing it as text.
datatable(df, options = list(pageLength = 5), escape = FALSE)
库(DT)
df我想你能做到

library(DT)
# Demo data frame: a single row whose URL cell holds two links at once.
df <- data.frame(URL = I(list(c("https://www.dinospizza.com", "https://www.mariospizza.com"))))
# Convert every URL inside a cell to an anchor tag, then comma-join them
# so each one renders as a separate clickable link.
make_links <- function(urls) {
  anchors <- sprintf('<a href="%1$s">%1$s</a>', urls)
  paste(anchors, collapse = ",")
}
df$URL <- sapply(df$URL, make_links)
# escape = FALSE so the generated HTML is rendered rather than displayed.
datatable(df, options = list(pageLength = 5), escape = FALSE)
库(DT)
df