使用带有 udpipe 注释的 content_transformer

使用带有 udpipe 注释的 content_transformer — 标签:r, tm, udpipe。我刚刚发现 udpipe 有一种展示相关性的很棒的方法,所以我开始研究它。如果我在导入 csv 文件后直接对其使用,不做任何更改,代码会运行得非常完美。但当我创建语料库并更改/删除一些单词时,问题就出现了。我不是 R 方面的专家,但在谷歌上搜索了很久,似乎还是弄不明白。这是我的代码:txt <- read_delim(fileName, ";", escape_double = FALSE, trim_ws = TRUE) # Maak Corpus docs <- Co

所以我刚刚发现udpipe有一种展示相关性的很棒的方法,所以我开始研究它。如果我在导入csv文件后将其用于csv文件,并且不对其进行任何更改,则来自的代码将非常完美

但当我创建语料库并更改/删除一些单词时,问题就出现了。我不是R方面的专家,但我在谷歌上搜索了这么多,似乎弄不懂

这是我的代码:

# Read the semicolon-delimited input file into a data frame.
txt <- read_delim(fileName, ";", escape_double = FALSE, trim_ws = TRUE)

# Maak Corpus (build the corpus from the imported text).
docs <- Corpus(VectorSource(txt))
# BUG FIX: `tolower` is not a tm transformation; passing it bare to tm_map
# replaces each document with a plain character vector and breaks the corpus
# (the likely cause of the downstream failure). Wrapping any non-tm function
# in content_transformer() keeps the document class intact, exactly as is
# already done for gsub below.
docs <- tm_map(docs, content_transformer(tolower))
docs <- tm_map(docs, removePunctuation)
docs <- tm_map(docs, removeNumbers)
docs <- tm_map(docs, stripWhitespace)
# Drop Dutch stopwords plus a project-specific word list.
docs <- tm_map(docs, removeWords, stopwords('nl'))
docs <- tm_map(docs, removeWords, myWords())
# Collapse spelling variants onto one canonical form each.
docs <- tm_map(docs, content_transformer(gsub), pattern = "afspraak|afspraken|afgesproken", replacement = "afspraak")
docs <- tm_map(docs, content_transformer(gsub), pattern = "communcatie|communiceren|communicatie|comminicatie|communiceer|comuniseren|comunuseren|communictatie|comminiceren|comminisarisacie|communcaite", replacement = "communicatie")
docs <- tm_map(docs, content_transformer(gsub), pattern = "contact|kontact|kontakt", replacement = "contact")

comments <- docs

library(lattice)

# Frequency table of the universal POS tags in the annotation.
stats <- txt_freq(x$upos)
stats <- transform(stats, key = factor(key, levels = rev(key)))
#barchart(key ~ freq, data = stats, col = "cadetblue", main = "UPOS (Universal Parts of Speech)\n frequency of occurrence", xlab = "Freq")

## NOUNS (zelfstandige naamwoorden)
# Keep only noun tokens, then chart the 20 most frequent ones.
nouns <- subset(x, upos %in% c("NOUN"))
stats <- txt_freq(nouns$token)
stats <- transform(stats, key = factor(key, levels = rev(key)))
barchart(key ~ freq, data = head(stats, 20), col = "cadetblue",
         main = "Most occurring nouns", xlab = "Freq")

## ADJECTIVES (bijvoeglijke naamwoorden)
# Keep only adjective tokens, then chart the 20 most frequent ones.
adjectives <- subset(x, upos %in% c("ADJ"))
stats <- txt_freq(adjectives$token)
stats <- transform(stats, key = factor(key, levels = rev(key)))
barchart(key ~ freq, data = head(stats, 20), col = "cadetblue",
         main = "Most occurring adjectives", xlab = "Freq")

## Using RAKE (harkjes)
# RAKE keyword extraction over lemmas, restricted to nouns and adjectives.
stats <- keywords_rake(x = x, term = "lemma", group = "doc_id",
                       relevant = x$upos %in% c("NOUN", "ADJ"))
stats <- transform(stats, key = factor(keyword, levels = rev(keyword)))
frequent <- subset(stats, freq > 3)
barchart(key ~ rake, data = head(frequent, 20), col = "cadetblue",
         main = "Keywords identified by RAKE", xlab = "Rake")

## Using Pointwise Mutual Information Collocations
# The lowercased token column serves as the collocation term.
x$word <- tolower(x$token)
stats <- keywords_collocation(x = x, term = "word", group = "doc_id")
stats <- transform(stats, key = factor(keyword, levels = rev(keyword)))
frequent <- subset(stats, freq > 3)
barchart(key ~ pmi, data = head(frequent, 20), col = "cadetblue",
         main = "Keywords identified by PMI Collocation",
         xlab = "PMI (Pointwise Mutual Information)")

## Using a sequence of POS tags (noun phrases / verb phrases)
# Recode UPOS tags to one-letter phrase-machine tags, then match simple
# noun-phrase patterns over them.
x$phrase_tag <- as_phrasemachine(x$upos, type = "upos")
np_pattern <- "(A|N)*N(P+D*(A|N)*N)*"
stats <- keywords_phrases(x = x$phrase_tag, term = tolower(x$token),
                          pattern = np_pattern, is_regex = TRUE,
                          detailed = FALSE)
# Keep multi-word phrases that occur more than three times.
stats <- subset(stats, ngram > 1 & freq > 3)
stats <- transform(stats, key = factor(keyword, levels = rev(keyword)))
barchart(key ~ freq, data = head(stats, 20), col = "cadetblue",
         main = "Keywords - simple noun phrases", xlab = "Frequency")


cooc <- cooccurrence(x = subset(x, upos %in% c("NOUN", "ADJ")), 
                                         term = "lemma", 
                                         group = c("doc_id", "paragraph_id", "sentence_id"))
head(cooc)
library(igraph)
library(ggraph)
library(ggplot2)
wordnetwork <- head(cooc, 30)
wordnetwork <- graph_from_data_frame(wordnetwork)
ggraph(wordnetwork, layout = "fr") +
    geom_edge_link(aes(width = cooc, edge_alpha = cooc), edge_colour = "pink") +
    geom_node_text(aes(label = name), col = "darkgreen", size = 4) +
    theme_graph(base_family = "Arial Narrow") +
    theme(legend.position = "none") +
    labs(title = "Cooccurrences within sentence", subtitle = "Nouns & Adjective")
一旦我将导入的文件转换为语料库,它就会失败。有人知道我如何才能先执行 tm_map 函数,然后再运行 udpipe 代码吗?


提前致谢

有多种解决方案可以满足您的需求。由于您的语料库是用 VectorSource 创建的,它实际上只是一个由输入构成的长向量。这可以很容易地转换回向量,以便让 udpipe 接管。

在udpipe示例文档中,所有内容都定义为x,因此我也将这样做。清理语料库后,只需执行以下操作:

# Convert the cleaned corpus back to a character vector so udpipe can use it.
# NOTE(review): the single-bracket `docs[1]` is deliberate — per the answer
# text, leaving the index off yields unwanted extra characters in the output.
x <- as.character(docs[1])
docs 后面的 [1] 很重要,否则您会得到一些不需要的额外字符。完成后,运行 udpipe 命令将向量转换为所需的 data.frame:

# Annotate the cleaned text with the previously loaded udpipe model, then
# flatten the annotation object into the data.frame the analysis code expects.
x <- udpipe_annotate(ud_model, x)
x <- as.data.frame(x)
另一种方法是先把语料库写入磁盘(参见 ?writeCorpus 获取更多信息),然后重新读取清理后的文件并将其交给 udpipe 处理。这更像是一种变通方法,但可能带来更好的工作流程。

udpipe 还会处理标点符号:如果您使用荷兰语模型,标点会被归入一个名为 PUNCT 的特殊 upos 类,并在 xpos 中以荷兰语描述(例如 Punc|komma 或 Punc|punt)。如果名词带有大写字母,其引理(lemma)仍会是小写形式。

就您的情况而言,我会直接用基本的正则表达式来处理数据,而不使用 tm。荷兰语的 stopwords 只是去掉了一些动词(如 zijn、worden、kunnen)、一些介词(如 te)以及代词(如 ik 和 we)。无论如何,您都可以在 udpipe 代码中过滤掉这些内容,因为您只查看名词和形容词。