
qdap (version 2.2.1)

lexical_classification: Lexical Classification Score

Description

Apply a lexical classification score (content word to function word proportion) to a transcript by grouping variable(s) and optionally plot the breakdown of the model.

Usage

lexical_classification(text.var, grouping.var = NULL,
  order.by.lexical_classification = TRUE,
  function.words = qdapDictionaries::function.words, bracket = "all", ...)

Arguments

text.var
The text variable.
grouping.var
The grouping variables. Default NULL generates one word list for all text. Also takes a single grouping variable or a list of 1 or more grouping variables.
order.by.lexical_classification
logical. If TRUE, orders the results by lexical_classification score.
function.words
A vector of function words. Default is function.words.
bracket
The bracket type to remove. Use NULL to not remove bracketed substrings. See bracket argument in bracketX for bracket types.
...
Other arguments passed to bracketX.
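
As an illustration of the bracket argument (the bracketed aside below is made up for demonstration), bracketed substrings can be stripped before scoring or retained:

## Strip square-bracketed asides before scoring (illustrative only)
lexical_classification("I like the dog [not really].", bracket = "square")

## Keep bracketed text by setting bracket = NULL
lexical_classification("I like the dog [not really].", bracket = NULL)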

Value

  • A list containing the following components:
  • content: A data.frame of all content words used and corresponding frequencies
  • functional: A data.frame of all function words used and corresponding frequencies
  • raw: Sentence-level descriptive statistics on content vs. functional word use (ave.content.rate is also known as lexical density)
  • lexical_classification: Summarized (grouping variable level) descriptive statistics for content vs. functional word use
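
A quick way to inspect these components (a small sketch using the DATA.SPLIT example data that also appears in the Examples below):

out <- lexical_classification(DATA.SPLIT$state, DATA.SPLIT$person)
names(out)              ## component names (content, functional, raw, lexical_classification)
head(out[["content"]])  ## content word frequencies
scores(out)             ## grouping-variable level summary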

Details

Content words (i.e., nouns, verbs, adjectives, and adverbs) tend to be the words speakers stress in language use, whereas function words are the "glue" that holds the content together. Speakers devote much less time and stress to these words (i.e., pronouns, articles, conjunctions, quantifiers, and prepositions).
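
The reported score is based on the proportion of content (non-function) words. A minimal sketch of that idea, matching tokens against qdapDictionaries::function.words (an illustration only, not the function's internal implementation):

## Tokenize, lowercase, and flag function words (illustrative sketch)
txt <- "I did not like the dog."
wrds <- tolower(unlist(strsplit(gsub("[[:punct:]]", "", txt), "\\s+")))
is_function <- wrds %in% qdapDictionaries::function.words
mean(!is_function)  ## rough content (non-function) word proportion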

References

Chung, C. & Pennebaker, J. (2007). The psychological functions of function words. In K. Fiedler (Ed.), Social Communication (pp. 343-359). New York: Psychology Press.

Pulvermuller, F. (1999). Words in the brain's language. Behavioral and Brain Sciences, 22, 253-279. doi:10.1017/S0140525X9900182X

Segalowitz, S. J. & Lane, K. (2004). Perceptual fluency and lexical access for function versus content words. Behavioral and Brain Sciences, 27, 307-308. doi:10.1017/S0140525X04310071

Bell, A., Brenier, J. M., Gregory, M., Girand, C. & Jurafsky, D. (2009). Predictability effects on durations of content and function words in conversational English. Journal of Memory and Language, 60(1), 92-111. doi:10.1016/j.jml.2008.06.003

Examples

lexical_classification("I did not like the dog.")
lexical_classification(DATA.SPLIT$state, DATA.SPLIT$person)

(out <- with(pres_debates2012, lexical_classification(dialogue, list(person, time))))
plot(out)

scores(out)

out2 <- preprocessed(out)
htruncdf(out2)
plot(out2)

plot(out[["content"]])
dev.new()
plot(out[["functional"]])

## cloud of functional vs. content
## Highlight Content Words
set.seed(10)
par(mar = c(0,0,0,0))
list(
        content = out[["content"]],
        functional = out[["functional"]]
    ) %>%
    list_df2df("type") %>%
    dplyr::mutate(colors = ifelse(type == "functional", "gray80", "blue")) %>%
    with(., wordcloud::wordcloud(
        word,
        freq,
        min.freq = 8,
        random.order=FALSE,
        ordered.colors = TRUE,
        colors = colors
    ))
mtext("2012 Presidential Debates:\nFunctional vs. Content Word Use", padj=1.25)
legend(
    .05, .12, bty = "n",
    legend = c("functional", "content"),
    fill = c("gray80", "blue"),
    cex = .7
)

## Highlight Functional Words
set.seed(10)
par(mar = c(0,0,0,0))
list(
        content = out[["content"]],
        functional = out[["functional"]]
    ) %>%
    list_df2df("type") %>%
    dplyr::mutate(colors = ifelse(type == "functional", "red", "gray80")) %>%
    with(., wordcloud::wordcloud(
        word,
        freq,
        min.freq = 8,
        random.order=FALSE,
        ordered.colors = TRUE,
        colors = colors
    ))
mtext("2012 Presidential Debates:\nFunctional vs. Content Word Use", padj=1.25)
legend(
    .05, .12, bty = "n",
    legend = c("functional", "content"),
    fill = c("red", "gray80"),
    cex = .7
)

#=============#
## ANIMATION ##
#=============#
## EXAMPLE 1
lex_ani <- lexical_classification(DATA.SPLIT$state, DATA.SPLIT$person)
lexa <- Animate(lex_ani, content="white", functional="blue",
    current.color = "yellow", current.speaker.color="grey70")

bgb <- vertex_apply(lexa, label.color="grey80", size=20, color="grey40")
bgb <- edge_apply(bgb, label.color="yellow")

print(bgb, bg="black", net.legend.color ="white", pause=1)

## EXAMPLE 2
lex_ani2 <- lexical_classification(mraja1spl$dialogue, mraja1spl$person)
lexa2 <- Animate(lex_ani2, content="white", functional="blue",
    current.color = "yellow", current.speaker.color="grey70")

bgb2 <- vertex_apply(lexa2, label.color="grey80", size=17, color="grey40")
bgb2 <- edge_apply(bgb2, label.color="yellow")
print(bgb2, bg="black", pause=.75, net.legend.color = "white")

## EXAMPLE 3 (bar plot)
Animate(lex_ani2, type="bar")

## EXAMPLE 4 (text plot)
Animate(lex_ani2, type="text")

#======================#
## Complex Animations ##
#======================#
## EXAMPLE 1: Network + Text + Bar

library(animation)
library(grid)
library(gridBase)
library(qdap)
library(reports)
library(igraph)
library(plotrix)
library(ggplot2)    ## for theme()/ggplotGrob() used below
library(gridExtra)  ## for arrangeGrob() used below

lex_ani2 <- lexical_classification(mraja1spl$dialogue, mraja1spl$person)

## Set up the network version
lex_net <- Animate(lex_ani2, content="white", functional="blue",
    current.color = "yellow", current.speaker.color="grey70")
bgb <- vertex_apply(lex_net, label.color="grey80", size=17, color="grey40")
bgb <- edge_apply(bgb, label.color="yellow")


## Set up the bar version
lex_bar <- Animate(lex_ani2, type="bar")

## Set up the text
lex_text <- Animate(lex_ani2, type="text", size = 3, width=125, color="white")

## Generate a folder
loc <- reports::folder(animation_lexical_classification)
setwd(loc)

## Set up the plotting function
oopt <- animation::ani.options(interval = 0.1)


lex_text_bar <- Map(function(x, y){

    uns <- unit(c(-1.6,.5,-.2,.25), "cm")

    x <- x +
        theme(plot.margin = uns,
            text=element_text(color="white"),
            legend.text=element_text(color="white"),
            legend.background = element_rect(fill = "black"),
            panel.border = element_rect(color = "black"),
            panel.background = element_rect(fill = "black"),
            plot.background = element_rect(fill = "black",
                color="black"))

    uns2 <- unit(c(-.5,.5,-.45,.25), "cm")

    y <- y +
        theme(plot.margin = uns2,
            text=element_text(color="white"),
            legend.text=element_text(color="white"),
            legend.background = element_rect(fill = "black"),
            plot.background = element_rect(fill = "black",
                color="black"))

    gA <- ggplotGrob(x)
    gB <- ggplotGrob(y)
    maxWidth <- grid::unit.pmax(gA$widths[2:5], gB$widths[2:5])
    gA$widths[2:5] <- as.list(maxWidth)
    gB$widths[2:5] <- as.list(maxWidth)
    out <- arrangeGrob(gA, gB, ncol=1, heights = c(.3, .70))
    ## grid.draw(out)
    invisible(out)

}, lex_text, lex_bar)


FUN <- function(follow=FALSE, theseq = seq_along(bgb)) {

    Title <- "Animated Content Rate: Romeo and Juliet Act 1"
    Legend <- c(.2, -1, 1.5, -.95)
    Legend.cex <- 1

    lapply(theseq, function(i) {
        if (follow) {
            png(file=sprintf("%s/images/Rplot%s.png", loc, i),
                width=750, height=875)
        }
        ## Set up the layout
        layout(matrix(c(rep(1, 7), rep(2, 6)), 13, 1, byrow = TRUE))

        ## Plot 1
        par(mar=c(2, 0, 2, 0), bg="black")
        #par(mar=c(2, 0, 2, 0))
        set.seed(22)
        plot.igraph(bgb[[i]], edge.curved=TRUE)
        mtext(Title, side=3, col="white")
        color.legend(Legend[1], Legend[2], Legend[3], Legend[4],
              c("Functional", "Content"), attributes(bgb)[["legend"]],
              cex = Legend.cex, col="white")

        ## Plot2
        plot.new()
        vps <- baseViewports()

        print(lex_text_bar[[i]], vp = vpStack(vps$figure,vps$plot))

        animation::ani.pause()

        if (follow) {
            dev.off()
        }
    })

}

FUN()

## Detect OS
type <- if(.Platform$OS.type == "windows") shell else system


saveHTML(FUN(), autoplay = FALSE, loop = TRUE, verbose = FALSE,
    ani.height = 1000, ani.width=750,
    outdir = loc, single.opts =
    "'controls': ['first', 'previous', 'play', 'next', 'last', 'loop', 'speed'], 'delayMin': 0")

FUN(TRUE)

## EXAMPLE 2: Line + Text + Bar
## Generate a folder
loc2 <- reports::folder(animation_lexical_classification2)
setwd(loc2)

lex_ani2 <- lexical_classification(mraja1spl$dialogue, mraja1spl$person)

## Set up the bar version
lex_bar <- Animate(lex_ani2, type="bar")
cumline <- cumulative(lex_bar)
lex_line <- plot(cumline)
ylims <- range(cumline[[1]][-c(1:100)]) + c(-.1, .1)

## Set up the text
lex_text <- Animate(lex_ani2, type="text", size = 4, width = 80)


lex_line_text_bar <- Map(function(x, y, z){

    mar <- theme(plot.margin = unit(c(0, .5, 0, .25), "cm"))

    gA <- ggplotGrob(x + mar +
        theme(panel.background = element_rect(fill = NA, colour = NA),
            panel.border = element_rect(fill = NA, colour = NA),
            plot.background = element_rect(fill = NA, colour = NA)))
    gB <- ggplotGrob(y + mar)
    gC <- ggplotGrob(z + mar + ylab("Average Content Rate") +
        coord_cartesian(ylim = ylims) +
        ggtitle("Average Content Rate: Romeo & Juliet Act 1"))

    maxWidth <- grid::unit.pmax(gA$widths[2:5], gB$widths[2:5], gC$widths[2:5])
    gA$widths[2:5] <- as.list(maxWidth)
    gB$widths[2:5] <- as.list(maxWidth)
    gC$widths[2:5] <- as.list(maxWidth)
    out <- arrangeGrob(gC, gA, gB, ncol=1, heights = c(.38, .25, .37))
    ## grid.draw(out)
    invisible(out)

}, lex_text, lex_bar, lex_line)


FUN2 <- function(follow=FALSE, theseq = seq_along(lex_line_text_bar)) {


    lapply(theseq, function(i) {
        if (follow) {
            png(file=sprintf("%s/images/Rplot%s.png", loc2, i),
                width=750, height=875)
        }

        print(lex_line_text_bar[[i]])
        animation::ani.pause()

        if (follow) {
            dev.off()
        }
    })

}

FUN2()

## Detect OS
type <- if(.Platform$OS.type == "windows") shell else system

library(animation)
saveHTML(FUN2(), autoplay = FALSE, loop = TRUE, verbose = FALSE,
    ani.height = 1000, ani.width=750,
    outdir = loc2, single.opts =
    "'controls': ['first', 'previous', 'play', 'next', 'last', 'loop', 'speed'], 'delayMin': 0")

FUN2(TRUE)

#==================#
## Static Network ##
#==================#
(lexdat <- with(sentSplit(DATA, 4), lexical_classification(state, person)))
m <- Network(lexdat)
m
print(m, bg="grey97", vertex.color="grey75")

print(m, title="Lexical Content Discourse Map", title.color="white",
    bg="black", legend.text.color="white", vertex.label.color = "grey70",
    edge.label.color="yellow")

## or use themes:
dev.off()
m + qtheme()
m + theme_nightheat
dev.off()
m + theme_nightheat(title="Lexical Content Discourse Map",
    vertex.label.color = "grey50")

#==================================#
## Content Rate Over Time Example ##
#==================================#
lexpres <- lapply(with( pres_debates2012, split(dialogue, time)), function(x) {
    lexical_classification(x)
})
lexplots <- lapply(seq_along(lexpres), function(i) {
    dat <- cumulative(lexpres[[i]])
    m <- plot(dat)
    if (i != 2) m <- m + ylab("")
    if (i == 2) m <- m + ylab("Average Content Rate")
    if (i != 3) m <- m + xlab(NULL)
    if (i != 1) m <- m + theme(plot.margin=unit(c(0, 1, 0, .5) + .1, "lines"))
    m + ggtitle(paste("Debate", i)) +
        coord_cartesian(xlim = c(300, length(dat[[1]])),
            ylim = unlist(range(dat[[1]][-c(1:300)]) + c(-.25, .25)))
})

library(grid)
library(gridExtra)
do.call(grid.arrange, lexplots)
