# NOT RUN {
## stream tweets mentioning "election" for 90 seconds
e <- stream_tweets("election", timeout = 90)
## data frame where each observation (row) is a different tweet
e
## plot tweet frequency
ts_plot(e, "secs")
## stream tweets mentioning realDonaldTrump for 30 seconds
djt <- stream_tweets("realdonaldtrump", timeout = 30)
## preview tweets data
djt
## get user IDs of people who mentioned Trump
usrs <- users_data(djt)
## lookup users data
usrdat <- lookup_users(unique(usrs$user_id))
## preview users data
usrdat
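## a quick sketch (assumes the users data returned by lookup_users()
## includes screen_name and followers_count columns): preview the
## most-followed accounts among the mentioners
head(usrdat[order(-usrdat$followers_count), c("screen_name", "followers_count")])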
## store large amounts of tweets in files using continuous streams
## by default (i.e., with the query left blank), stream_tweets()
## returns a random sample of all tweets
stream_tweets(
  timeout = (60 * 10),
  parse = FALSE,
  file_name = "tweets1"
)
stream_tweets(
  timeout = (60 * 10),
  parse = FALSE,
  file_name = "tweets2"
)
## parse tweets at a later time using the parse_stream() function
tw1 <- parse_stream("tweets1.json")
tw1
tw2 <- parse_stream("tweets2.json")
tw2
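## the parsed streams can then be combined into a single data frame;
## a minimal sketch using rtweet's do_call_rbind()
tw <- do_call_rbind(list(tw1, tw2))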
## streaming tweets by specifying lat/long coordinates
## stream continental US tweets for 5 minutes
usa <- stream_tweets(
  c(-125, 26, -65, 49),
  timeout = 300
)
## use lookup_coords() for a shortcut version of the above code
usa <- stream_tweets(
  lookup_coords("usa"),
  timeout = 300
)
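## a rough sketch for mapping the results, assuming the maps package is
## installed and the stream returned geo-taggable tweets
usa <- lat_lng(usa)
par(mar = c(0, 0, 0, 0))
maps::map("state", lwd = .25)
with(usa, points(lng, lat, pch = 20, cex = .75))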
## stream world tweets for 5 mins, save to JSON file
## note: lookup_coords("world") provides a shortcut for these coordinates
world.old <- stream_tweets(
  c(-180, -90, 180, 90),
  timeout = (60 * 5),
  parse = FALSE,
  file_name = "world-tweets.json"
)
## read in JSON file
rtworld <- parse_stream("world-tweets.json")
## world data set with lat/lng coordinate variables
x <- lat_lng(rtworld)
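## quick scatter of the recovered coordinates (lat_lng() adds lat and
## lng columns)
with(x, plot(lng, lat))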
# }