##
## basic examples
##
library(aqp)
library(lattice)
library(grid)
# load sample data, sp1 is a data.frame
data(sp1)
# using data.frame method of slab()
# aggregate entire collection with two different segment sizes
a <- slab(sp1, fm = ~ prop)
b <- slab(sp1, fm = ~ prop, seg_size=5)
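# optional check (not part of the original example): the 5-cm aggregation
# should contain roughly 1/5 as many depth slices as the 1-cm version
nrow(a)
nrow(b)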
# check output
str(a)
# stack into long format
ab <- make.groups(a, b)
ab$which <- factor(ab$which, levels=c('a','b'),
                   labels=c('1-cm Interval', '5-cm Interval'))
# manually compute mean +/- 1 SD
ab$upper <- with(ab, p.mean + p.sd)
ab$lower <- with(ab, p.mean - p.sd)
# use mean +/- 1 SD
# custom plotting function for uncertainty viz.
xyplot(top ~ p.mean | which, data=ab, ylab='Depth',
       xlab='mean bounded by +/- 1 SD',
       lower=ab$lower, upper=ab$upper, ylim=c(250,-5), alpha=0.5,
       panel=panel.depth_function,
       prepanel=prepanel.depth_function,
       cf=ab$contributing_fraction,
       layout=c(2,1), scales=list(x=list(alternating=1)))
# use median and IQR
# custom plotting function for uncertainty viz.
xyplot(top ~ p.q50 | which, data=ab, ylab='Depth',
       xlab='median bounded by 25th and 75th percentiles',
       lower=ab$p.q25, upper=ab$p.q75, ylim=c(250,-5), alpha=0.5,
       panel=panel.depth_function,
       prepanel=prepanel.depth_function,
       cf=ab$contributing_fraction,
       layout=c(2,1), scales=list(x=list(alternating=1)))
##
## categorical variable example
##
library(reshape)
data(sp1)
# normalize horizon names: result is a factor
sp1$name <- generalize.hz(sp1$name,
                          new=c('O','A','B','C'),
                          pat=c('O', '^A', '^B', 'C'))
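# optional sanity check (not part of the original example): tabulate the
# generalized horizon labels to confirm the pattern matching worked as intended
table(sp1$name)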
# compute slice-wise probability so that it sums to the contributing fraction, from 0-150 cm
a <- slab(sp1, fm= ~ name, class_prob_mode=1, seg_vect=0:150)
# reshape into long format for plotting
a.long <- melt(a, id.vars=c('top','bottom'), measure.vars=c('O','A','B','C'))
# plot horizon type proportions using panels
xyplot(top ~ value | variable, data=a.long, subset=value > 0,
       ylim=c(150, -5), type=c('S','g'), horizontal=TRUE, layout=c(4,1), col=1)
# again, this time using groups
xyplot(top ~ value, data=a.long, groups=variable, subset=value > 0,
       ylim=c(150, -5), type=c('S','g'), horizontal=TRUE, aspect=2)
# adjust probability to the size of the collection, from 0-150 cm
a.1 <- slab(sp1, fm= ~ name, class_prob_mode=2, seg_vect=0:150)
# reshape into long format for plotting
a.1.long <- melt(a.1, id.vars=c('top','bottom'), measure.vars=c('O','A','B','C'))
# group mode 1 and mode 2 data
g <- make.groups(mode_1=a.long, mode_2=a.1.long)
# plot horizon type proportions
xyplot(top ~ value | variable, groups=which, data=g, subset=value > 0,
       ylim=c(150, -5), type=c('S','g'), horizontal=TRUE, layout=c(4,1),
       auto.key=list(lines=TRUE, points=FALSE, columns=2),
       par.settings=list(superpose.line=list(col=c(1,2))))
# apply slice-wise evaluation of max probability, and assign ML-horizon at each slice
f.ML.hz <- function(i) {
  if(any(i > 0))
    which.max(i)
  else
    NA
}
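# quick illustration of f.ML.hz on made-up probability vectors (not part of
# the original example): it returns the index of the most likely class,
# or NA when all slice-wise probabilities are zero
f.ML.hz(c(0, 0.2, 0.7, 0.1)) # 3, i.e. 'B'
f.ML.hz(c(0, 0, 0, 0))       # NA: no classes observed at this slice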
a$name <- c('O','A','B','C')[apply(a[, c('O','A','B','C')], 1, f.ML.hz)]
# extract ML-horizon boundaries using RLE
# note that this will overestimate each change in ML-horizon by 1 depth slice
ml.rle <- rle(as.vector(na.omit(a$name)))
ml.hz.bounds <- cumsum(ml.rle$lengths) - 1
# composite into a data.frame for use later
(gen.hz.ml <- data.frame(
  hz=ml.rle$value,
  top=c(0, ml.hz.bounds[-length(ml.hz.bounds)]),
  bottom=ml.hz.bounds,
  stringsAsFactors=FALSE
))
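# optional check: thickness of each ML horizon, keeping in mind the
# 1-slice overestimate noted above
with(gen.hz.ml, bottom - top)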
##
## multivariate examples
##
data(sp3)
# add new grouping factor
sp3$group <- 1
sp3$group[as.numeric(sp3$id) > 5] <- 2
sp3$group <- factor(sp3$group)
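# optional check (not part of the original example): number of horizon
# records assigned to each group
table(sp3$group)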
# slot several variables at once
# within each level of 'group'
a <- slab(sp3, fm=group ~ L + A + B)
# pre-compute intervals
a$upper <- with(a, p.mean + p.sd)
a$lower <- with(a, p.mean - p.sd)
# check the results:
# note that 'group' is the column containing group labels
library(lattice)
xyplot(
  top ~ p.mean | variable, data=a, groups=group, subscripts=TRUE,
  lower=a$lower, upper=a$upper, ylim=c(125,-5), alpha=0.5,
  layout=c(3,1), scales=list(x=list(relation='free')),
  panel=panel.depth_function,
  prepanel=prepanel.depth_function
)
# convert the mean value of each variable into wide format
library(reshape)
a.wide <- cast(a, group + top + bottom ~ variable, value=c('p.mean'))
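# inspect the first few depth slices in wide format
head(a.wide)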
## again, this time for a user-defined slab from 40-60 cm
a <- slab(sp3, fm=group ~ L + A + B, seg_vect=c(40,60))
# now we have weighted average properties (within the defined slab) for each variable, and each group
(a.wide <- cast(a, group + top + bottom ~ variable, value=c('p.mean')))
## this time, compute the weighted mean of selected properties, by profile ID
a <- slab(sp3, fm=id ~ L + A + B, seg_vect=c(40,60))
(a.wide <- cast(a, id + top + bottom ~ variable, value=c('p.mean')))
## aggregate the entire collection:
## note the missing left-hand side of the formula
a <- slab(sp3, fm= ~ L + A + B)
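# as before, the aggregated values can be plotted with the same lattice
# idiom; a minimal sketch (not part of the original example), using
# mean +/- 1 SD as bounds:
xyplot(top ~ p.mean | variable, data=a,
       lower=a$p.mean - a$p.sd, upper=a$p.mean + a$p.sd,
       ylim=c(125,-5), alpha=0.5,
       layout=c(3,1), scales=list(x=list(relation='free')),
       panel=panel.depth_function,
       prepanel=prepanel.depth_function)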