# NOT RUN {
# Visit http://jean-romain.github.io/lidR/wiki for an illustrated and commented
# version of this example.
# This is a dummy example. It is more efficient to load the entire file than to
# split it into several pieces to process, even when using multiple cores.
# 1. Build a project (here, a single file catalog for the purposes of this example).
LASfile <- system.file("extdata", "Megaplot.laz", package="lidR")
project = catalog(LASfile)
plot(project)
# 2. Set some catalog options.
# For this dummy example, the tiling size is 120 m and the buffer is 15 m, using
# a single core (because this example is run on the CRAN server when the package
# is submitted).
buffer(project) = 15
cores(project) = 1
tiling_size(project) = 120
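# As a small sanity check (an addition, not part of the original example), the
# settings can be read back with the matching accessor functions:
# buffer(project)       # 15
# cores(project)        # 1
# tiling_size(project)  # 120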
# 3. Load the shapefile needed to filter your points.
folder <- system.file("extdata", "", package="lidR")
lake_shp = rgdal::readOGR(folder, "lake_polygons_UTM17")
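# Optional sanity check (a suggestion, not part of the original example): readOGR
# returns a Spatial* object, so its CRS and geometry can be inspected directly.
# sp::proj4string(lake_shp)  # should match the CRS of the catalog
# plot(lake_shp)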
# 4. Build the function that analyzes each cluster of the catalog.
# The function's first argument is a LAS object. The internal routine takes care
# of this part. The other arguments can be freely chosen by the user. See the
# following template:
tree_area = function(las, lake)
{
  # The 'las' argument is a LAS object with each field loaded and an extra column 'buffer'.
  # Associate the geographic data with the lidar points.
  lasclassify(las, lake, field = "lake")

  # Filter out the lake points and the low elevation points.
  las = lasfilter(las, lake == FALSE, Z > 4)

  if (is.null(las))
    return(NULL)

  # Segment the trees (in this example the low point density does not enable
  # accurate segmentation of trees; this is just a proof-of-concept).
  chm = grid_canopy(las, 1, subcircle = 0.3)
  chm = as.raster(chm)
  kernel = matrix(1, 3, 3)
  chm = raster::focal(chm, w = kernel, fun = mean, na.rm = TRUE)
  lastrees(las, algorithm = "watershed", chm = chm)

  # Here we use the function tree_metrics to compute some metrics for each tree,
  # relying on the user-defined function myMetrics defined later in the global
  # environment.
  m = tree_metrics(las, myMetrics(X, Y, Z, buffer))

  # If the minimum buffer value is 0, the tree lies at least partly in the
  # non-buffered area, so we want to keep it. However, trees on the edge of the
  # buffered area would be counted twice, so we remove the trees on the right
  # and top sides of the buffer: if the maximum buffer value is <= 2, the tree
  # belongs to the area of interest, to the left side or the bottom side, or both.
  m = m[minbuff == 0 & maxbuff <= 2]

  # Remove the buffering information, which is no longer useful.
  m[, c("minbuff", "maxbuff") := NULL]

  return(m)
}
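# Before processing the whole catalog, the function can be dry-run on a single LAS
# object. This is a hedged sketch (not in the original example): a plain readLAS
# object has no 'buffer' column (the catalog engine adds it internally), so a dummy
# one is attached here for the test.
# las_test = readLAS(LASfile, select = "xyz")
# las_test@data[, buffer := 0]  # pretend every point is in the area of interest
# head(tree_area(las_test, lake_shp))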
# This user-defined function extracts, for a single tree, the position of its
# highest point and some information about the buffering position of the tree.
# The function tree_metrics takes care of applying it to each segmented tree.
myMetrics <- function(x, y, z, buff)
{
  # Position of the highest point of the tree
  i = which.max(z)
  xcenter = x[i]
  ycenter = y[i]

  # Area of the tree and its buffer flags
  A = area(x, y)
  minbuff = min(buff)
  maxbuff = max(buff)

  return(
    list(
      x = xcenter,
      y = ycenter,
      area = A,
      minbuff = minbuff,
      maxbuff = maxbuff
    ))
}
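# A minimal standalone check of myMetrics on made-up coordinates (hypothetical
# values, added here only to illustrate the shape of the returned list):
# myMetrics(x = c(0, 1, 1), y = c(0, 0, 1), z = c(10, 12, 11), buff = c(0, 0, 0))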
# Everything is now well defined, so we can process an entire catalog with
# hundreds of files (although in this example we use just one file...)

# 5. Process the project. The extra arguments of the user-defined function must be
# provided in a labelled list. We also pass extra arguments to the function readLAS
# to load only the X, Y and Z coordinates. This way we save a huge amount of memory,
# which can be used for the current process.
fargs = list(lake = lake_shp)
output = catalog_apply(project, tree_area, fargs, select = "xyz")
# 6. Post-process the output (depending on what was computed). Here, each element
# of the list is a data.table, so rbindlist does the job:
output = data.table::rbindlist(output)
with(output, plot(x,y, cex = sqrt(area/pi)/5, asp = 1))
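# The merged table can also be saved for later use; fwrite is one option among
# others (a suggestion, not part of the original example):
# data.table::fwrite(output, "tree_positions.csv")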
# }