% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/collocationAnalysis.R
\name{collocationAnalysis,KorAPConnection-method}
\alias{collocationAnalysis,KorAPConnection-method}
\alias{collocationAnalysis}
\title{Collocation analysis}
\usage{
\S4method{collocationAnalysis}{KorAPConnection}(
  kco,
  node,
  vc = "",
  lemmatizeNodeQuery = FALSE,
  minOccur = 5,
  leftContextSize = 5,
  rightContextSize = 5,
  topCollocatesLimit = 200,
  searchHitsSampleLimit = 20000,
  ignoreCollocateCase = FALSE,
  withinSpan = ifelse(exactFrequencies, "base/s=s", ""),
  exactFrequencies = TRUE,
  stopwords = append(RKorAPClient::synsemanticStopwords(), node),
  seed = 7,
  expand = length(vc) != length(node),
  maxRecurse = 0,
  addExamples = FALSE,
  thresholdScore = "logDice",
  threshold = 2,
  localStopwords = c(),
  collocateFilterRegex = "^[:alnum:]+-?[:alnum:]*$",
  queryMissingScores = FALSE,
  missingScoreQuantile = 0.05,
  vcLabel = NA_character_,
  ...
)
}
\arguments{
\item{kco}{\code{\link[=KorAPConnection]{KorAPConnection()}} object (obtained e.g. from \code{KorAPConnection()})}
\item{node}{target word}
\item{vc}{string describing the virtual corpus in which the query should be performed. An empty string (default) means the whole corpus, as far as it is license-wise accessible.}
\item{lemmatizeNodeQuery}{if TRUE, the node query will be lemmatized, i.e. \verb{x -> [tt/l=x]}}
\item{minOccur}{minimum absolute number of observed co-occurrences to consider a collocate candidate}
\item{leftContextSize}{size of the left context window}
\item{rightContextSize}{size of the right context window}
\item{topCollocatesLimit}{limit analysis to the n most frequent collocates in the search hits sample}
\item{searchHitsSampleLimit}{limit the size of the search hits sample}
\item{ignoreCollocateCase}{logical, set to TRUE if collocate case should be ignored}
\item{withinSpan}{KorAP span specification (see \url{https://korap.ids-mannheim.de/doc/ql/poliqarp-plus?embedded=true#spans}) for collocations to be searched within. Defaults to \code{base/s=s} if \code{exactFrequencies} is \code{TRUE}, and to the empty string otherwise.}
\item{exactFrequencies}{if FALSE, extrapolate observed co-occurrence frequencies from frequencies in search hits sample, otherwise retrieve exact co-occurrence frequencies}
\item{stopwords}{vector of stopwords not to be considered as collocates}
\item{seed}{seed for random page collecting order}
\item{expand}{if TRUE, \code{node} and \code{vc} parameters are expanded to all of their combinations}
\item{maxRecurse}{apply collocation analysis recursively \code{maxRecurse} times}
\item{addExamples}{If TRUE, examples for instances of collocations will be added in a column \code{example}. This makes a difference in particular if \code{node} is given as a lemma query.}
\item{thresholdScore}{association score function (see \code{\link{association-score-functions}}) to use for computing the threshold that is applied for recursive collocation analysis calls}
\item{threshold}{minimum value of \code{thresholdScore} function call to apply collocation analysis recursively}
\item{localStopwords}{vector of stopwords that will not be considered as collocates in the current function call; unlike \code{stopwords}, they are not passed to recursive calls}
\item{collocateFilterRegex}{allow only collocates matching the regular expression}
\item{queryMissingScores}{if TRUE, attempt to retrieve corpus-based association scores for vc/collocate combinations that would otherwise be imputed, by re-querying the KorAP backend without applying the collocate frequency threshold}
\item{missingScoreQuantile}{lower quantile (evaluated per association measure) that anchors the adaptive floor used for imputing missing scores between virtual corpora; a robust spread is subtracted from this anchor so the imputed values stay below the weakest observed scores}
\item{vcLabel}{optional label override for the current virtual corpus (used internally when named VC collections are expanded)}
\item{...}{more arguments will be passed to \code{\link[=collocationScoreQuery]{collocationScoreQuery()}}}
}
\value{
A tibble where each row represents a candidate collocate for the requested node.
Columns include (depending on the selected association measures):
\itemize{
\item \code{node}, \code{collocate}, \code{vc}, \code{label}: identifiers for the query node, collocate, virtual corpus, and optional label.
\item Frequency and contingency information such as \code{frequency}, \code{O}, \code{O1}, \code{O2}, \code{E}, \code{leftContextSize}, \code{rightContextSize}, and \code{w}.
\item Association measures (e.g. \code{logDice}, \code{ll}, \code{mi}, ...), one column per requested scorer.
\item Per-label association scores produced by multi-VC comparisons, using the pattern \code{<measure>_<label>}.
\item Ranks per label/measure with the pattern \code{rank_<label>_<measure>} (1 is best) and the corresponding percentile ranks \code{percentile_rank_<label>_<measure>}.
\item Pairwise contrasts for two-label comparisons, e.g. \code{delta_<measure>}, \code{delta_rank_<measure>}, and \code{delta_percentile_rank_<measure>}.
\item Summary columns describing the strongest labels per measure (\code{winner_*}, \code{runner_up_*}, \code{loser_*}, and \code{max_delta_*}).
\item Optional helper columns such as \code{query}, \code{example}, or \code{url} when example retrieval is requested.
}
}
\description{
Performs a collocation analysis for the given node (or query)
in the given virtual corpus.
}
\details{
The collocation analysis is currently implemented on the client side, as some of the
functionality is not yet provided by the KorAP backend. Mainly for this reason
it is very slow (several minutes, up to hours), but on the other hand very flexible.
You can, for example, perform the analysis in arbitrary virtual corpora, use complex node queries,
and look for expression-internal collocates using the focus function (see examples and demo).
To increase speed at the cost of accuracy and possible false negatives,
you can decrease \code{searchHitsSampleLimit} and/or \code{topCollocatesLimit}, and/or set \code{exactFrequencies} to \code{FALSE}.
Note that some outdated non-DeReKo back-ends might not yet support returning tokenized matches (a warning is issued).
In this case, the client library will fall back to client-side tokenization, which might be slightly less accurate.
This might lead to false negatives and to frequencies that differ from corresponding ones acquired via the web
user interface.
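
For instance, a minimal sketch of such a faster, less exact call (\code{kco} stands for a \code{KorAPConnection} object; the parameter values are merely illustrative, not recommendations):

\preformatted{kco |> collocationAnalysis("Packung",
  searchHitsSampleLimit = 1000,
  topCollocatesLimit = 20,
  exactFrequencies = FALSE
)}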
}
\examples{
\dontrun{
# Find top collocates of "Packung" inside and outside the sports domain.
KorAPConnection(verbose = TRUE) |>
  collocationAnalysis("Packung",
    vc = c("textClass=sport", "textClass!=sport"),
    leftContextSize = 1, rightContextSize = 1, topCollocatesLimit = 20
  ) |>
  dplyr::filter(logDice >= 5)
}
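\dontrun{
# Sketch: inspect the pairwise comparison columns produced by a two-VC
# analysis such as the one above. Column names like delta_logDice are
# assumptions that hold only if logDice is among the computed measures.
KorAPConnection(verbose = TRUE) |>
  collocationAnalysis("Packung",
    vc = c("textClass=sport", "textClass!=sport"),
    leftContextSize = 1, rightContextSize = 1, topCollocatesLimit = 20
  ) |>
  dplyr::arrange(dplyr::desc(delta_logDice)) |>
  dplyr::select(collocate, dplyr::starts_with("delta_"))
}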
\dontrun{
# Identify the most prominent light verb construction with "in ... setzen".
# Note that, currently, using the focus function requires exactFrequencies = FALSE.
KorAPConnection(verbose = TRUE) |>
  collocationAnalysis("focus(in [tt/p=NN] {[tt/l=setzen]})",
    leftContextSize = 1, rightContextSize = 0, exactFrequencies = FALSE, topCollocatesLimit = 20
  )
}
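\dontrun{
# Sketch: recursive collocation analysis, one level deep. Collocates that
# reach a logDice score of at least 3 seed a follow-up analysis; these
# threshold values are illustrative assumptions, not recommendations.
KorAPConnection(verbose = TRUE) |>
  collocationAnalysis("Packung",
    leftContextSize = 1, rightContextSize = 1, topCollocatesLimit = 20,
    maxRecurse = 1, thresholdScore = "logDice", threshold = 3
  )
}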
}
\seealso{
Other collocation analysis functions:
\code{\link{association-score-functions}},
\code{\link{collocationScoreQuery,KorAPConnection-method}},
\code{\link{synsemanticStopwords}()}
}
\concept{collocation analysis functions}