From 44a1496efa7bb3f958f4e3f4c3c8a90e1e9bbced Mon Sep 17 00:00:00 2001 From: Datseris Date: Fri, 30 Jan 2026 14:48:35 +0000 Subject: [PATCH 01/19] first step of reworking the API and Docs --- docs/Project.toml | 1 + docs/make.jl | 65 ++++---- docs/src/api.md | 79 ++++++++++ docs/src/index.md | 6 +- docs/src/tutorial.jl | 178 ++++++++++++++++++++++ docs/src/tutorial.md | 14 ++ docs/src/tutorial/distributions.md | 149 ------------------ src/core/complexity_measures_interface.jl | 34 +++++ 8 files changed, 343 insertions(+), 183 deletions(-) create mode 100644 docs/src/api.md create mode 100644 docs/src/tutorial.jl create mode 100644 docs/src/tutorial.md create mode 100644 src/core/complexity_measures_interface.jl diff --git a/docs/Project.toml b/docs/Project.toml index 7346e67..bd86c98 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -11,6 +11,7 @@ DynamicalSystems = "61744808-ddfa-5f27-97ff-6e42cc95d634" Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" +Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" RecurrenceMicrostatesAnalysis = "cb83a08b-85c6-4e94-91aa-4e946c7d4f0c" StateSpaceSets = "40b095a5-5852-4c12-98c7-d43bf788e795" diff --git a/docs/make.jl b/docs/make.jl index 3f472f4..27d42e2 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -5,45 +5,48 @@ using DocumenterCitations using RecurrenceMicrostatesAnalysis using StateSpaceSets +# Convert tutorial file to markdown +import Literate +Literate.markdown("src/tutorial.jl", "src"; credit = false) + pages = [ "Welcome" => "index.md", - "Tutorial" => [ - "Distributions" => "tutorial/distributions.md", - "Quantifiers" => "tutorial/quantifiers.md", - "Recurrence Functions" => "tutorial/recurrences.md", - "Shapes and Sampling" => "tutorial/shapes_and_sampling.md", - "GPU" => "tutorial/gpu.md", - "Utils" => "tutorial/utils.md", - ], - "Examples" => [ - "Machine 
Learning" => "examples/ml.md", - ], - "Developers" => "dev.md", + "Tutorial" => "tutorial.md", + # "API" => "api.md", + # "Tutorial" => [ + # "Distributions" => "tutorial/distributions.md", + # "Quantifiers" => "tutorial/quantifiers.md", + # "Recurrence Functions" => "tutorial/recurrences.md", + # "Shapes and Sampling" => "tutorial/shapes_and_sampling.md", + # "GPU" => "tutorial/gpu.md", + # "Utils" => "tutorial/utils.md", + # ], + # "Ecosystem Integration" => [ + # "Machine Learning" => "examples/ml.md", + # ], + "Developers docs" => "dev.md", "References" => "refs.md", ] +# Apply JuliaDynamics theme, choosing a specific branch (easier debugging) +github_user = "JuliaDynamics" +branch = "master" +download_path = "https://raw.githubusercontent.com/$github_user/doctheme/$branch/" + +import Downloads +Downloads.download( + "$download_path/build_docs_with_style.jl", + joinpath(@__DIR__, "build_docs_with_style.jl") +) +include("build_docs_with_style.jl") + +using DocumenterCitations + bib = CitationBibliography( joinpath(@__DIR__, "refs.bib"); style=:authoryear ) -makedocs( - sitename = "RecurrenceMicrostatesAnalysis.jl", - format = Documenter.HTML( - prettyurls = true, - collapselevel = 3, - ), - modules = [RecurrenceMicrostatesAnalysis, StateSpaceSets, ComplexityMeasures], - pages = pages, - doctest = false, - checkdocs = :exported, - warnonly = [:doctest, :missing_docs], - plugins = [bib] +build_docs_with_style(pages, RecurrenceMicrostatesAnalysis, ComplexityMeasures, StateSpaceSets; + expandfirst = ["index.md"], bib, ) - -deploydocs( - repo = "github.com/DynamicsUFPR/RecurrenceMicrostatesAnalysis.jl.git", - target = "build", - push_preview = true, - versions = ["stable" => "v^", "v#.#", "dev" => "dev"] -) \ No newline at end of file diff --git a/docs/src/api.md b/docs/src/api.md new file mode 100644 index 0000000..7ac6d30 --- /dev/null +++ b/docs/src/api.md @@ -0,0 +1,79 @@ +# API + +# ## Probabilities and counts + +```@docs +RecurrenceMicrostates 
+RecurrenceExpression +CorridorRecurrence +ThresholdRecurrence # What uised to be called Standard +``` + +# ## Specification of recurrence microstates + +```@docs +RecurrenceExpression +MicrostateShape +``` + + + + +Alternatively, a [`RecurrenceExpression`](@ref) can be specified directly: +```julia +distribution([x], expr::RecurrenceExpression, n::Int; kwargs...) +``` +**Example:** +```@example quick_example +expr = Corridor(0.05, 0.27) +dist = distribution(ssset, expr, 2) +``` + +If a custom [`MicrostateShape`](@ref) is required, the call simplifies to: +```julia +distribution([x], shape::MicrostateShape; kwargs...) +``` +**Example:** +```@example quick_example +shape = Triangle(Standard(0.27), 3) +dist = distribution(ssset, shape) +``` + + +# ## Sampling modes + +```@docs +SRandom +what else? +``` + +# ## Computational specification + +```@docs +CPUCore +GPUCore +``` + +The following needs to change: + +``` +This method automatically selects a [`CPUCore`](@ref) when `x` is a [`StateSpaceSet`](@ref) +and a [`GPUCore`](@ref) when `x` is an `AbstractGPUVector`. By default, square microstates of size `n` are used. + +Additional keyword arguments include: +- `rate::Float64`: Sampling rate (default: `0.05`). +- `sampling::SamplingMode`: Sampling mode (default: [`SRandom`](@ref)). +- `metric::Metric`: Distance metric from [Distances.jl](https://github.com/JuliaStats/Distances.jl). When using a [`GPUCore`](@ref), a [`GPUMetric`](@ref) must be provided. +``` + +all these keyword arguments should not be given to the +`probabilities` function. THey should be given to a generic computation type that is a field of the RecurrenceMicrostates!!! + +The following must be in the GPUCore struct: + +!!! warning + GPU backends require inputs of type `Float32`. `Float64` inputs are not supported on GPU. 
+ + + +# ## \ No newline at end of file diff --git a/docs/src/index.md b/docs/src/index.md index e3be29d..c35e9e1 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,12 +1,12 @@ # RecurrenceMicrostatesAnalysis.jl + ```@docs RecurrenceMicrostatesAnalysis ``` -!!! todo "GitHub" - RecurrenceMicrostatesAnalysis.jl is an open-source package available on [GitHub](https://github.com/DynamicsUFPR/RecurrenceMicrostatesAnalysis.jl). +!!! info "Citation and credit" If you find this package useful, please consider giving it a star on GitHub and don't forget to cite [our work](https://doi.org/10.1063/5.0293708). 😉 - + ## About the documentation The documentation of **RecurrenceMicrostatesAnalysis.jl** is designed to explain how to use the package while also introducing the theoretical background of the RMA framework. The bibliography used throughout the documentation is listed in the [References](@ref) section; **please remember to cite the appropriate works if you use them**. diff --git a/docs/src/tutorial.jl b/docs/src/tutorial.jl new file mode 100644 index 0000000..2f5f30e --- /dev/null +++ b/docs/src/tutorial.jl @@ -0,0 +1,178 @@ +# # Tutorial for RecurrenceMicrostatesAnalysis.jl + +# In this tutorial we go through a typical usage of **RecurrenceMicrostatesAnalysis.jl**. +# We'll see how to calculate distributions of recurrence microstates, +# how to optimize our choices regarding the distribution generation, +# and how to perform Recurrence Microstate Analysis (RMA). + +# But first, we'll start with a crash course! + +# ## Crash-course into RMA + +# Recurrence Plots (RPs) were introduced in 1987 by Eckmann et al. +# [Eckmann1987RP](@cite) as a method for analyzing dynamical systems through recurrence +# properties. + +# Consider a time series $\vec{x}_i \in \mathbb{R}^d$, $i \in \{1, 2, \dots, K\}$, +# where $K$ is the length of the time series and $d$ is the dimension of the phase space. 
+# The recurrence plot is defined by the recurrence matrix +# ```math +# R_{i,j} = \Theta(\varepsilon - \|\vec x_i - \vec x_j\|), +# ``` +# where $\Theta(\cdot)$ denotes the Heaviside step function and $\varepsilon$ is the recurrence +# threshold. + +# The following figure shows examples of recurrence plots for different systems: +# (a) white noise; +# (b) a superposition of harmonic oscillators; +# (c) a logistic map with a linear trend; +# (d) Brownian motion. + +# ![Image of four RPs with their timeseries](../assets/rps.png) + +# A recurrence microstate is a local structure extracted from an RP. For a given microstate +# shape and size, the set of possible microstates is finite. For example, square microstates +# with size $N = 2$ yield $16$ distinct configurations. + +# ![Image of the 16 squared microstates to N = 2](../assets/microstates.png) + +# Recurrence Microstates Analysis (RMA) uses the probability distribution of these microstates +# as a source of information for characterizing dynamical systems. + +# ## Probability distributions of recurrence microstates + +# Finding and counting microstates in data is straightforward. +# It amounts to passing the input data to the `probabilities` function, +# while specifying the options of the `RecurrenceMicrostates` estimator, +# which essentially means e.g., what sort of distance threshold defines a recurrence, +# and what is maximum microstate size to consider. + +# Let's first generate some data of a chaotic map using **DynamicalSystems.jl**: + +using DynamicalSystemsBase + +function henon_rule(u, p, n) + x, y = u # system state + a, b = p # system parameters + xn = 1.0 - a*x^2 + y + yn = b*x + return SVector(xn, yn) +end + +u0 = [0.2, 0.3] +p0 = [1.4, 0.3] + +henon = DeterministicIteratedMap(henon_rule, u0, p0) + +total_time = 10_000 +X, t = trajectory(henon, total_time) +X + +# Notice that `X` is already a [`StateSpaceSet`](@ref). 
Because **RecurrenceMicrostatesAnalysis.jl** +# is part of **DynamicalSystems.jl**, this data type is the preferred input type. +# Other types are also possible however, see the documentation of the +# [`RecurrenceMicrostates`](@ref) central type for more. + +# Now, we specify the recurrence microstate configuration + +ε = 0.27 +N = 2 +rmspace = RecurrenceMicrostates(ε, N) + +# and finally call + +probs = probabilities(ospace, X) + +# The [`probability`](@ref) function is the same function as in [`ComplexityMeasures`](@ref). +# Given an outcome space, that is a way to _symbolize_ input data into discrete outcomes, +# `probabilities` return the probability (relative occurrence frequency) for each outcome. +# And indeed, the recurrence microstates is an outcome space. + +# If instead of the probabilities of the microstates you want their direct count +# simply replace `probabilities` with `counts`. + +counts(rmspace, X) + + +# ## Recurrence microstates analysis (RMA) + +# To actually analyze your data, there are two ways forwards. +# One way, is to utilize these probabilities within the interface provided +# by [`ComplexityMeasures`](@ref) to calculate entropies. +# For example, the corresponding Shannon entropy is + +entropy(Shannon(), probs) + +# (note that the API of `ComplexityMeasures` is re-exported by `RecurrenceMicrostateAnalysis`). +# This number corresponds to the **recurrence microstate entropy** as defined in our +# publication [`WhichPaperIscorrectToCite`](@cite). + +# `ComplexityMeasures` allows the convenience syntax of + +entropy(Shannon(), rmspace, X) + +# so in this case we wouldn't need to calculate the probabilities directly. +# Naturally, any other entropy could be estimated instead, + +entropy(Tsallis(), rmspace, X) + +# although we haven't explored alternative entropies in research yet. 
+ +# The secon way forwards is the more traditional recurrence quantification analysis +# route, where you estimate (approximate really) various quantities +# such as laminarity that fundamentally relate with the context of recurrences. +# For example, + +# These quantities are listed in XXX. + +# Note that if instead of + + + +# ## Optimizing recurrence specification + +# In the above example we blindly selected the recurrence threshold `ε`. +# A better approach is to optimize it, so it (for example) maximizes +# the recurrence microstate entropy. +# This can be done with the [`optimize`](@ref) function + +ε, S = optimize(Threshold(), RecurrenceEntropy(), X, N) +rmspace = RecurrenceMicrostates(ε, N) +h = entropy(Shannon(), rmspace, X) +(h, S) + + +# ## Custom specification of recurrence microstates + +# When we write `rmspace = RecurrenceMicrostates(ε, N)`, +# we are in fact accepting a default definition for both what counts as a recurrence +# as well as what recurrence microstates to examine. +# We can alter either, by choosing the recurrence expression, or the specific +# microstate(s) we wish to analyze. For example + +expr = CorridorRecurrence(0.05, 0.27) +shape = MicrostateTriangle(lalala) +rmspace = RecurrenceMicrostates(; expression = expr, shape) +probabilities(rmspace, X) + +# More details are given in [`RecurrenceMicrostates`](@ref) +# and the [API](@ref) section of the docs. + + +# ## Cross recurrence plots + +# For cross-recurrences, nearly nothing changes for you, nor for the source code +# of the code base! Simply call `function(..., rmspace, X, Y)`, adding an additional +# final argument `Y` corresponding to the second trajectory from which cross recurrences are estimated. 
+ +# For example, here are the cross recurrence microstate distribution for +# the original Henon map trajectory and one at slightly different parameters + +set_parameter!(henon, 1, 1.35) +Y, t = trajectory(henon, total_time) +probabilities(rmspace, X, Y) + +# This augmentation from one to two input data +# works for all functions discussed in this tutorial. + +# ## Spatial data diff --git a/docs/src/tutorial.md b/docs/src/tutorial.md new file mode 100644 index 0000000..c0f1a68 --- /dev/null +++ b/docs/src/tutorial.md @@ -0,0 +1,14 @@ +```@meta +EditURL = "tutorial.jl" +``` + +````@example tutorial +using Distributions, RecurrenceMicrostatesAnalysis +data = rand(Uniform(0, 1), 10_000); +ssset = StateSpaceSet(data) + +ε = 0.27 +N = 2 +dist = distribution(ssset, ε, N) +```` + diff --git a/docs/src/tutorial/distributions.md b/docs/src/tutorial/distributions.md index d12779b..3242216 100644 --- a/docs/src/tutorial/distributions.md +++ b/docs/src/tutorial/distributions.md @@ -8,92 +8,6 @@ simple application example. Next, we present [A brief review](@ref) of Recurrenc and RMA. Finally, we explain the [`distribution`](@ref) function in [Computing RMA distributions](@ref), including the role of [Histograms](@ref). -## Quick start with RecurrenceMicrostatesAnalysis.jl - -This section presents concise examples illustrating how to use the package. RMA distributions -are computed using the [`distribution`](@ref) function, which returns a [`Probabilities`](@ref) -structure containing the microstate distribution. - -We start with a simple example based on a uniform random process. First, we generate the data -and convert it into a [`StateSpaceSet`](@ref): -```@example quick_example -using Distributions, RecurrenceMicrostatesAnalysis -data = rand(Uniform(0, 1), 10_000); -ssset = StateSpaceSet(data) -``` - -Next, we compute the RMA distribution. This requires specifying the recurrence threshold -$\varepsilon$ and the microstate size $N$. 
These parameters are discussed in more detail in [A brief review](@ref) and [Optimizing a parameter](@ref). - -```@example quick_example -ε = 0.27 -N = 2 -dist = distribution(ssset, ε, N) -``` - -As another example, we use [DynamicalSystems.jl](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/dynamicalsystems/stable/) -to generate data from the Hénon map, following the example presented in its documentation: -```@example quick_example -using DynamicalSystems - -function henon_rule(u, p, n) # here `n` is "time", but we don't use it. - x, y = u # system state - a, b = p # system parameters - xn = 1.0 - a*x^2 + y - yn = b*x - return SVector(xn, yn) -end - -u0 = [0.2, 0.3] -p0 = [1.4, 0.3] - -henon = DeterministicIteratedMap(henon_rule, u0, p0) - -total_time = 10_000 -X, t = trajectory(henon, total_time) -X -``` - -Finally, we compute the RMA distribution of the trajectory `X`. Here, the threshold is selected -using [`optimize`](@ref) by maximizing the recurrence microstate entropy: -```@example quick_example -ε, S = optimize(Threshold(), RecurrenceEntropy(), X, N) -``` - -```@example quick_example -dist = distribution(X, ε, N) -``` - -## A brief review - -Recurrence Plots (RPs) were introduced in 1987 by Eckmann et al. [Eckmann1987RP](@cite) as a method for analyzing dynamical systems through recurrence -properties. - -Consider a time series $\vec{x}_i \in \mathbb{R}^d$, $i \in \{1, 2, \dots, K\}$, where $K$ is the length of the time series and $d$ is the dimension of the phase space. -The recurrence plot is defined by the recurrence matrix -```math -R_{i,j} = \Theta(\varepsilon - \|\vec x_i - \vec x_j\|), -``` -where $\Theta(\cdot)$ denotes the Heaviside step function and $\varepsilon$ is the recurrence -threshold. - -The following figure shows examples of recurrence plots for different systems: -(a) white noise; -(b) a superposition of harmonic oscillators; -(c) a logistic map with a linear trend; -(d) Brownian motion. 
- -![Image of four RPs with their timeseries](../assets/rps.png) - -A recurrence microstate is a local structure extracted from an RP. For a given microstate -shape and size, the set of possible microstates is finite. For example, square microstates -with size $N = 2$ yield $16$ distinct configurations. - -![Image of the 16 squared microstates to N = 2](../assets/microstates.png) - -Recurrence Microstates Analysis (RMA) uses the probability distribution of these microstates -as a source of information for characterizing dynamical systems. - ## Computing RMA distributions The computation of RMA distributions is the core functionality of @@ -109,62 +23,7 @@ A commonly used convenience interface is: ```julia distribution([x], ε::Float, n::Int; kwargs...) ``` -This method automatically selects a [`CPUCore`](@ref) when `x` is a [`StateSpaceSet`](@ref) -and a [`GPUCore`](@ref) when `x` is an `AbstractGPUVector`. By default, square microstates of size `n` are used. - -Additional keyword arguments include: -- `rate::Float64`: Sampling rate (default: `0.05`). -- `sampling::SamplingMode`: Sampling mode (default: [`SRandom`](@ref)). -- `metric::Metric`: Distance metric from [Distances.jl](https://github.com/JuliaStats/Distances.jl). When using a [`GPUCore`](@ref), a [`GPUMetric`](@ref) must be provided. - -!!! warning - GPU backends require inputs of type `Float32`. `Float64` inputs are not supported on GPU. - -Alternatively, a [`RecurrenceExpression`](@ref) can be specified directly: -```julia -distribution([x], expr::RecurrenceExpression, n::Int; kwargs...) -``` -**Example:** -```@example quick_example -expr = Corridor(0.05, 0.27) -dist = distribution(ssset, expr, 2) -``` -If a custom [`MicrostateShape`](@ref) is required, the call simplifies to: -```julia -distribution([x], shape::MicrostateShape; kwargs...) 
-``` -**Example:** -```@example quick_example -shape = Triangle(Standard(0.27), 3) -dist = distribution(ssset, shape) -``` - ---- -## Cross-recurrence plots -RMA distributions can also be computed from Cross-Recurrence Plots (CRPs) by providing two time series: -```julia -distribution([x], [y], expr::RecurrenceExpression, n::Int; kwargs...) -distribution([x], [y], shape::MicrostateShape; kwargs...) -``` - -**Example:** -```@example quick_example -data_1 = StateSpaceSet(rand(Uniform(0, 1), 1000)) -data_2 = StateSpaceSet(rand(Uniform(0, 1), 2000)) -dist = distribution(data_1, data_2, 0.27, 2) -``` - -!!! danger - The inputs `x` and `y` must have the same phase-space dimensionality. The following example - is invalid and will raise an exception: - ```julia - data_1 = StateSpaceSet(rand(Uniform(0, 1), (1000, 2))) - data_2 = StateSpaceSet(rand(Uniform(0, 1), (2000, 3))) - dist = distribution(data_1, data_2, 0.27, 2) - ``` - ---- ## Spatial data The package also provides experimental support for spatial data, following *"Generalised Recurrence Plot Analysis for Spatial Data"* [Marwan2007Spatial](@cite). @@ -199,11 +58,3 @@ distribution(spatialdata, Rect(Standard(0.27), (2, 1, 2, 1))) ```@example quick_example distribution(spatialdata_1, spatialdata_2, Rect(Standard(0.27), (2, 1, 2, 1))) ``` - -## Histograms -The [`histogram`](@ref) function counts the occurrences of each microstate identified during -sampling. It is called internally by [`distribution`](@ref) , which converts the resulting [`Counts`](@ref) into [`Probabilities`](@ref). 
- -```@docs -histogram -``` \ No newline at end of file diff --git a/src/core/complexity_measures_interface.jl b/src/core/complexity_measures_interface.jl new file mode 100644 index 0000000..8ab53c8 --- /dev/null +++ b/src/core/complexity_measures_interface.jl @@ -0,0 +1,34 @@ +# Define outcome space + +struct RecurrenceMicrostates{MS<:MicrostateShape, RE <: RecurrenceExpression, SM<:SamplingMode, C<:Core} <: CountBasedOutcomeSpace + shape::MS + expression::RE + sampling::SM + core::C # cpu or gpu + what_else # ? +end + +# Define necessary functions to actually make it an `OutcomeSpace` + +function ComplexityMeasures.counts_and_outcomes(rmspace::RecurrenceMicrostates, x, y = x) + a, b, c = extract_defining_types(rmspace) + counts = histogram(a,b,c, x, y) # Fix this of course + outcomes = xxx # somehow must generate them + return Counts() +end + +function ComplexityMeasures.codify(rmspace::RecurrenceMicrostates, x, y = x) + # TODO +end + +function ComplexityMeasures.outcome_space(rmspace::RecurrenceMicrostates, x, y = x) + # TODO +end + +# The rest is taken care of by ComplexityMeasures.jl. Including `entropy(...)`. + +# TODOs: + +# all microstate shape types need to be renamed; they conflict with +# plotting packages too much. 
+# Furthermore, they should be orthogonal inputs to the main outcome space t \ No newline at end of file From be32b154df2e5c872eb7b61af7299a87eeda0bd3 Mon Sep 17 00:00:00 2001 From: Datseris Date: Fri, 30 Jan 2026 15:54:10 +0000 Subject: [PATCH 02/19] addd more exaple for convenience constructors --- src/core/complexity_measures_interface.jl | 20 ++++++++++++++++++++ src/core/cpu_core.jl | 6 ++++-- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/src/core/complexity_measures_interface.jl b/src/core/complexity_measures_interface.jl index 8ab53c8..215dcdf 100644 --- a/src/core/complexity_measures_interface.jl +++ b/src/core/complexity_measures_interface.jl @@ -8,6 +8,26 @@ struct RecurrenceMicrostates{MS<:MicrostateShape, RE <: RecurrenceExpression, SM what_else # ? end +# convenience constructors + +function RecurrenceMicrostates(; + ε = nothing, + N = 2, + core = CPUCore(), + whatever_else + ) + + # create the data structure + RecurrenceMicrostates(...) +end + + +# downstream convenience +function compute_motif(ospace::RecurrenceMicrostates, x, y = x) + compute_motif(ospace.expression, ..., x, y) +end + + # Define necessary functions to actually make it an `OutcomeSpace` function ComplexityMeasures.counts_and_outcomes(rmspace::RecurrenceMicrostates, x, y = x) diff --git a/src/core/cpu_core.jl b/src/core/cpu_core.jl index bca8bbb..cba822b 100644 --- a/src/core/cpu_core.jl +++ b/src/core/cpu_core.jl @@ -37,6 +37,8 @@ struct StandardCPUCore{M<:MicrostateShape, S<:SamplingMode} <: CPUCore{M, S} shape::M sampling::S end + + #......................................................................................... CPUCore(shape::M, sampling::S) where {M<:MicrostateShape, S<:SamplingMode} = StandardCPUCore(shape, sampling) @@ -217,7 +219,7 @@ distribution( ) = distribution(CPUCore(shape, sampling), x, y; threads = threads) #......................................................................................... 
distribution( - x::StateSpaceSet, + x::StateSpaceSet, y::StateSpaceSet, expr::RecurrenceExpression, n::Int; @@ -282,7 +284,7 @@ distribution( ) = distribution(x, x, shape; rate = rate, sampling = sampling, threads = threads) #......................................................................................... distribution( - core::CPUCore, + core::CPUCore, x; threads = Threads.nthreads() ) = distribution(core, x, x; threads = threads) From c12a070571aea833e99f19ef6257db01ae1fd62f Mon Sep 17 00:00:00 2001 From: Gabriel Ferreira Date: Fri, 30 Jan 2026 18:52:48 -0300 Subject: [PATCH 03/19] remove recurrence expression dependence; rename types to avoid conflict issues --- src/core/complexity_measures_interface.jl | 3 +- src/shapes/diagonal.jl | 42 ++++++------ src/shapes/rect.jl | 82 ++++++++++------------- src/shapes/triangle.jl | 35 +++++----- 4 files changed, 75 insertions(+), 87 deletions(-) diff --git a/src/core/complexity_measures_interface.jl b/src/core/complexity_measures_interface.jl index 215dcdf..d79905b 100644 --- a/src/core/complexity_measures_interface.jl +++ b/src/core/complexity_measures_interface.jl @@ -49,6 +49,7 @@ end # TODOs: -# all microstate shape types need to be renamed; they conflict with +# DONE - all microstate shape types need to be renamed; they conflict with # plotting packages too much. 
+# # Furthermore, they should be orthogonal inputs to the main outcome space t \ No newline at end of file diff --git a/src/shapes/diagonal.jl b/src/shapes/diagonal.jl index 6b6f065..d474dd2 100644 --- a/src/shapes/diagonal.jl +++ b/src/shapes/diagonal.jl @@ -1,39 +1,36 @@ -export Diagonal +export DiagonalMicrostate ########################################################################################## # MicrostateShape: Diagonal + Constructors and sub-types ########################################################################################## """ - Diagonal <: MicrostateShape + DiagonalMicrostate <: MicrostateShape Define a diagonal microstate shape, which captures recurrences along diagonals of a Recurrence Plot (RP). # Constructor ```julia -Diagonal(expr::E, N::Int; B::Int = 2) +DiagonalMicrostate(N::Int; B::Int = 2) ``` -where `expr` is the [`RecurrenceExpression`](@ref) used to evaluate recurrences and `N` -defines the length of the diagonal microstate. +where `N` defines the length of the diagonal microstate. # Example ```julia -diagonal = Diagonal(expr, 3) +diagonal = DiagonalMicrostate(expr, 3) ``` !!! info - **Diagonal** microstates are compatible with spatial data. However, they do not capture + **DiagonalMicrostate** microstates are compatible with spatial data. However, they do not capture hyper-diagonals in Spatial Recurrence Plots (SRP). 
Only diagonals defined by sequential recurrences are supported, such as: ```math R_{i_1,i_2,j_1,j_2}, R_{i_1 + 1,i_2 + 1,j_1 + 1,j_2 + 1}, R_{i_1 + 2,i_2 + 2,j_1 + 2,j_2 + 2}, \\ldots, R_{i_1 + n - 1,i_2 + n - 1,j_1 + n - 1,j_2 + n - 1} ``` """ -struct Diagonal{N, B, E<:RecurrenceExpression} <: MicrostateShape - expr::E -end +struct DiagonalMicrostate{N, B} <: MicrostateShape end -Diagonal(expr::E, N::Int; B::Int = 2) where {E <: RecurrenceExpression} = Diagonal{N,B,E}(expr) +DiagonalMicrostate(N::Int; B::Int = 2) = DiagonalMicrostate{N, B}(expr) ########################################################################################## # Implementations: SamplingSpace @@ -41,16 +38,16 @@ Diagonal(expr::E, N::Int; B::Int = 2) where {E <: RecurrenceExpression} = Diagon # Based on time series: (CPU & GPU) #......................................................................................... SamplingSpace( - ::Diagonal{N, B, E}, + ::DiagonalMicrostate{N, B}, x::Union{StateSpaceSet, AbstractGPUVector{SVector{D, Float32}}}, y::Union{StateSpaceSet, AbstractGPUVector{SVector{D, Float32}}} -) where {N, B, E<:RecurrenceExpression, D} = SSRect2(length(x) - N + 1, length(y) - N + 1) +) where {N, B, D} = SSRect2(length(x) - N + 1, length(y) - N + 1) function SamplingSpace( - ::Diagonal{N, B, E}, + ::DiagonalMicrostate{N, B}, x::AbstractArray{<: Real}, y::AbstractArray{<: Real} -) where {N, B, E<:RecurrenceExpression} +) where {N, B} dims_x = size(x)[2:end] dims_y = size(y)[2:end] @@ -65,7 +62,8 @@ end # Implementations: compute_motif (SRP) ########################################################################################## @inline function compute_motif( - shape::Diagonal, + ::DiagonalMicrostate, + expr::RecurrenceExpression, x::AbstractArray{<: Real}, y::AbstractArray{<: Real}, idx::Vector{Int}, @@ -84,7 +82,7 @@ end i = ntuple(k -> itr[k], dim_x) j = ntuple(k -> itr[dim_x + k], dim_y) - index += recurrence(shape.expr, x, y, i, j) * p + index += recurrence(expr, x, 
y, i, j) * p itr .+= 1 end @@ -95,27 +93,27 @@ end ########################################################################################## # Implementations: utils — histogram size, power vector, and offsets ########################################################################################## -@generated function get_histogram_size(::Diagonal{N, B, E}) where {N, B, E} +@generated function get_histogram_size(::DiagonalMicrostate{N, B}) where {N, B} size = B^(N) return :( $size ) end -@generated function get_power_vector(::CPUCore, ::Diagonal{N, B, E}) where {N, B, E} +@generated function get_power_vector(::CPUCore, ::DiagonalMicrostate{N, B}) where {N, B} expr = :(SVector{$N}( $([:(B^$i) for i in 0:(N-1)]... ) )) return expr end -@generated function get_offsets(::CPUCore, ::Diagonal{N, B, E}) where {N, B, E} +@generated function get_offsets(::CPUCore, ::DiagonalMicrostate{N, B}) where {N, B} elems = [ :(SVector{2, Int}($n, $n)) for n in 0:(N - 1)] return :( SVector{$N, $(SVector{2, Int})}( $(elems...) ) ) end -@generated function get_power_vector(::GPUCore, ::Diagonal{N, B, E}) where {N, B, E} +@generated function get_power_vector(::GPUCore, ::DiagonalMicrostate{N, B}) where {N, B} expr = :(SVector{$N}( $([:(Int32(B^$i)) for i in 0:(N-1)]... ) )) return expr end -@generated function get_offsets(::GPUCore, ::Diagonal{N, B, E}) where {N, B, E} +@generated function get_offsets(::GPUCore, ::DiagonalMicrostate{N, B}) where {N, B} elems = [ :(SVector{2, Int32}($(Int32(n)), $(Int32(n)))) for n in 0:(N - 1)] return :( SVector{$N, $(SVector{2, Int32})}( $(elems...) 
) ) end \ No newline at end of file diff --git a/src/shapes/rect.jl b/src/shapes/rect.jl index 6b593e2..4e7363b 100644 --- a/src/shapes/rect.jl +++ b/src/shapes/rect.jl @@ -1,77 +1,68 @@ -export Rect +export RectMicrostate ########################################################################################## # MicrostateShape: Rect + Constructors and sub-types ########################################################################################## """ - Rect <: MicrostateShape + RectMicrostate <: MicrostateShape Define a rectangular microstate shape. -`Rect` can represent either a two-dimensional microstate (identified as `Rect2`, used for +`RectMicrostate` can represent either a two-dimensional microstate (identified as `Rect2Microstate`, used for Recurrence Plots and Cross-Recurrence Plots) or an N-dimensional microstate (identified as -`RectN`, used for spatial data). +`RectNMicrostate`, used for spatial data). -# Rect2 (time-series data) -A 2D rectangular microstate can be initialized using either of the following constructors: +# Rect2Microstate (time-series data) +A 2D rectangular microstate can be initialized using either of the following constructor: ```julia -Rect(expr::E; rows = 2, cols = 2, B = 2) -Rect(rows::Int, cols::Int; expr = Standard(0.27), B = 2) +RectMicrostate(rows::Int, cols::Int; B = 2) ``` -Here, `rows` and `columns` define the rectangle dimensions, `expr` is the [`RecurrenceExpression`](@ref) -used to evaluate recurrences, and `B` is the base used to encode microstate elements (typically `2`, representing recurrence or non-recurrence). +Here, `rows` and `columns` define the rectangle dimensions, and `B` is the base used to encode +microstate elements (typically `2`, representing recurrence or non-recurrence). 
Rectangular microstates can be specialized to define common patterns such as lines, columns, and squares: ```julia -line = Rect(expr; rows = n, cols = 1) -column = Rect(expr; rows = 1, cols = n) -square = Rect(expr; rows = n, cols = n) +line = RectMicrostate(N, 1) +column = RectMicrostate(1, N) +square = RectMicrostate(N, N) ``` Since square microstates are frequently used, a convenience constructor is also provided: ```julia -Rect(expr::E, N; B = 2) -``` -where `N` defines the size of the square microstate. For example: -```julia -square = Rect(expr, n) +RectMicrostate(N; B = 2) ``` -# RectN (spatial data) -For N-dimensional structures, typically used with spatial data, the RectN variant can be +# RectNMicrostate (spatial data) +For N-dimensional structures, typically used with spatial data, the RectNMicrostate variant can be initialized as: ```julia -Rect(expr::E, structure::NTuple{D, Int}; B = 2) +RectMicrostate(structure::NTuple{D, Int}; B = 2) ``` Here, `structure` defines the size of the microstate along each dimension. For example: ```julia -nrect = Rect(expr, (2, 1, 2, 1)) +nrect = RectMicrostate((2, 1, 2, 1)) ``` This form is suitable for N-dimensional spatial data, such as images or volumetric datasets. """ -abstract type Rect <: MicrostateShape end +abstract type RectMicrostate <: MicrostateShape end #......................................................................................... # Based on time series: (CPU & GPU) #......................................................................................... 
-struct Rect2{W, H, B, E <: RecurrenceExpression} <: Rect - expr::E -end +struct Rect2Microstate{W, H, B} <: RectMicrostate end -Rect(expr::E; rows = 2, cols = 2, B = 2) where {E <: RecurrenceExpression} = Rect2{rows,cols,B,E}(expr) -Rect(expr::E, N; B = 2) where {E <: RecurrenceExpression} = Rect2{N,N,B,E}(expr) -Rect(rows::Int, cols::Int; expr::E = Standard(0.27), B = 2) where {E <: RecurrenceExpression} = Rect2{rows, cols, B, Standard}(expr) +RectMicrostate(N; B = 2) = Rect2Microstate{N, N, B}() +RectMicrostate(rows::Int, cols::Int; B = 2) = Rect2Microstate{rows, cols, B}() #......................................................................................... # Based on spatial data: (CPU only) #......................................................................................... -struct RectN{D, B, E <: RecurrenceExpression} <: Rect - expr::E +struct RectNMicrostate{D, B} <: RectMicrostate structure::NTuple{D, Int} end -Rect(expr::E, structure::NTuple{D, Int}; B = 2) where {D, E <: RecurrenceExpression} = RectN{D, B, E}(expr, structure) +RectMicrostate(structure::NTuple{D, Int}; B = 2) where {D} = RectNMicrostate{D, B}(structure) ########################################################################################## # Implementations: SamplingSpace @@ -79,18 +70,18 @@ Rect(expr::E, structure::NTuple{D, Int}; B = 2) where {D, E <: RecurrenceExpress # Based on time series: (CPU & GPU) #......................................................................................... SamplingSpace( - ::Rect2{W, H, B, E}, + ::Rect2Microstate{W, H, B}, x::Union{StateSpaceSet, AbstractGPUVector{SVector{N, Float32}}}, y::Union{StateSpaceSet, AbstractGPUVector{SVector{N, Float32}}} -) where {W, H, B, E<:RecurrenceExpression, N} = SSRect2(length(x) - W + 1, length(y) - H + 1) +) where {W, H, B, N} = SSRect2(length(x) - W + 1, length(y) - H + 1) #......................................................................................... 
# Based on spatial data: (CPU only) #......................................................................................... function SamplingSpace( - shape::RectN{D, B, E}, + shape::RectNMicrostate{D, B}, x::AbstractArray{<: Real}, y::AbstractArray{<: Real} -) where {D, B, E<:RecurrenceExpression} +) where {D, B} dims_x = size(x)[2:end] dims_y = size(y)[2:end] @@ -107,7 +98,8 @@ end # Implementations: compute_motif (SRP) ########################################################################################## @inline function compute_motif( - shape::RectN, + shape::RectNMicrostate, + expr::RecurrenceExpression, x::AbstractArray{<: Real}, y::AbstractArray{<: Real}, idx::Vector{Int}, @@ -124,7 +116,7 @@ end i = ntuple(k -> itr[k], dim) j = ntuple(k -> itr[dim + k], length(shape.structure) - dim) - index += recurrence(shape.expr, x, y, i, j) * p + index += recurrence(expr, x, y, i, j) * p itr[1] += 1 for k in 1:length(shape.structure) - 1 @@ -143,41 +135,41 @@ end ########################################################################################## # Implementations: utils — histogram size, power vector, and offsets ########################################################################################## -@generated function get_histogram_size(::Rect2{W, H, B, E}) where {W, H, B, E} +@generated function get_histogram_size(::Rect2Microstate{W, H, B}) where {W, H, B} size = B^(W*H) return :( $size ) end -@generated function get_power_vector(::CPUCore, ::Rect2{W, H, B, E}) where {W, H, B, E} +@generated function get_power_vector(::CPUCore, ::Rect2Microstate{W, H, B}) where {W, H, B} N = W * H expr = :(SVector{$N}( $([:(B^$i) for i in 0:(N-1)]... 
) )) return expr end -@generated function get_offsets(::CPUCore, ::Rect2{W, H, B, E}) where {W, H, B, E} +@generated function get_offsets(::CPUCore, ::Rect2Microstate{W, H, B}) where {W, H, B} N = W * H elems = [ :(SVector{2, Int}($w, $h)) for w in 0:(W - 1) for h in 0:(H - 1)] return :( SVector{$N, $(SVector{2, Int})}( $(elems...) ) ) end -@generated function get_power_vector(::GPUCore, ::Rect2{W, H, B, E}) where {W, H, B, E} +@generated function get_power_vector(::GPUCore, ::Rect2Microstate{W, H, B}) where {W, H, B} N = W * H expr = :(SVector{$N}( $([:(Int32(B^$i)) for i in 0:(N-1)]... ) )) return expr end -@generated function get_offsets(::GPUCore, ::Rect2{W, H, B, E}) where {W, H, B, E} +@generated function get_offsets(::GPUCore, ::Rect2Microstate{W, H, B}) where {W, H, B} N = W * H elems = [ :(SVector{2, Int32}($(Int32(w)), $(Int32(h)))) for w in 0:(W - 1) for h in 0:(H - 1)] return :( SVector{$N, $(SVector{2, Int32})}( $(elems...) ) ) end -function get_histogram_size(shape::RectN{D, B, E}) where {D, B, E} +function get_histogram_size(shape::RectNMicrostate{D, B}) where {D, B} size = B^(prod(shape.structure)) return size end -function get_power_vector(::CPUCore, shape::RectN{D, B, E}) where {D, B, E} +function get_power_vector(::CPUCore, shape::RectNMicrostate{D, B}) where {D, B} N = prod(shape.structure) return SVector{N}((B^i for i in 0:(N-1))...) 
end \ No newline at end of file diff --git a/src/shapes/triangle.jl b/src/shapes/triangle.jl index f496ef8..e88509c 100644 --- a/src/shapes/triangle.jl +++ b/src/shapes/triangle.jl @@ -1,27 +1,26 @@ -export Triangle +export TriangleMicrostate ########################################################################################## # MicrostateShape: Triangle + Constructors and sub-types ########################################################################################## """ - Triangle{N, B, E<:RecurrenceExpression} <: MicrostateShape + TriangleMicrostate{N, B} <: MicrostateShape -Triangle{N, B, E<:RecurrenceExpression} <: MicrostateShape +TriangleMicrostate{N, B} <: MicrostateShape Define a triangular microstate shape, originally introduced by Hirata in 2021 [Hirata2021Triangle](@cite). # Constructor ```julia -Triangle(expr::E, N::Int; B::Int = 2) +TriangleMicrostate(N::Int; B::Int = 2) ``` -where `expr` is the [`RecurrenceExpression`](@ref) used to evaluate recurrences and `N` -defines the size of the triangular microstate. +where `N` defines the size of the triangular microstate. # Example ```julia -n = 3 -triangle = Triangle(expr, n) +N = 3 +triangle = TriangleMicrostate(N) ``` The corresponding microstate structure is given by: ```math @@ -35,11 +34,9 @@ R_{i,j} & R_{i,j + 1} & R_{i,j + 2} \\\\ !!! compat Triangular microstate shape is not compatible with spatial data. 
""" -struct Triangle{N, B, E<:RecurrenceExpression} <: MicrostateShape - expr::E -end +struct TriangleMicrostate{N, B} <: MicrostateShape end -Triangle(expr::E, N::Int; B::Int = 2) where {E <: RecurrenceExpression} = Triangle{N,B,E}(expr) +TriangleMicrostate(N::Int; B::Int = 2) = TriangleMicrostate{N, B}() ########################################################################################## # Implementations: SamplingSpace @@ -47,35 +44,35 @@ Triangle(expr::E, N::Int; B::Int = 2) where {E <: RecurrenceExpression} = Triang # Based on time series: (CPU & GPU) #......................................................................................... SamplingSpace( - ::Triangle{N, B, E}, + ::TriangleMicrostate{N, B}, x::Union{StateSpaceSet, AbstractGPUVector{SVector{D, Float32}}}, y::Union{StateSpaceSet, AbstractGPUVector{SVector{D, Float32}}} -) where {N, B, E<:RecurrenceExpression, D} = SSRect2(length(x) - N + 1, length(y) - N + 1) +) where {N, B, D} = SSRect2(length(x) - N + 1, length(y) - N + 1) ########################################################################################## # Implementations: utils — histogram size, power vector, and offsets ########################################################################################## -@generated function get_histogram_size(::Triangle{N, B, E}) where {N, B, E} +@generated function get_histogram_size(::TriangleMicrostate{N, B}) where {N, B} size = B^((N * (N + 1)) ÷ 2) return :( $size ) end -@generated function get_power_vector(::CPUCore, ::Triangle{N, B, E}) where {N, B, E} +@generated function get_power_vector(::CPUCore, ::TriangleMicrostate{N, B}) where {N, B} expr = :(SVector{$(N*(N + 1) ÷ 2)}( $([:(B^$( (((j - 1) * j) ÷ 2) + (i - 1) )) for j in 1:N for i in 1:j]... 
) )) return expr end -@generated function get_offsets(::CPUCore, ::Triangle{N, B, E}) where {N, B, E} +@generated function get_offsets(::CPUCore, ::TriangleMicrostate{N, B}) where {N, B} elems = [ :(SVector{2, Int}($i, $j)) for j in 0:(N - 1) for i in 0:j] return :( SVector{$(N*(N + 1) ÷ 2), $(SVector{2, Int})}( $(elems...) ) ) end -@generated function get_power_vector(::GPUCore, ::Triangle{N, B, E}) where {N, B, E} +@generated function get_power_vector(::GPUCore, ::TriangleMicrostate{N, B}) where {N, B} expr = :(SVector{$(N*(N + 1) ÷ 2)}( $([:(Int32(B^$( (((j - 1) * j) ÷ 2) + (i - 1) ))) for j in 1:N for i in 1:j]... ) )) return expr end -@generated function get_offsets(::GPUCore, ::Triangle{N, B, E}) where {N, B, E} +@generated function get_offsets(::GPUCore, ::TriangleMicrostate{N, B}) where {N, B} elems = [ :(SVector{2, Int32}($(Int32(i)), $(Int32(j)))) for j in 0:(N - 1) for i in 0:j] return :( SVector{$(N*(N + 1) ÷ 2), $(SVector{2, Int32})}( $(elems...) ) ) end \ No newline at end of file From 15d49edbade32270f5517a0c7cf4e3ee5cfff139 Mon Sep 17 00:00:00 2001 From: Gabriel Ferreira Date: Fri, 30 Jan 2026 18:53:56 -0300 Subject: [PATCH 04/19] add recurrence microstate type --- src/RecurrenceMicrostatesAnalysis.jl | 2 ++ src/core/recurrence_microstates.jl | 42 ++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) create mode 100644 src/core/recurrence_microstates.jl diff --git a/src/RecurrenceMicrostatesAnalysis.jl b/src/RecurrenceMicrostatesAnalysis.jl index 63ed2bd..e22c500 100644 --- a/src/RecurrenceMicrostatesAnalysis.jl +++ b/src/RecurrenceMicrostatesAnalysis.jl @@ -27,6 +27,8 @@ const DEFAULT_METRIC = Euclidean() ########################################################################################## # Core API types and functions ########################################################################################## +include("core/recurrence_microstates.jl") + include("core/abstract_core.jl") include("core/recurrence.jl") 
include("core/shape.jl") diff --git a/src/core/recurrence_microstates.jl b/src/core/recurrence_microstates.jl new file mode 100644 index 0000000..6eada46 --- /dev/null +++ b/src/core/recurrence_microstates.jl @@ -0,0 +1,42 @@ +export RecurrenceMicrostates + +########################################################################################## +# Recurrence Microstate +########################################################################################## +""" + RecurrenceMicrostates +""" +struct RecurrenceMicrostates{MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode, C <: RMACore} <: CountBasedOutcomeSpace + shape::MS + expr::RE + sampling::SM + core::C +end + +########################################################################################## +# Recurrence Microstate: Convenience constructors +########################################################################################## +function RecurrenceMicrostates(ε::Real, N::Int; sampling_ratio::Real = 0.05, core::RMACore = CPUCore()) + + ## If using GPU, assert Float32. + if (core isa GPUCore) + @assert ε isa Float32 "When using GPU, the threshold must be a Float32." + end + + shape = Rect(N) + sampling = SRandom(sampling_ratio) + expr = Standard(ε) + return RecurrenceMicrostates(shape, expr, sampling, core) +end + +function RecurrenceMicrostates(ε::Real, shape::MicrostateShape; sampling_ratio::Real = 0.05, core::RMACore = CPUCore()) + + ## If using GPU, assert Float32. + if (core isa GPUCore) + @assert ε isa Float32 "When using GPU, the threshold must be a Float32." 
+ end + + sampling = SRandom(sampling_ratio) + expr = Standard(ε) + return RecurrenceMicrostates(shape, expr, sampling, core) +end \ No newline at end of file From 92b9b56a9f656cb6b2438999b89cbe410f375125 Mon Sep 17 00:00:00 2001 From: Gabriel Ferreira Date: Fri, 30 Jan 2026 23:01:59 -0300 Subject: [PATCH 05/19] fix shape names; modify histogram input; add complexity measures interface; add some recurrence microstates convenience constructors --- src/RecurrenceMicrostatesAnalysis.jl | 10 +- src/core/abstract_core.jl | 100 +++++++++------- src/core/complexity_measures_interface.jl | 20 ++++ src/core/cpu_core.jl | 133 +++++----------------- src/core/gpu/gpu_core.jl | 116 +++++-------------- src/core/recurrence_microstates.jl | 69 +++++++++-- src/utils/operations/permute_cols.jl | 10 +- src/utils/operations/permute_rows.jl | 8 +- src/utils/operations/transpose.jl | 6 +- 9 files changed, 211 insertions(+), 261 deletions(-) diff --git a/src/RecurrenceMicrostatesAnalysis.jl b/src/RecurrenceMicrostatesAnalysis.jl index e22c500..b87e8bf 100644 --- a/src/RecurrenceMicrostatesAnalysis.jl +++ b/src/RecurrenceMicrostatesAnalysis.jl @@ -11,7 +11,6 @@ end RecurrenceMicrostatesAnalysis ########################################################################################## using Atomix using Combinatorics -using ComplexityMeasures using Distances using GPUArraysCore using KernelAbstractions @@ -20,6 +19,7 @@ using Reexport using StaticArrays @reexport using Adapt +@reexport using ComplexityMeasures @reexport using StateSpaceSets const DEFAULT_METRIC = Euclidean() @@ -27,22 +27,22 @@ const DEFAULT_METRIC = Euclidean() ########################################################################################## # Core API types and functions ########################################################################################## -include("core/recurrence_microstates.jl") - include("core/abstract_core.jl") include("core/recurrence.jl") include("core/shape.jl") 
include("core/sampling.jl") +include("core/gpu/gpu_metric.jl") +include("core/recurrence_microstates.jl") include("core/cpu_core.jl") - -include("core/gpu/gpu_metric.jl") include("core/gpu/gpu_core.jl") include("core/measures.jl") include("core/optimize.jl") include("core/operation.jl") +include("core/complexity_measures_interface.jl") + ########################################################################################## # Recurrence functions, motif shapes, and sampling modes ########################################################################################## diff --git a/src/core/abstract_core.jl b/src/core/abstract_core.jl index ef2a3e3..5372632 100644 --- a/src/core/abstract_core.jl +++ b/src/core/abstract_core.jl @@ -1,4 +1,4 @@ -export RMACore, histogram, distribution +export RMACore, histogram ########################################################################################## # RMACore @@ -23,55 +23,71 @@ correctly. abstract type RMACore end ########################################################################################## -# Implementations: histogram & distribution +# Implementations: histogram ########################################################################################## """ - histogram(core::RMACore, [x], [y]) + histogram(rmspace::RecurrenceMicrostates, [x], [y]; kwargs...) -Compute the histogram of recurrence microstates for the input data `[x]` and `[y]` using the specified -backend `core`, which must be an [`RMACore`](@ref). - -This function executes the full backend pipeline: sampling the recurrence space, constructing -microstates, and evaluating recurrences. - -The result is returned as a [`Counts`](@ref) object, where each index corresponds to the decimal -representation of the associated microstate. 
-""" -function histogram(core::RMACore, x, y) - core_type = typeof(core) - x_type = typeof(x) - y_type = typeof(y) - - msg = "`histogram` not implemented for $core_type and input data of types $x_type and $y_type." - throw(ArgumentError(msg)) -end -#......................................................................................... -""" - distribution(core::RMACore, [x], [y]) - -Compute an RMA distribution from the recurrence structure constructed using the input data `[x]` and -`[y]`. +Compute the histogram of recurrence microstates for an abstract recurrence structure constructed +from the input data `[x]` and `[y]`. If `[x]` and `[y]` are identical, the result corresponds to a Recurrence Plot (RP); otherwise, it corresponds to a Cross-Recurrence Plot (CRP). -The `core` argument must be a subtype of [`RMACore`](@ref) and defines how the computation is performed, -including the execution backend (CPU or GPU), the microstate shape, the recurrence expression, and the -sampling mode. - -The result is returned as a [`Probabilities`](@ref) object, where each index corresponds to the decimal -representation of the associated microstate. - -Internally, `distribution` calls [`histogram`](@ref) and converts the resulting counts into -probabilities. +The result is returned as a `Vector{Int}` representing the histogram of recurrence +microstates for the given input data. + +### Arguments +- `rmspace`: A [`RecurrenceMicrostates`](@ref) which defines the outcome space. +- `[x]`: Input data provided as a [`StateSpaceSet`](@ref) or an `AbstractArray`. If using a [`GPUCore`](@ref) the input must be an `AbstractGPUVector`. +- `[y]`: Input data provided as a [`StateSpaceSet`](@ref) or an `AbstractArray`. If using a [`GPUCore`](@ref) the input must be an `AbstractGPUVector`. + +!!! note + [`StateSpaceSet`](@ref) and `AbstractArray` inputs use different internal backends and therefore + different histogram implementations. 
Both interfaces share the same method signature, differing + only in the input data representation. + +!!! info + We strongly recommend to use [`StateSpaceSet`](@ref) as input for time series. However, if given + a `Vector{Real}` as input it is accepted and converted internally to a [`StateSpaceSet`](@ref). + +### Keyword Arguments +If using a [`CPUCore`](@ref): +- `threads`: Number of threads used to compute the histogram. By default, this is set to + `Threads.nthreads()`, which can be specified at Julia startup using `--threads N` or via the + `JULIA_NUM_THREADS` environment variable. + +If using a [`GPUCore`](@ref): +- `groupsize`: Number of threads per GPU workgroup. + +### Examples using [`CPUCore`](@ref) +- Time series: +```julia +ssset = StateSpaceSet(rand(Float64, (1000))) +rmspace = RecurrenceMicrostates(0.27, 3) +dist = histogram(rmspace, ssset) +``` + +- Spatial data: +```julia +spatialdata = rand(Float64, (3, 50, 50)) +rmspace = RecurrenceMicrostates(0.27, RectMicrostate((2, 1, 2, 1))) +dist = histogram(rmspace, spatialdata) +``` + +### Examples using [`GPUCore`](@ref) +```julia +using CUDA +gpudata = StateSpaceSet(Float32.(data)) |> CuVector +core = GPUCore(CUDABackend(), Rect(Standard(0.27f0; metric = GPUEuclidean()), 2), SRandom(0.05)) +dist = histogram(core, gpudata, gpudata) +``` + +!!! note + The resulting histogram is copied from GPU memory back to the CPU. """ -function distribution(core::RMACore, x, y) - core_type = typeof(core) - x_type = typeof(x) - y_type = typeof(y) - - msg = "`distribution` not implemented for $core_type and input data of types $x_type and $y_type." 
- throw(ArgumentError(msg)) +function histogram() + throw(ArgumentError("`histogram` is not implemented without arguments.")) end ########################################################################################## \ No newline at end of file diff --git a/src/core/complexity_measures_interface.jl b/src/core/complexity_measures_interface.jl index d79905b..d057a76 100644 --- a/src/core/complexity_measures_interface.jl +++ b/src/core/complexity_measures_interface.jl @@ -1,3 +1,22 @@ + +########################################################################################## +# Interface with ComplexityMeasures.jl +########################################################################################## +function ComplexityMeasures.counts_and_outcomes(rmspace::RecurrenceMicrostates, x, y = x) + counts = histogram(rmspace, x, y) + outcomes = eachindex(counts) + return Counts(counts, outcomes) +end + +function ComplexityMeasures.codify(rmspace::RecurrenceMicrostates, x, y = x) + return histogram(rmspace, x, y) +end + +function ComplexityMeasures.outcome_space(rmspace::RecurrenceMicrostates, x, y = x) + return eachindex(1:get_histogram_size(rmspace.shape)) +end + +#= # Define outcome space struct RecurrenceMicrostates{MS<:MicrostateShape, RE <: RecurrenceExpression, SM<:SamplingMode, C<:Core} <: CountBasedOutcomeSpace @@ -44,6 +63,7 @@ end function ComplexityMeasures.outcome_space(rmspace::RecurrenceMicrostates, x, y = x) # TODO end +=# # The rest is taken care of by ComplexityMeasures.jl. Including `entropy(...)`. 
diff --git a/src/core/cpu_core.jl b/src/core/cpu_core.jl index cba822b..bf67960 100644 --- a/src/core/cpu_core.jl +++ b/src/core/cpu_core.jl @@ -4,43 +4,12 @@ export CPUCore, StandardCPUCore # RMACore: CPU ########################################################################################## """ - CPUCore{M<:MicrostateShape, S<:SamplingMode} <: RMACore + CPUCore <: RMACore -Abstract CPU backend that implements the **RecurrenceMicrostatesAnalysis.jl** execution pipeline on +Type which represents the pipeline executed by **RecurrenceMicrostatesAnalysis.jl** on central processing units. - -The package provides a default implementation via [`StandardCPUCore`](@ref). - -Concrete subtypes of `CPUCore` must define the following fields: -- `shape`: the [`MicrostateShape`](@ref) used to construct microstates. -- `sampling`: the [`SamplingMode`](@ref) used to sample the recurrence space. - -# Implementations -- [`StandardCPUCore`](@ref) """ -abstract type CPUCore{M<:MicrostateShape, S<:SamplingMode} <: RMACore end -#......................................................................................... -""" - StandardCPUCore{M<:MicrostateShape, S<:SamplingMode} <: CPUCore{M, S} - -Default CPU backend implementation for **RecurrenceMicrostatesAnalysis.jl**. - -This type provides the standard execution pipeline for computing recurrence microstate distributions -on CPU devices. - -# Initialization -```julia -core = CPUCore(shape, sampling) -``` -""" -struct StandardCPUCore{M<:MicrostateShape, S<:SamplingMode} <: CPUCore{M, S} - shape::M - sampling::S -end - - -#......................................................................................... 
-CPUCore(shape::M, sampling::S) where {M<:MicrostateShape, S<:SamplingMode} = StandardCPUCore(shape, sampling) +struct CPUCore <: RMACore end ########################################################################################## # Implementation: compute_motif @@ -71,64 +40,24 @@ end ########################################################################################## # Based on time series: (CPU) #......................................................................................... -""" - histogram(core::StandardCPUCore, [x], [y]; kwargs...) - -Compute the histogram of recurrence microstates for an abstract recurrence structure constructed -from the input data `[x]` and `[y]`. - -If `[x]` and `[y]` are identical, the result corresponds to a Recurrence Plot (RP); otherwise, it -corresponds to a Cross-Recurrence Plot (CRP). - -The result is returned as a [`Counts`](@ref) object representing the histogram of recurrence -microstates for the given input data. - -This method implements the CPU backend using a [`CPUCore`](@ref), specifically the -[`StandardCPUCore`](@ref) implementation. - -### Arguments -- `core`: A [`StandardCPUCore`](@ref) defining the CPU backend configuration. -- `[x]`: Input data provided as a [`StateSpaceSet`](@ref) or an `AbstractArray`. -- `[y]`: Input data provided as a [`StateSpaceSet`](@ref) or an `AbstractArray`. - -!!! note - [`StateSpaceSet`](@ref) and `AbstractArray` inputs use different internal backends and therefore - different histogram implementations. Both interfaces share the same method signature, differing - only in the input data representation. -### Keyword Arguments -- `threads`: Number of threads used to compute the histogram. By default, this is set to - `Threads.nthreads()`, which can be specified at Julia startup using `--threads N` or via the - `JULIA_NUM_THREADS` environment variable. 
- -### Examples -- Time series: -```julia -ssset = StateSpaceSet(rand(Float64, (1000))) -core = CPUCore(Rect(Standard(0.27), 2), SRandom(0.05)) -dist = histogram(core, ssset, ssset) -``` - -- Spatial data: -```julia -spatialdata = rand(Float64, (3, 50, 50)) -core = CPUCore(Rect(Standard(0.5), (2, 2, 1, 1)), SRandom(0.05)) -dist = histogram(core, spatialdata, spatialdata) -``` -""" function histogram( - core::StandardCPUCore, - x::StateSpaceSet, - y::StateSpaceSet; + rmspace::RecurrenceMicrostates{MS, RE, SM, C}, + x::Union{StateSpaceSet, Vector{<: Real}}, + y::Union{StateSpaceSet, Vector{<: Real}} = x; threads = Threads.nthreads() -) +) where {MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode, C <: CPUCore} + + if (x isa Vector); x = StateSpaceSet(x); end + if (y isa Vector); y = StateSpaceSet(y); end + # Info - space = SamplingSpace(core.shape, x, y) - samples = get_num_samples(core.sampling, space) + space = SamplingSpace(rmspace.shape, x, y) + samples = get_num_samples(rmspace.sampling, space) # Allocate memory - pv = get_power_vector(core, core.shape) - offsets = get_offsets(core, core.shape) + pv = get_power_vector(rmspace.core, rmspace.shape) + offsets = get_offsets(rmspace.core, rmspace.shape) # Compute the histogram chunk = ceil(Int, samples / threads) @@ -136,15 +65,15 @@ function histogram( for t in 1:threads tasks[t] = Threads.@spawn begin - local_hist = zeros(Int, get_histogram_size(core.shape)) + local_hist = zeros(Int, get_histogram_size(rmspace.shape)) local_rng = TaskLocalRNG() start = (t - 1) * chunk + 1 stop = min(t * chunk, samples) for m in start:stop - i, j = get_sample(core, core.sampling, space, local_rng, m) - idx = compute_motif(core.shape.expr, x, y, i, j, pv, offsets) + i, j = get_sample(rmspace.core, rmspace.sampling, space, local_rng, m) + idx = compute_motif(rmspace.expr, x, y, i, j, pv, offsets) @inbounds local_hist[idx] += 1 end @@ -153,27 +82,25 @@ function histogram( end res = reduce(+, fetch.(tasks)) - out = 
eachindex(res) - - return Counts(res, out) + return res end #......................................................................................... # Based on spatial data: (CPU only) #......................................................................................... function histogram( - core::StandardCPUCore, + rmspace::RecurrenceMicrostates{MS, RE, SM, C}, x::AbstractArray{<: Real}, - y::AbstractArray{<: Real}; + y::AbstractArray{<: Real} = x; threads = Threads.nthreads() -) +) where {MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode, C <: CPUCore} # Info - space = SamplingSpace(core.shape, x, y) - samples = get_num_samples(core.sampling, space) + space = SamplingSpace(rmspace.shape, x, y) + samples = get_num_samples(rmspace.sampling, space) dim_x = ndims(x) - 1 dim_y = ndims(y) - 1 # Allocate memory - pv = get_power_vector(core, core.shape) + pv = get_power_vector(rmspace.core, rmspace.shape) # Compute the histogram chunk = ceil(Int, samples / threads) @@ -181,7 +108,7 @@ function histogram( for t in 1:threads tasks[t] = Threads.@spawn begin - local_hist = zeros(Int, get_histogram_size(core.shape)) + local_hist = zeros(Int, get_histogram_size(rmspace.shape)) local_rng = TaskLocalRNG() start = (t - 1) * chunk + 1 @@ -191,8 +118,8 @@ function histogram( itr = zeros(Int, dim_x + dim_y) for m in start:stop - get_sample(core, core.sampling, space, idx, local_rng, m) - i = compute_motif(core.shape, x, y, idx, itr, pv) + get_sample(rmspace.core, rmspace.sampling, space, idx, local_rng, m) + i = compute_motif(rmspace.shape, rmspace.expr, x, y, idx, itr, pv) @inbounds local_hist[i] += 1 end @@ -201,9 +128,7 @@ function histogram( end res = reduce(+, fetch.(tasks)) - out = eachindex(res) - - return Counts(res, out) + return res end ########################################################################################## diff --git a/src/core/gpu/gpu_core.jl b/src/core/gpu/gpu_core.jl index a48f57f..d0ba474 100644 --- 
a/src/core/gpu/gpu_core.jl +++ b/src/core/gpu/gpu_core.jl @@ -4,46 +4,21 @@ export GPUCore, StandardGPUCore # RMACore: GPU ########################################################################################## """ - GPUCore{B, M<:MicrostateShape, S<:SamplingMode} <: RMACore + GPUCore{B} <: RMACore -Abstract GPU backend that implements the **RecurrenceMicrostatesAnalysis.jl** execution pipeline on -graphics processing units. +Type which represents the pipeline executed by **RecurrenceMicrostatesAnalysis.jl** on +graphical processing units. -The package provides a default implementation via [`StandardGPUCore`](@ref). - -Concrete subtypes of `GPUCore` must define the following fields: -- `backend`: the GPU backend device (e.g. `CUDABackend`, `MetalBackend`). -- `shape`: the [`MicrostateShape`](@ref) used to construct microstates. -- `sampling`: the [`SamplingMode`](@ref) used to sample the recurrence space. - -# Implementations -- [`StandardGPUCore`](@ref) -""" -abstract type GPUCore{B, M<:MicrostateShape, S<:SamplingMode} <: RMACore end - -########################################################################################## -""" - StandardGPUCore{B, M<:MicrostateShape, S<:SamplingMode} <: GPUCore{B, M, S} - -Default GPU backend implementation for **RecurrenceMicrostatesAnalysis.jl**. - -This type provides the standard execution pipeline for computing recurrence microstate distributions -on GPU devices. - -# Initialization +It is initialized using: ```julia -core = GPUCore(backend, shape, sampling) +GPUCore(backend) ``` +Here, `backend` is the GPU device backend, e.g., `MetalBackend`, `CUDABackend`. 
""" -struct StandardGPUCore{B, M<:MicrostateShape, S<:SamplingMode} <: GPUCore{B, M, S} +struct GPUCore{B} <: RMACore backend::B - shape::M - sampling::S end -########################################################################################## -GPUCore(backend::B, shape::M, sampling::S) where {B, M<:MicrostateShape, S<:SamplingMode} = StandardGPUCore(backend, shape, sampling) - ########################################################################################## # Implementation: compute_motif ########################################################################################## @@ -65,98 +40,61 @@ end ########################################################################################## # Based on time series: (GPU) #......................................................................................... -""" - histogram(core::StandardGPUCore, [x], [y]; kwargs...) - -Compute the histogram of recurrence microstates for an abstract recurrence structure constructed -from the input data `[x]` and `[y]`. - -If `[x]` and `[y]` are identical, the result corresponds to a Recurrence Plot (RP); otherwise, it -corresponds to a Cross-Recurrence Plot (CRP). - -The result is returned as a [`Counts`](@ref) object representing the histogram of recurrence -microstates for the given input data. - -!!! note - The resulting histogram is copied from GPU memory back to the CPU. - -This method implements the GPU backend using a [`GPUCore`](@ref), specifically the -[`StandardGPUCore`](@ref) implementation. - -### Arguments -- `core`: A [`StandardGPUCore`](@ref) defining the GPU backend configuration. -- `[x]`: Input data provided as an `AbstractGPUVector`. -- `[y]`: Input data provided as an `AbstractGPUVector`. - -### Keyword Arguments -- `groupsize`: Number of threads per GPU workgroup. 
- -### Examples -```julia -using CUDA -gpudata = StateSpaceSet(Float32.(data)) |> CuVector -core = GPUCore(CUDABackend(), Rect(Standard(0.27f0; metric = GPUEuclidean()), 2), SRandom(0.05)) -dist = histogram(core, gpudata, gpudata) -``` -""" function histogram( - core::StandardGPUCore, + rmspace::RecurrenceMicrostates{MS, RE, SM, C}, x::AbstractGPUVector{SVector{N, Float32}}, - y::AbstractGPUVector{SVector{N, Float32}}; + y::AbstractGPUVector{SVector{N, Float32}} = x; groupsize::Int = 256 -) where {N} +) where {N, MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode, C <: GPUCore} # Info - space = SamplingSpace(core.shape, x, y) - samples = get_num_samples(core.sampling, space) + space = SamplingSpace(rmspace.shape, x, y) + samples = get_num_samples(rmspace.sampling, space) # Allocate memory - pv = get_power_vector(core, core.shape) - offsets = get_offsets(core, core.shape) + pv = get_power_vector(rmspace.core, rmspace.shape) + offsets = get_offsets(rmspace.core, rmspace.shape) - hist = KernelAbstractions.zeros(core.backend, Int32, get_histogram_size(core.shape)) + hist = KernelAbstractions.zeros(rmspace.core.backend, Int32, get_histogram_size(rmspace.shape)) # Call the kernel - if core.sampling isa Full - gpu_rng = KernelAbstractions.zeros(core.backend, Int32, 1) - gpu_histogram!(core.backend, groupsize)(x, y, pv, offsets, core, space, Int32(samples), hist, gpu_rng, Int32(N); ndrange = samples) + if rmspace.sampling isa Full + gpu_rng = KernelAbstractions.zeros(rmspace.core.backend, Int32, 1) + gpu_histogram!(rmspace.core.backend, groupsize)(x, y, pv, offsets, rmspace.core, rmspace.expr, rmspace.sampling, space, Int32(samples), hist, gpu_rng, Int32(N); ndrange = samples) else - rng = get_sample(core, core.sampling, space, samples) - gpu_rng = KernelAbstractions.zeros(core.backend, SVector{2,Int32}, samples) + rng = get_sample(rmspace.core, rmspace.sampling, space, samples) + gpu_rng = KernelAbstractions.zeros(rmspace.core.backend, SVector{2,Int32}, 
samples) copyto!(gpu_rng, rng) - gpu_histogram!(core.backend, groupsize)(x, y, pv, offsets, core, space, Int32(samples), hist, gpu_rng, Int32(N); ndrange = samples) + gpu_histogram!(rmspace.core.backend, groupsize)(x, y, pv, offsets, rmspace.core, rmspace.expr, rmspace.sampling, space, Int32(samples), hist, gpu_rng, Int32(N); ndrange = samples) end - KernelAbstractions.synchronize(core.backend) + KernelAbstractions.synchronize(rmspace.core.backend) res = hist |> Vector - out = eachindex(res) - - return Counts(Int64.(res), out) + return res end ########################################################################################## # Implementation: GPU Kernels ########################################################################################## -@kernel function gpu_histogram!(x, y, pv, offsets, core, space, samples, hist, rng, n) +@kernel function gpu_histogram!(x, y, pv, offsets, core, expr, sampling, space, samples, hist, rng, n) m = @index(Global) if m <= samples i = zero(Int32) j = zero(Int32) - if core.sampling isa Full - i, j = get_sample(core, core.sampling, space, rng, m) + if sampling isa Full + i, j = get_sample(core, sampling, space, rng, m) else i = rng[m][1] j = rng[m][2] end - idx = gpu_compute_motif(core.shape.expr, x, y, i, j, pv, offsets, n) + idx = gpu_compute_motif(expr, x, y, i, j, pv, offsets, n) Atomix.@atomic hist[idx] += one(Int32) end end - ########################################################################################## # Implementation: distribution ########################################################################################## diff --git a/src/core/recurrence_microstates.jl b/src/core/recurrence_microstates.jl index 6eada46..9edc19b 100644 --- a/src/core/recurrence_microstates.jl +++ b/src/core/recurrence_microstates.jl @@ -6,7 +6,7 @@ export RecurrenceMicrostates """ RecurrenceMicrostates """ -struct RecurrenceMicrostates{MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode, C <: RMACore} <: 
CountBasedOutcomeSpace +struct RecurrenceMicrostates{MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode, C <: RMACore} <: ComplexityMeasures.CountBasedOutcomeSpace shape::MS expr::RE sampling::SM @@ -16,27 +16,78 @@ end ########################################################################################## # Recurrence Microstate: Convenience constructors ########################################################################################## -function RecurrenceMicrostates(ε::Real, N::Int; sampling_ratio::Real = 0.05, core::RMACore = CPUCore()) +function RecurrenceMicrostates(ε::Real, N::Int; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), core::RMACore = CPUCore(), metric::Metric = DEFAULT_METRIC) ## If using GPU, assert Float32. if (core isa GPUCore) @assert ε isa Float32 "When using GPU, the threshold must be a Float32." + if (!(metric isa GPUMetric)) + println("Warning: GPU backend must use a GPUMetric to evaluate distance. It will use a GPUEuclidean to avoid errors.") + metric = GPUEuclidean() + end end - shape = Rect(N) - sampling = SRandom(sampling_ratio) - expr = Standard(ε) + shape = RectMicrostate(N) + expr = Standard(ε; metric = metric) return RecurrenceMicrostates(shape, expr, sampling, core) end -function RecurrenceMicrostates(ε::Real, shape::MicrostateShape; sampling_ratio::Real = 0.05, core::RMACore = CPUCore()) +function RecurrenceMicrostates(ε::Real, structure::NTuple; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), metric::Metric = DEFAULT_METRIC) + shape = RectMicrostate(structure) + expr = Standard(ε; metric = metric) + return RecurrenceMicrostates(shape, expr, sampling, CPUCore()) +end + +function RecurrenceMicrostates(ε::Real, shape::MicrostateShape; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), core::RMACore = CPUCore(), metric::Metric = DEFAULT_METRIC) + + ## If using GPU, assert Float32. 
+ if (core isa GPUCore) + @assert ε isa Float32 "When using GPU, the threshold must be a Float32." + if (!(metric isa GPUMetric)) + println("Warning: GPU backend must use a GPUMetric to evaluate distance. It will use a GPUEuclidean to avoid errors.") + metric = GPUEuclidean() + end + end + + expr = Standard(ε; metric = metric) + return RecurrenceMicrostates(shape, expr, sampling, core) +end + +function RecurrenceMicrostates(ε_min::Real, ε_max::Real, N::Int; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), core::RMACore = CPUCore(), metric::Metric = DEFAULT_METRIC) + + ## If using GPU, assert Float32. + if (core isa GPUCore) + @assert ε isa Float32 "When using GPU, the threshold must be a Float32." + if (!(metric isa GPUMetric)) + println("Warning: GPU backend must use a GPUMetric to evaluate distance. It will use a GPUEuclidean to avoid errors.") + metric = GPUEuclidean() + end + end + + shape = RectMicrostate(N) + expr = Corridor(ε_min, ε_max; metric = metric) + return RecurrenceMicrostates(shape, expr, sampling, core) +end + +function RecurrenceMicrostates(ε_min::Real, ε_max::Real, structure::NTuple; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), metric::Metric = DEFAULT_METRIC) + shape = RectMicrostate(structure) + expr = Corridor(ε_min, ε_max; metric = metric) + return RecurrenceMicrostates(shape, expr, sampling, CPUCore()) +end + +function RecurrenceMicrostates(ε_min::Real, ε_max::Real, shape::MicrostateShape; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), core::RMACore = CPUCore(), metric::Metric = DEFAULT_METRIC) ## If using GPU, assert Float32. if (core isa GPUCore) @assert ε isa Float32 "When using GPU, the threshold must be a Float32." + if (!(metric isa GPUMetric)) + println("Warning: GPU backend must use a GPUMetric to evaluate distance. 
It will use a GPUEuclidean to avoid errors.") + metric = GPUEuclidean() + end end - sampling = SRandom(sampling_ratio) - expr = Standard(ε) + expr = Corridor(ε_min, ε_max; metric = metric) return RecurrenceMicrostates(shape, expr, sampling, core) -end \ No newline at end of file +end + +########################################################################################## \ No newline at end of file diff --git a/src/utils/operations/permute_cols.jl b/src/utils/operations/permute_cols.jl index b381dcb..9107722 100644 --- a/src/utils/operations/permute_cols.jl +++ b/src/utils/operations/permute_cols.jl @@ -11,7 +11,7 @@ Operation that permutes the columns of a microstate \$\\mathbf{M}\$. To initialize a `PermuteColumns` operation, a rectangular microstate shape must be provided via a [`Rect`](@ref) structure: ```julia -PermuteColumns(::Rect2{R, C, B, E}; S::Vector{Vector{Int}} = collect(permutations(1:C)) +PermuteColumns(::Rect2Microstate{R, C, B}; S::Vector{Vector{Int}} = collect(permutations(1:C)) ``` Here, the keyword argument `S` defines the set \$S_n\$ of column permutations. The `PermuteColumns` struct precomputes the column permutations for each row of the microstate. @@ -19,8 +19,8 @@ These precomputed permutations can be accessed via the field `Q`. 
# Examples ```julia -PermuteColumns(Rect(3, 3)) # Microstate 3 x 3 -PermuteColumns(Rest(1, 3)) # Microstate 1 x 3 (it is a line) +PermuteColumns(RectMicrostate(3, 3)) # Microstate 3 x 3 +PermuteColumns(RectMicrostate(1, 3)) # Microstate 1 x 3 (it is a line) ``` This operation is applied via the [`operate`](@ref) function: @@ -41,9 +41,9 @@ struct PermuteColumns{R, C} <: Operation end PermuteColumns( - ::Rect2{R, C, B, E}; + ::Rect2Microstate{R, C, B}; S::Vector{Vector{Int}} = collect(permutations(1:C)) - ) where {R, C, B, E} = PermuteColumns{R, C}(precompute_Q(R, C, S)) + ) where {R, C, B} = PermuteColumns{R, C}(precompute_Q(R, C, S)) ########################################################################################## # Operate a permutation of columns diff --git a/src/utils/operations/permute_rows.jl b/src/utils/operations/permute_rows.jl index e56d9ae..a6789e2 100644 --- a/src/utils/operations/permute_rows.jl +++ b/src/utils/operations/permute_rows.jl @@ -11,13 +11,13 @@ Operation that permutes the rows of a microstate \$\\mathbf{M}\$. To initialize a `PermuteRows` operation, a rectangular microstate shape must be provided via a [`Rect`](@ref) structure: ```julia -PermuteRows(::Rect2{R, C, B, E}) +PermuteRows(::Rect2Microstate{R, C, B, E}) ``` # Examples ```julia -PermuteRows(Rect(3, 3)) # Microstate 3 x 3 -PermuteRows(Rest(3, 1)) # Microstate 3 x 1 (it is a column) +PermuteRows(RectMicrostate(3, 3)) # Microstate 3 x 3 +PermuteRows(RectMicrostate(3, 1)) # Microstate 3 x 1 (it is a column) ``` This operation is applied via the [`operate`](@ref) function: @@ -35,7 +35,7 @@ The resulting microstate binary identifier (1-based). 
""" struct PermuteRows{R, C} <: Operation end -PermuteRows(::Rect2{R, C, B, E}) where {R, C, B, E} = PermuteRows{R, C}() +PermuteRows(::Rect2Microstate{R, C, B}) where {R, C, B} = PermuteRows{R, C}() ########################################################################################## # Operate a permutation of rows diff --git a/src/utils/operations/transpose.jl b/src/utils/operations/transpose.jl index 68d00de..fcdb5ab 100644 --- a/src/utils/operations/transpose.jl +++ b/src/utils/operations/transpose.jl @@ -11,12 +11,12 @@ Operation that transposes a microstate \$\\mathbf{M}\$. To initialize a `Transpose` operation, a rectangular microstate shape must be provided via a [`Rect`](@ref) structure: ```julia -Transpose(::Rect2{R, C, B, E}) +Transpose(::Rect2Microstate{R, C, B, E}) ``` # Examples ```julia -Transpose(Rect(3, 3)) # 3 x 3 microstate +Transpose(RectMicrostate(3, 3)) # 3 x 3 microstate ``` This operation is applied via the [`operate`](@ref) function: @@ -32,7 +32,7 @@ The resulting microstate decimal identifier (1-based). 
""" struct Transpose{R, C} <: Operation end -Transpose(::Rect2{R, C, B, E}) where {R, C, B, E} = Transpose{R, C}() +Transpose(::Rect2Microstate{R, C, B}) where {R, C, B} = Transpose{R, C}() ########################################################################################## # Operate a transposition From df674185a72c829a67760ae63f954c334697fa34 Mon Sep 17 00:00:00 2001 From: Gabriel Ferreira Date: Mon, 2 Feb 2026 14:18:15 -0300 Subject: [PATCH 06/19] remove core dependence; adapt to the new API structure; fix small issues --- src/core/abstract_core.jl | 2 +- src/core/complexity_measures_interface.jl | 14 +- src/core/cpu_core.jl | 24 +- src/core/gpu/gpu_core.jl | 36 +-- src/core/recurrence.jl | 2 +- src/core/recurrence_microstates.jl | 73 ++---- src/core/shape.jl | 5 +- src/recurrences/corridor.jl | 6 +- src/recurrences/standard.jl | 4 +- src/rqa/det.jl | 35 ++- src/rqa/disorder.jl | 45 +++- src/rqa/entropy.jl | 2 +- src/rqa/lam.jl | 23 +- src/rqa/rr.jl | 16 +- src/shapes/diagonal.jl | 2 +- src/utils/opt/threshold.jl | 8 +- test/core/backend.jl | 6 +- test/core/operation.jl | 6 +- test/core/sampling.jl | 4 +- test/core/shape.jl | 6 +- test/distributions.jl | 256 +++++++++++----------- test/rqa/det.jl | 17 +- test/rqa/entropy.jl | 2 +- test/rqa/lam.jl | 16 +- test/rqa/rr.jl | 2 +- test/sampling/full.jl | 21 +- test/sampling/random.jl | 32 +-- test/shapes/diagonal.jl | 14 +- test/shapes/rect.jl | 14 +- test/shapes/triangle.jl | 11 +- test/utils/operations.jl | 6 +- 31 files changed, 388 insertions(+), 322 deletions(-) diff --git a/src/core/abstract_core.jl b/src/core/abstract_core.jl index 5372632..93de4b1 100644 --- a/src/core/abstract_core.jl +++ b/src/core/abstract_core.jl @@ -1,4 +1,4 @@ -export RMACore, histogram +export histogram ########################################################################################## # RMACore diff --git a/src/core/complexity_measures_interface.jl b/src/core/complexity_measures_interface.jl index d057a76..004b6d3 100644 
--- a/src/core/complexity_measures_interface.jl +++ b/src/core/complexity_measures_interface.jl @@ -5,7 +5,7 @@ function ComplexityMeasures.counts_and_outcomes(rmspace::RecurrenceMicrostates, x, y = x) counts = histogram(rmspace, x, y) outcomes = eachindex(counts) - return Counts(counts, outcomes) + return counts, outcomes end function ComplexityMeasures.codify(rmspace::RecurrenceMicrostates, x, y = x) @@ -16,6 +16,18 @@ function ComplexityMeasures.outcome_space(rmspace::RecurrenceMicrostates, x, y = return eachindex(1:get_histogram_size(rmspace.shape)) end +## +## Needed to CRP +function ComplexityMeasures.probabilities(o::RecurrenceMicrostates, x, y) + return first(probabilities_and_outcomes(o, x, y)) +end + +function ComplexityMeasures.probabilities_and_outcomes(o::RecurrenceMicrostates, x, y) + cts, outs = counts_and_outcomes(o, x, y) + probs = Probabilities(cts, outs) + return probs, outcomes(probs) +end + #= # Define outcome space diff --git a/src/core/cpu_core.jl b/src/core/cpu_core.jl index bf67960..7c8326d 100644 --- a/src/core/cpu_core.jl +++ b/src/core/cpu_core.jl @@ -1,5 +1,3 @@ -export CPUCore, StandardCPUCore - ########################################################################################## # RMACore: CPU ########################################################################################## @@ -42,22 +40,24 @@ end #......................................................................................... 
function histogram( - rmspace::RecurrenceMicrostates{MS, RE, SM, C}, + rmspace::RecurrenceMicrostates{MS, RE, SM}, x::Union{StateSpaceSet, Vector{<: Real}}, y::Union{StateSpaceSet, Vector{<: Real}} = x; threads = Threads.nthreads() -) where {MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode, C <: CPUCore} +) where {MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode} if (x isa Vector); x = StateSpaceSet(x); end if (y isa Vector); y = StateSpaceSet(y); end + core = CPUCore() + # Info space = SamplingSpace(rmspace.shape, x, y) samples = get_num_samples(rmspace.sampling, space) # Allocate memory - pv = get_power_vector(rmspace.core, rmspace.shape) - offsets = get_offsets(rmspace.core, rmspace.shape) + pv = get_power_vector(core, rmspace.shape) + offsets = get_offsets(core, rmspace.shape) # Compute the histogram chunk = ceil(Int, samples / threads) @@ -72,7 +72,7 @@ function histogram( stop = min(t * chunk, samples) for m in start:stop - i, j = get_sample(rmspace.core, rmspace.sampling, space, local_rng, m) + i, j = get_sample(core, rmspace.sampling, space, local_rng, m) idx = compute_motif(rmspace.expr, x, y, i, j, pv, offsets) @inbounds local_hist[idx] += 1 end @@ -88,19 +88,21 @@ end # Based on spatial data: (CPU only) #......................................................................................... 
function histogram( - rmspace::RecurrenceMicrostates{MS, RE, SM, C}, + rmspace::RecurrenceMicrostates{MS, RE, SM}, x::AbstractArray{<: Real}, y::AbstractArray{<: Real} = x; threads = Threads.nthreads() -) where {MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode, C <: CPUCore} +) where {MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode} # Info space = SamplingSpace(rmspace.shape, x, y) samples = get_num_samples(rmspace.sampling, space) dim_x = ndims(x) - 1 dim_y = ndims(y) - 1 + core = CPUCore() + # Allocate memory - pv = get_power_vector(rmspace.core, rmspace.shape) + pv = get_power_vector(core, rmspace.shape) # Compute the histogram chunk = ceil(Int, samples / threads) @@ -118,7 +120,7 @@ function histogram( itr = zeros(Int, dim_x + dim_y) for m in start:stop - get_sample(rmspace.core, rmspace.sampling, space, idx, local_rng, m) + get_sample(core, rmspace.sampling, space, idx, local_rng, m) i = compute_motif(rmspace.shape, rmspace.expr, x, y, idx, itr, pv) @inbounds local_hist[i] += 1 end diff --git a/src/core/gpu/gpu_core.jl b/src/core/gpu/gpu_core.jl index d0ba474..af4c3b5 100644 --- a/src/core/gpu/gpu_core.jl +++ b/src/core/gpu/gpu_core.jl @@ -1,5 +1,3 @@ -export GPUCore, StandardGPUCore - ########################################################################################## # RMACore: GPU ########################################################################################## @@ -15,9 +13,7 @@ GPUCore(backend) ``` Here, `backend` is the GPU device backend, e.g., `MetalBackend`, `CUDABackend`. """ -struct GPUCore{B} <: RMACore - backend::B -end +struct GPUCore <: RMACore end ########################################################################################## # Implementation: compute_motif @@ -41,35 +37,39 @@ end # Based on time series: (GPU) #......................................................................................... 
function histogram( - rmspace::RecurrenceMicrostates{MS, RE, SM, C}, - x::AbstractGPUVector{SVector{N, Float32}}, - y::AbstractGPUVector{SVector{N, Float32}} = x; + rmspace::RecurrenceMicrostates{MS, <: RecurrenceExpression{T, M}, SM}, + x::AbstractGPUVector{SVector{N, T}}, + y::AbstractGPUVector{SVector{N, T}} = x; groupsize::Int = 256 -) where {N, MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode, C <: GPUCore} +) where {MS <: MicrostateShape, SM <: SamplingMode, N, T <: Real, M <: GPUMetric} + + # Get backend. + backend = KernelAbstractions.get_backend(x) + core = GPUCore() # Info space = SamplingSpace(rmspace.shape, x, y) samples = get_num_samples(rmspace.sampling, space) # Allocate memory - pv = get_power_vector(rmspace.core, rmspace.shape) - offsets = get_offsets(rmspace.core, rmspace.shape) + pv = get_power_vector(core, rmspace.shape) + offsets = get_offsets(core, rmspace.shape) - hist = KernelAbstractions.zeros(rmspace.core.backend, Int32, get_histogram_size(rmspace.shape)) + hist = KernelAbstractions.zeros(backend, Int32, get_histogram_size(rmspace.shape)) # Call the kernel if rmspace.sampling isa Full - gpu_rng = KernelAbstractions.zeros(rmspace.core.backend, Int32, 1) - gpu_histogram!(rmspace.core.backend, groupsize)(x, y, pv, offsets, rmspace.core, rmspace.expr, rmspace.sampling, space, Int32(samples), hist, gpu_rng, Int32(N); ndrange = samples) + gpu_rng = KernelAbstractions.zeros(backend, Int32, 1) + gpu_histogram!(backend, groupsize)(x, y, pv, offsets, core, rmspace.expr, rmspace.sampling, space, Int32(samples), hist, gpu_rng, Int32(N); ndrange = samples) else - rng = get_sample(rmspace.core, rmspace.sampling, space, samples) - gpu_rng = KernelAbstractions.zeros(rmspace.core.backend, SVector{2,Int32}, samples) + rng = get_sample(core, rmspace.sampling, space, samples) + gpu_rng = KernelAbstractions.zeros(backend, SVector{2,Int32}, samples) copyto!(gpu_rng, rng) - gpu_histogram!(rmspace.core.backend, groupsize)(x, y, pv, offsets, 
rmspace.core, rmspace.expr, rmspace.sampling, space, Int32(samples), hist, gpu_rng, Int32(N); ndrange = samples) + gpu_histogram!(backend, groupsize)(x, y, pv, offsets, core, rmspace.expr, rmspace.sampling, space, Int32(samples), hist, gpu_rng, Int32(N); ndrange = samples) end - KernelAbstractions.synchronize(rmspace.core.backend) + KernelAbstractions.synchronize(backend) res = hist |> Vector return res end diff --git a/src/core/recurrence.jl b/src/core/recurrence.jl index 80efbe0..9139b23 100644 --- a/src/core/recurrence.jl +++ b/src/core/recurrence.jl @@ -15,7 +15,7 @@ which defines how recurrence between two states is evaluated. - [`Standard`](@ref) - [`Corridor`](@ref) """ -abstract type RecurrenceExpression end +abstract type RecurrenceExpression{T, M} end ########################################################################################## # Function: recurrence diff --git a/src/core/recurrence_microstates.jl b/src/core/recurrence_microstates.jl index 9edc19b..d5aab27 100644 --- a/src/core/recurrence_microstates.jl +++ b/src/core/recurrence_microstates.jl @@ -6,88 +6,61 @@ export RecurrenceMicrostates """ RecurrenceMicrostates """ -struct RecurrenceMicrostates{MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode, C <: RMACore} <: ComplexityMeasures.CountBasedOutcomeSpace +struct RecurrenceMicrostates{MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode} <: ComplexityMeasures.CountBasedOutcomeSpace shape::MS expr::RE sampling::SM - core::C end ########################################################################################## # Recurrence Microstate: Convenience constructors ########################################################################################## -function RecurrenceMicrostates(ε::Real, N::Int; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), core::RMACore = CPUCore(), metric::Metric = DEFAULT_METRIC) - - ## If using GPU, assert Float32. 
- if (core isa GPUCore) - @assert ε isa Float32 "When using GPU, the threshold must be a Float32." - if (!(metric isa GPUMetric)) - println("Warning: GPU backend must use a GPUMetric to evaluate distance. It will use a GPUEuclidean to avoid errors.") - metric = GPUEuclidean() - end - end +function RecurrenceMicrostates(expr::RecurrenceExpression, N::Int; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio)) + shape = RectMicrostate(N) + return RecurrenceMicrostates(shape, expr, sampling) +end +function RecurrenceMicrostates(ε::Real, N::Int; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), metric::Metric = DEFAULT_METRIC) shape = RectMicrostate(N) expr = Standard(ε; metric = metric) - return RecurrenceMicrostates(shape, expr, sampling, core) + return RecurrenceMicrostates(shape, expr, sampling) +end + +function RecurrenceMicrostates(expr::RecurrenceExpression, structure::NTuple; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio)) + shape = RectMicrostate(structure) + return RecurrenceMicrostates(shape, expr, sampling) end function RecurrenceMicrostates(ε::Real, structure::NTuple; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), metric::Metric = DEFAULT_METRIC) shape = RectMicrostate(structure) expr = Standard(ε; metric = metric) - return RecurrenceMicrostates(shape, expr, sampling, CPUCore()) + return RecurrenceMicrostates(shape, expr, sampling) end -function RecurrenceMicrostates(ε::Real, shape::MicrostateShape; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), core::RMACore = CPUCore(), metric::Metric = DEFAULT_METRIC) - - ## If using GPU, assert Float32. - if (core isa GPUCore) - @assert ε isa Float32 "When using GPU, the threshold must be a Float32." - if (!(metric isa GPUMetric)) - println("Warning: GPU backend must use a GPUMetric to evaluate distance. 
It will use a GPUEuclidean to avoid errors.") - metric = GPUEuclidean() - end - end +function RecurrenceMicrostates(expr::RecurrenceExpression, shape::MicrostateShape; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio)) + return RecurrenceMicrostates(shape, expr, sampling) +end +function RecurrenceMicrostates(ε::Real, shape::MicrostateShape; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), metric::Metric = DEFAULT_METRIC) expr = Standard(ε; metric = metric) - return RecurrenceMicrostates(shape, expr, sampling, core) + return RecurrenceMicrostates(shape, expr, sampling) end -function RecurrenceMicrostates(ε_min::Real, ε_max::Real, N::Int; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), core::RMACore = CPUCore(), metric::Metric = DEFAULT_METRIC) - - ## If using GPU, assert Float32. - if (core isa GPUCore) - @assert ε isa Float32 "When using GPU, the threshold must be a Float32." - if (!(metric isa GPUMetric)) - println("Warning: GPU backend must use a GPUMetric to evaluate distance. 
It will use a GPUEuclidean to avoid errors.") - metric = GPUEuclidean() - end - end - +function RecurrenceMicrostates(ε_min::Real, ε_max::Real, N::Int; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), metric::Metric = DEFAULT_METRIC) shape = RectMicrostate(N) expr = Corridor(ε_min, ε_max; metric = metric) - return RecurrenceMicrostates(shape, expr, sampling, core) + return RecurrenceMicrostates(shape, expr, sampling) end function RecurrenceMicrostates(ε_min::Real, ε_max::Real, structure::NTuple; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), metric::Metric = DEFAULT_METRIC) shape = RectMicrostate(structure) expr = Corridor(ε_min, ε_max; metric = metric) - return RecurrenceMicrostates(shape, expr, sampling, CPUCore()) + return RecurrenceMicrostates(shape, expr, sampling) end -function RecurrenceMicrostates(ε_min::Real, ε_max::Real, shape::MicrostateShape; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), core::RMACore = CPUCore(), metric::Metric = DEFAULT_METRIC) - - ## If using GPU, assert Float32. - if (core isa GPUCore) - @assert ε isa Float32 "When using GPU, the threshold must be a Float32." - if (!(metric isa GPUMetric)) - println("Warning: GPU backend must use a GPUMetric to evaluate distance. 
It will use a GPUEuclidean to avoid errors.") - metric = GPUEuclidean() - end - end - +function RecurrenceMicrostates(ε_min::Real, ε_max::Real, shape::MicrostateShape; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), metric::Metric = DEFAULT_METRIC) expr = Corridor(ε_min, ε_max; metric = metric) - return RecurrenceMicrostates(shape, expr, sampling, core) + return RecurrenceMicrostates(shape, expr, sampling) end ########################################################################################## \ No newline at end of file diff --git a/src/core/shape.jl b/src/core/shape.jl index 8f450af..374958a 100644 --- a/src/core/shape.jl +++ b/src/core/shape.jl @@ -29,8 +29,9 @@ abstract type MicrostateShape end #......................................................................................... function compute_motif( shape::MicrostateShape, - x::AbstractArray{<: Real}, - y::AbstractArray{<: Real}, + ::RecurrenceExpression, + ::AbstractArray{<: Real}, + ::AbstractArray{<: Real}, ::Vector{Int}, ::Vector{Int}, ::SVector{N, Int} diff --git a/src/recurrences/corridor.jl b/src/recurrences/corridor.jl index 3121699..d2358f2 100644 --- a/src/recurrences/corridor.jl +++ b/src/recurrences/corridor.jl @@ -32,9 +32,9 @@ Corridor(0.05, 0.27; metric = Cityblock()) The recurrence evaluation is performed via the [`recurrence`](@ref) function. For GPU execution, the corresponding implementation is provided by `gpu_recurrence`. """ -struct Corridor{F <: Real, M <: Metric} <: RecurrenceExpression - ε_min::F - ε_max::F +struct Corridor{T <: Real, M <: Metric} <: RecurrenceExpression{T, M} + ε_min::T + ε_max::T metric::M end #......................................................................................... 
diff --git a/src/recurrences/standard.jl b/src/recurrences/standard.jl index dc4e1d7..13d67ba 100644 --- a/src/recurrences/standard.jl +++ b/src/recurrences/standard.jl @@ -31,8 +31,8 @@ Standard(0.27; metric = Cityblock()) The recurrence evaluation is performed via the [`recurrence`](@ref) function. For GPU execution, the corresponding implementation is provided by `gpu_recurrence`. """ -struct Standard{F <: Real, M <: Metric} <: RecurrenceExpression - ε::F +struct Standard{T <: Real, M <: Metric} <: RecurrenceExpression{T, M} + ε::T metric::M end #......................................................................................... diff --git a/src/rqa/det.jl b/src/rqa/det.jl index 1b1647a..9569a3f 100644 --- a/src/rqa/det.jl +++ b/src/rqa/det.jl @@ -14,7 +14,7 @@ function. # Using a distribution ```julia -measure(::Determinism, dist::Probabilities) +measure(::Determinism, rmspace::RecurrenceMicrostates, dist::Probabilities) ``` ## Arguments @@ -29,16 +29,18 @@ A `Float64` corresponding to the estimated determinism. ```julia using RecurrenceMicrostatesAnalysis, Distributions data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -dist = distribution(data, 0.27, 3) -det = measure(Determinism(), dist) +rmspace = RecurrenceMicrostates(0.27, 3) +dist = probabilities(rmspace, data) +det = measure(Determinism(), rmspace, dist) ``` ### Using diagonal microstates ```julia using RecurrenceMicrostatesAnalysis, Distributions data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -dist = distribution(data, Diagonal(Standard(0.27), 3)) -det = measure(Determinism(), dist) +rmspace = RecurrenceMicrostates(0.27, Diagonal(3)) +dist = probabilities(rmspace, data) +det = measure(Determinism(), rmspace, dist) ``` # Using a time series @@ -73,8 +75,12 @@ struct Determinism <: QuantificationMeasure end ########################################################################################## # Using as input a RMA distribution. 
#......................................................................................... -function measure(::Determinism, dist::Probabilities) - if (length(dist) == 512) +function measure( + ::Determinism, + rmspace::RecurrenceMicrostates, + dist::Probabilities + ) + if (rmspace.shape isa Rect2Microstate{3, 3, 2} && length(dist) == 512) rr = measure(RecurrenceRate(), dist) values = zeros(Int, 64) v_idx = 1 @@ -92,7 +98,7 @@ function measure(::Determinism, dist::Probabilities) return 1 - ((1/rr) * pl) - elseif (length(dist) == 8) + elseif (rmspace.shape isa DiagonalMicrostate{3, 2} && length(dist) == 8) rr = measure(RecurrenceRate(), dist) return 1 - ((1/rr) * dist[3]) else @@ -103,9 +109,16 @@ end #......................................................................................... # Using as input a time series #......................................................................................... -function measure(::Determinism, x::StateSpaceSet; threshold::Real = optimize(Threshold(), RecurrenceEntropy(), x, 3)[1]) - dist = distribution(x, Diagonal(Standard(threshold), 3)) - measure(Determinism(), dist) +function measure( + op::Determinism, + x::Union{StateSpaceSet, Vector{<:Real}}, + y::Union{StateSpaceSet, Vector{<:Real}} = x; + threshold::Real = optimize(Threshold(), RecurrenceEntropy(), x, 3)[1], + metric::Metric = DEFAULT_METRIC + ) + rmspace = RecurrenceMicrostates(threshold, DiagonalMicrostate(3); metric = metric) + dist = probabilities(rmspace, x, y) + measure(op, rmspace, dist) end ########################################################################################## \ No newline at end of file diff --git a/src/rqa/disorder.jl b/src/rqa/disorder.jl index c13af16..cd53a99 100644 --- a/src/rqa/disorder.jl +++ b/src/rqa/disorder.jl @@ -86,29 +86,49 @@ function measure(settings::Disorder{N}, probs::Probabilities, norm_param::Int) w return total_entropy / norm_param end 
#......................................................................................... -function measure(settings::Disorder{N}, x::StateSpaceSet; th::Float64 = optimize(Threshold(), Disorder(N), x)[1], th_min::Float64 = 0.85 * th, th_max::Float64 = 1.25 * th, num_tests::Int = 40) where {N} +function measure( + settings::Disorder{N}, + x::Union{StateSpaceSet, Vector{<:Real}}, + y::Union{StateSpaceSet, Vector{<:Real}} = x; + th::Float64 = optimize(Threshold(), Disorder(N), x)[1], + th_min::Float64 = 0.85 * th, + th_max::Float64 = 1.25 * th, + num_tests::Int = 40, + metric::Metric = DEFAULT_METRIC + ) where {N} A = get_disorder_norm_factor(settings, x) + Ay = get_disorder_norm_factor(settings, y) + A = Ay > A ? Ay : A + values = zeros(typeof(th), num_tests) th_range = range(th_min, th_max, num_tests) for i in eachindex(th_range) - probs = distribution(x, th_range[i], N; sampling = Full()) + rmspace = RecurrenceMicrostates(th_range[i], N; sampling = Full(), metric = metric) + probs = probabilities(rmspace, x, y) values[i] = measure(settings, probs, A) end return maximum(values) end #......................................................................................... 
-function measure(settings::Disorder{N}, dataset::Vector{<:AbstractGPUVector{SVector{D, Float32}}}, th_min::Float32, th_max::Float32; num_tests::Int = 40, metric::GPUMetric = GPUEuclidean()) where {N, D} +function measure( + settings::Disorder{N}, + dataset::Vector{<:AbstractGPUVector{SVector{D, Float32}}}, + th_min::Float32, + th_max::Float32; + num_tests::Int = 40, + metric::GPUMetric = GPUEuclidean() + ) where {N, D} A = _norm_factor(Val(N), Val(D)) values = zeros(Float32, num_tests, length(dataset)) th_range = Float32.(range(th_min, th_max, num_tests)) backend = get_backend(dataset[1]) for i ∈ eachindex(th_range) - core = GPUCore(backend, Rect(Standard(th_range[i]; metric = metric), N), Full()) + rmspace = RecurrenceMicrostates(th_range[i], N; sampling = Full(), metric = metric, core = GPUCore(backend)) for j in eachindex(dataset) - probs = distribution(core, dataset[j], dataset[j]) + probs = probabilities(rmspace, dataset[j]) values[i, j] = measure(settings, probs, A) end end @@ -122,15 +142,22 @@ function measure(settings::Disorder{N}, dataset::Vector{<:AbstractGPUVector{SVec return results end #......................................................................................... 
-function measure(settings::Disorder{N}, dataset::Vector{StateSpaceSet}, th_min::Float64, th_max::Float64; num_tests::Int = 40, metric::Metric = DEFAULT_METRIC) where {N} +function measure( + settings::Disorder{N}, + dataset::Vector{StateSpaceSet}, + th_min::Float64, + th_max::Float64; + num_tests::Int = 40, + metric::Metric = DEFAULT_METRIC + ) where {N} A = get_disorder_norm_factor(settings, dataset[1]) values = zeros(Float64, num_tests, length(dataset)) th_range = range(th_min, th_max, num_tests) for i ∈ eachindex(th_range) - core = CPUCore(Rect(Standard(th_range[i]; metric = metric), N), Full()) + rmspace = RecurrenceMicrostates(th_range[i], N; sampling = Full(), metric = metric) for j in eachindex(dataset) - probs = distribution(core, dataset[j], dataset[j]) + probs = probabilities(rmspace, dataset[j]) values[i, j] = measure(settings, probs, A) end end @@ -159,7 +186,7 @@ _norm_factor(::Val{5}, ::Val{D}) where D = D > 1 ? throw(ArgumentError("Disorder ########################################################################################## function compute_labels(N::Int) S = collect(permutations(1:N)) - shape = Rect(Standard(0.27), N) + shape = RectMicrostate(N) row_permutation = PermuteRows(shape) col_permutation = PermuteColumns(shape; S = S) diff --git a/src/rqa/entropy.jl b/src/rqa/entropy.jl index 3f12404..f91e537 100644 --- a/src/rqa/entropy.jl +++ b/src/rqa/entropy.jl @@ -63,7 +63,7 @@ end #......................................................................................... # Using as input a time series #......................................................................................... 
-function measure(::RecurrenceEntropy, x::StateSpaceSet; N::Integer = 3) +function measure(::RecurrenceEntropy, x::Union{StateSpaceSet, Vector{<:Real}}; N::Integer = 3) return optimize(Threshold(), RecurrenceEntropy(), x, N)[2] end diff --git a/src/rqa/lam.jl b/src/rqa/lam.jl index a863d23..3d4702a 100644 --- a/src/rqa/lam.jl +++ b/src/rqa/lam.jl @@ -74,8 +74,12 @@ struct Laminarity <: QuantificationMeasure end ########################################################################################## # Using as input a RMA distribution. #......................................................................................... -function measure(::Laminarity, dist::Probabilities) - if (length(dist) == 512) +function measure( + ::Laminarity, + rmspace::RecurrenceMicrostates, + dist::Probabilities + ) + if (rmspace.shape isa Rect2Microstate{3, 3, 2} && length(dist) == 512) rr = measure(RecurrenceRate(), dist) values = zeros(Int, 64) @@ -94,7 +98,7 @@ function measure(::Laminarity, dist::Probabilities) return 1 - ((1/rr) * pl) - elseif (length(dist) == 8) + elseif (rmspace.shape isa Rect2Microstate{1, 3} && length(dist) == 8) rr = measure(RecurrenceRate(), dist) return 1 - ((1/rr) * dist[3]) else @@ -105,9 +109,16 @@ end #......................................................................................... # Using as input a time series #......................................................................................... 
-function measure(::Laminarity, x::StateSpaceSet; threshold::Real = optimize(Threshold(), RecurrenceEntropy(), x, 3)[1]) - dist = distribution(x, Rect(Standard(threshold); rows = 1, cols = 3)) - measure(Laminarity(), dist) +function measure( + op::Laminarity, + x::Union{StateSpaceSet, Vector{<:Real}}, + y::Union{StateSpaceSet, Vector{<:Real}} = x; + threshold::Real = optimize(Threshold(), RecurrenceEntropy(), x, 3)[1], + metric::Metric = DEFAULT_METRIC + ) + rmspace = RecurrenceMicrostates(threshold, RectMicrostate(1, 3); metric = metric) + dist = probabilities(rmspace, x, y) + measure(op, rmspace, dist) end ########################################################################################## \ No newline at end of file diff --git a/src/rqa/rr.jl b/src/rqa/rr.jl index 2d82178..f57b7d2 100644 --- a/src/rqa/rr.jl +++ b/src/rqa/rr.jl @@ -60,7 +60,10 @@ struct RecurrenceRate <: QuantificationMeasure end ########################################################################################## # Using as input a RMA distribution. #......................................................................................... -function measure(::RecurrenceRate, dist::Probabilities) +function measure( + ::RecurrenceRate, + dist::Probabilities + ) result = 0.0 hv = Int(log2(length(dist))) @@ -74,8 +77,15 @@ end #......................................................................................... # Using as input a time series #......................................................................................... 
-function measure(::RecurrenceRate, x::StateSpaceSet; n::Integer = 3, threshold::Real = optimize(Threshold(), RecurrenceEntropy(), x, n)[1]) - dist = distribution(x, threshold, n) +function measure( + ::RecurrenceRate, + x::Union{StateSpaceSet, Vector{<:Real}}, + y::Union{StateSpaceSet, Vector{<:Real}} = x; + n::Integer = 3, + threshold::Real = optimize(Threshold(), RecurrenceEntropy(), x, n)[1], + metric::Metric = DEFAULT_METRIC + ) + dist = probabilities(RecurrenceMicrostates(threshold, n; metric = metric), x, y) return measure(RecurrenceRate(), dist) end diff --git a/src/shapes/diagonal.jl b/src/shapes/diagonal.jl index d474dd2..45e0b73 100644 --- a/src/shapes/diagonal.jl +++ b/src/shapes/diagonal.jl @@ -30,7 +30,7 @@ diagonal = DiagonalMicrostate(expr, 3) """ struct DiagonalMicrostate{N, B} <: MicrostateShape end -DiagonalMicrostate(N::Int; B::Int = 2) = DiagonalMicrostate{N, B}(expr) +DiagonalMicrostate(N::Int; B::Int = 2) = DiagonalMicrostate{N, B}() ########################################################################################## # Implementations: SamplingSpace diff --git a/src/utils/opt/threshold.jl b/src/utils/opt/threshold.jl index 37f2f75..88554e1 100644 --- a/src/utils/opt/threshold.jl +++ b/src/utils/opt/threshold.jl @@ -54,7 +54,7 @@ function optimize( ::Threshold, qm::RecurrenceEntropy, x, - n::Int; + N::Int; rate::Float64 = 0.05, sampling::SamplingMode = SRandom(rate), th_max_range::Float64 = 0.5, @@ -75,7 +75,8 @@ function optimize( fmax = 0.0 for _ ∈ 1:fraction for _ ∈ 1:fraction - probs = distribution(x, ε, n; sampling = sampling) + rmspace = RecurrenceMicrostates(ε, N; sampling = sampling) + probs = probabilities(rmspace, x) f = measure(qm, probs) if f > fmax @@ -121,7 +122,8 @@ function optimize( fmax = 0.0 for _ ∈ 1:fraction for _ ∈ 1:fraction - probs = distribution(x, ε, N; sampling = sampling) + rmspace = RecurrenceMicrostates(ε, N; sampling = sampling) + probs = probabilities(rmspace, x) f = measure(qm, probs, A) if f > fmax diff 
--git a/test/core/backend.jl b/test/core/backend.jl index f64dee0..362a1aa 100644 --- a/test/core/backend.jl +++ b/test/core/backend.jl @@ -1,8 +1,4 @@ using Test using RecurrenceMicrostatesAnalysis -struct TestCore <: RMACore end -x = rand(10) |> StateSpaceSet - -@test_throws ArgumentError histogram(TestCore(), x, x) -@test_throws ArgumentError distribution(TestCore(), x, x) \ No newline at end of file +@test_throws ArgumentError histogram() \ No newline at end of file diff --git a/test/core/operation.jl b/test/core/operation.jl index 01bb9b5..cb4ac9e 100644 --- a/test/core/operation.jl +++ b/test/core/operation.jl @@ -1,6 +1,6 @@ using Test using RecurrenceMicrostatesAnalysis -@test_throws ArgumentError operate(PermuteColumns(Rect(3, 3))) -@test_throws ArgumentError operate(PermuteRows(Rect(3, 3))) -@test_throws ArgumentError operate(Transpose(Rect(3, 3))) \ No newline at end of file +@test_throws ArgumentError operate(PermuteColumns(RectMicrostate(3))) +@test_throws ArgumentError operate(PermuteRows(RectMicrostate(3))) +@test_throws ArgumentError operate(Transpose(RectMicrostate(3))) \ No newline at end of file diff --git a/test/core/sampling.jl b/test/core/sampling.jl index 4b37b60..d04b01a 100644 --- a/test/core/sampling.jl +++ b/test/core/sampling.jl @@ -7,6 +7,6 @@ struct TestSampling <: SamplingMode end @test_throws ArgumentError SamplingSpace(TestShape(), rand(100) |> StateSpaceSet, rand(100) |> StateSpaceSet) @test_throws ArgumentError SamplingSpace(TestShape(), rand(2, 100), rand(2, 100)) -space = SamplingSpace(Rect(2, 2), rand(100) |> StateSpaceSet, rand(100) |> StateSpaceSet) -@test_throws ArgumentError RecurrenceMicrostatesAnalysis.get_sample(TestCore(), TestSampling(), space) +space = SamplingSpace(RectMicrostate(2), rand(100) |> StateSpaceSet, rand(100) |> StateSpaceSet) +@test_throws ArgumentError RecurrenceMicrostatesAnalysis.get_sample(RecurrenceMicrostatesAnalysis.CPUCore(), TestSampling(), space) @test_throws ArgumentError 
RecurrenceMicrostatesAnalysis.get_num_samples(TestSampling(), space) \ No newline at end of file diff --git a/test/core/shape.jl b/test/core/shape.jl index 9dca3fb..6e6827b 100644 --- a/test/core/shape.jl +++ b/test/core/shape.jl @@ -1,7 +1,7 @@ using Test using RecurrenceMicrostatesAnalysis -@test_throws ArgumentError RecurrenceMicrostatesAnalysis.compute_motif(TestShape(), rand(2, 100), rand(2, 100), [1], [1], SVector{1, Int}(1)) +@test_throws ArgumentError RecurrenceMicrostatesAnalysis.compute_motif(TestShape(), Standard(0.27), rand(2, 100), rand(2, 100), [1], [1], SVector{1, Int}(1)) @test_throws ArgumentError RecurrenceMicrostatesAnalysis.get_histogram_size(TestShape()) -@test_throws ArgumentError RecurrenceMicrostatesAnalysis.get_power_vector(TestCore(), TestShape()) -@test_throws ArgumentError RecurrenceMicrostatesAnalysis.get_offsets(TestCore(), TestShape()) \ No newline at end of file +@test_throws ArgumentError RecurrenceMicrostatesAnalysis.get_power_vector(RecurrenceMicrostatesAnalysis.CPUCore(), TestShape()) +@test_throws ArgumentError RecurrenceMicrostatesAnalysis.get_offsets(RecurrenceMicrostatesAnalysis.CPUCore(), TestShape()) \ No newline at end of file diff --git a/test/distributions.jl b/test/distributions.jl index 56f2e5c..e2599b6 100644 --- a/test/distributions.jl +++ b/test/distributions.jl @@ -5,143 +5,145 @@ using RecurrenceMicrostatesAnalysis @testset "time series" begin x = rand(100) |> StateSpaceSet y = rand(200) |> StateSpaceSet - core = CPUCore(Rect(Standard(0.27), 2), SRandom(0.05)) - @test distribution(core, x, y) isa Probabilities - @test distribution(core, x) isa Probabilities + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, 3), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, 3; sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, 3; sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, 3; metric = Cityblock()), 
x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, 3; sampling_ratio = 0.1, metric = Cityblock()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, 3; sampling = Full(), metric = Cityblock()), x)) - 1) <= 1e-3 - @test distribution(x, 0.27, 3; rate = 0.1) isa Probabilities - @test distribution(x, 0.27, 3; sampling = Full()) isa Probabilities - @test distribution(x, 0.27, 3; metric = Cityblock()) isa Probabilities - @test distribution(x, 0.27, 3; rate = 0.1, metric = Cityblock()) isa Probabilities - @test distribution(x, 0.27, 3; sampling = Full(), metric = Cityblock()) isa Probabilities + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), 3; sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), 3; sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), 3; sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), 3; sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), 3; sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), 3; sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), 3; sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), 3; sampling_ratio = 0.1), x)) - 1) <= 1e-3 - @test distribution(x, Standard(0.27), 3) isa Probabilities - @test distribution(x, Standard(0.27), 3; rate = 0.1) isa Probabilities - @test distribution(x, Standard(0.27), 3; sampling = Full()) isa Probabilities - @test distribution(x, Standard(0.27; metric = Cityblock()), 3) isa Probabilities - @test distribution(x, Standard(0.27; metric = Cityblock()), 3; rate = 
0.1) isa Probabilities - @test distribution(x, Standard(0.27; metric = Cityblock()), 3; sampling = Full()) isa Probabilities - @test distribution(x, Corridor(0.05, 0.27), 3) isa Probabilities - @test distribution(x, Corridor(0.05, 0.27), 3; rate = 0.1) isa Probabilities - @test distribution(x, Corridor(0.05, 0.27), 3; sampling = Full()) isa Probabilities - @test distribution(x, Corridor(0.05, 0.27; metric = Cityblock()), 3) isa Probabilities - @test distribution(x, Corridor(0.05, 0.27; metric = Cityblock()), 3; rate = 0.1) isa Probabilities - @test distribution(x, Corridor(0.05, 0.27; metric = Cityblock()), 3; sampling = Full()) isa Probabilities + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), RectMicrostate(3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), RectMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), RectMicrostate(3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), RectMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), RectMicrostate(3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), RectMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), RectMicrostate(3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), RectMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), RectMicrostate(2, 3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), RectMicrostate(2, 3); 
sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), RectMicrostate(2, 3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), RectMicrostate(2, 3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), RectMicrostate(2, 3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), RectMicrostate(2, 3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), RectMicrostate(2, 3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), RectMicrostate(2, 3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), DiagonalMicrostate(3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), DiagonalMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), DiagonalMicrostate(3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), DiagonalMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), DiagonalMicrostate(3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), DiagonalMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), DiagonalMicrostate(3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = 
Cityblock()), DiagonalMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), TriangleMicrostate(3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), TriangleMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), TriangleMicrostate(3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), TriangleMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), TriangleMicrostate(3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), TriangleMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), TriangleMicrostate(3); sampling = Full()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), TriangleMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 - @test distribution(x, Rect(Standard(0.27), 2)) isa Probabilities - @test distribution(x, Rect(Standard(0.27), 2); rate = 0.1) isa Probabilities - @test distribution(x, Rect(Standard(0.27), 2); sampling = Full()) isa Probabilities - @test distribution(x, Rect(Standard(0.27; metric = Cityblock()), 2)) isa Probabilities - @test distribution(x, Rect(Standard(0.27; metric = Cityblock()), 2); rate = 0.1) isa Probabilities - @test distribution(x, Rect(Standard(0.27; metric = Cityblock()), 2); sampling = Full()) isa Probabilities - @test distribution(x, Triangle(Standard(0.27), 2)) isa Probabilities - @test distribution(x, Triangle(Standard(0.27), 2); rate = 0.1) isa Probabilities - @test distribution(x, Triangle(Standard(0.27), 2); sampling = Full()) isa Probabilities 
- @test distribution(x, Triangle(Standard(0.27; metric = Cityblock()), 2)) isa Probabilities - @test distribution(x, Triangle(Standard(0.27; metric = Cityblock()), 2); rate = 0.1) isa Probabilities - @test distribution(x, Triangle(Standard(0.27; metric = Cityblock()), 2); sampling = Full()) isa Probabilities - @test distribution(x, Diagonal(Standard(0.27), 2)) isa Probabilities - @test distribution(x, Diagonal(Standard(0.27), 2); rate = 0.1) isa Probabilities - @test distribution(x, Diagonal(Standard(0.27), 2); sampling = Full()) isa Probabilities - @test distribution(x, Diagonal(Standard(0.27; metric = Cityblock()), 2)) isa Probabilities - @test distribution(x, Diagonal(Standard(0.27; metric = Cityblock()), 2); rate = 0.1) isa Probabilities - @test distribution(x, Diagonal(Standard(0.27; metric = Cityblock()), 2); sampling = Full()) isa Probabilities - @test distribution(x, Rect(Corridor(0.05, 0.27), 2)) isa Probabilities - @test distribution(x, Rect(Corridor(0.05, 0.27), 2); rate = 0.1) isa Probabilities - @test distribution(x, Rect(Corridor(0.05, 0.27), 2); sampling = Full()) isa Probabilities - @test distribution(x, Rect(Corridor(0.05, 0.27; metric = Cityblock()), 2)) isa Probabilities - @test distribution(x, Rect(Corridor(0.05, 0.27; metric = Cityblock()), 2); rate = 0.1) isa Probabilities - @test distribution(x, Rect(Corridor(0.05, 0.27; metric = Cityblock()), 2); sampling = Full()) isa Probabilities - @test distribution(x, Triangle(Corridor(0.05, 0.27), 2)) isa Probabilities - @test distribution(x, Triangle(Corridor(0.05, 0.27), 2); rate = 0.1) isa Probabilities - @test distribution(x, Triangle(Corridor(0.05, 0.27), 2); sampling = Full()) isa Probabilities - @test distribution(x, Triangle(Corridor(0.05, 0.27; metric = Cityblock()), 2)) isa Probabilities - @test distribution(x, Triangle(Corridor(0.05, 0.27; metric = Cityblock()), 2); rate = 0.1) isa Probabilities - @test distribution(x, Triangle(Corridor(0.05, 0.27; metric = Cityblock()), 2); sampling = 
Full()) isa Probabilities - @test distribution(x, Diagonal(Corridor(0.05, 0.27), 2)) isa Probabilities - @test distribution(x, Diagonal(Corridor(0.05, 0.27), 2); rate = 0.1) isa Probabilities - @test distribution(x, Diagonal(Corridor(0.05, 0.27), 2); sampling = Full()) isa Probabilities - @test distribution(x, Diagonal(Corridor(0.05, 0.27; metric = Cityblock()), 2)) isa Probabilities - @test distribution(x, Diagonal(Corridor(0.05, 0.27; metric = Cityblock()), 2); rate = 0.1) isa Probabilities - @test distribution(x, Diagonal(Corridor(0.05, 0.27; metric = Cityblock()), 2); sampling = Full()) isa Probabilities + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, 3), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, 3; sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, 3; sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, 3; metric = Cityblock()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, 3; sampling_ratio = 0.1, metric = Cityblock()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, 3; sampling = Full(), metric = Cityblock()), x, y)) - 1) <= 1e-3 - @test distribution(x, y, 0.27, 3; rate = 0.1) isa Probabilities - @test distribution(x, y, 0.27, 3; sampling = Full()) isa Probabilities - @test distribution(x, y, 0.27, 3; metric = Cityblock()) isa Probabilities - @test distribution(x, y, 0.27, 3; rate = 0.1, metric = Cityblock()) isa Probabilities - @test distribution(x, y, 0.27, 3; sampling = Full(), metric = Cityblock()) isa Probabilities + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), 3; sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), 3; sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), 3; sampling = Full()), x, y)) 
- 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), 3; sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), 3; sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), 3; sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), 3; sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), 3; sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 - @test distribution(x, y, Standard(0.27), 3) isa Probabilities - @test distribution(x, y, Standard(0.27), 3; rate = 0.1) isa Probabilities - @test distribution(x, y, Standard(0.27), 3; sampling = Full()) isa Probabilities - @test distribution(x, y, Standard(0.27; metric = Cityblock()), 3) isa Probabilities - @test distribution(x, y, Standard(0.27; metric = Cityblock()), 3; rate = 0.1) isa Probabilities - @test distribution(x, y, Standard(0.27; metric = Cityblock()), 3; sampling = Full()) isa Probabilities - @test distribution(x, y, Corridor(0.05, 0.27), 3) isa Probabilities - @test distribution(x, y, Corridor(0.05, 0.27), 3; rate = 0.1) isa Probabilities - @test distribution(x, y, Corridor(0.05, 0.27), 3; sampling = Full()) isa Probabilities - @test distribution(x, y, Corridor(0.05, 0.27; metric = Cityblock()), 3) isa Probabilities - @test distribution(x, y, Corridor(0.05, 0.27; metric = Cityblock()), 3; rate = 0.1) isa Probabilities - @test distribution(x, y, Corridor(0.05, 0.27; metric = Cityblock()), 3; sampling = Full()) isa Probabilities - - @test distribution(x, y, Rect(Standard(0.27), 2)) isa Probabilities - @test distribution(x, y, Rect(Standard(0.27), 2); rate = 0.1) isa Probabilities - @test distribution(x, y, Rect(Standard(0.27), 2); sampling = Full()) isa Probabilities - @test distribution(x, y, 
Rect(Standard(0.27; metric = Cityblock()), 2)) isa Probabilities - @test distribution(x, y, Rect(Standard(0.27; metric = Cityblock()), 2); rate = 0.1) isa Probabilities - @test distribution(x, y, Rect(Standard(0.27; metric = Cityblock()), 2); sampling = Full()) isa Probabilities - @test distribution(x, y, Triangle(Standard(0.27), 2)) isa Probabilities - @test distribution(x, y, Triangle(Standard(0.27), 2); rate = 0.1) isa Probabilities - @test distribution(x, y, Triangle(Standard(0.27), 2); sampling = Full()) isa Probabilities - @test distribution(x, y, Triangle(Standard(0.27; metric = Cityblock()), 2)) isa Probabilities - @test distribution(x, y, Triangle(Standard(0.27; metric = Cityblock()), 2); rate = 0.1) isa Probabilities - @test distribution(x, y, Triangle(Standard(0.27; metric = Cityblock()), 2); sampling = Full()) isa Probabilities - @test distribution(x, y, Diagonal(Standard(0.27), 2)) isa Probabilities - @test distribution(x, y, Diagonal(Standard(0.27), 2); rate = 0.1) isa Probabilities - @test distribution(x, y, Diagonal(Standard(0.27), 2); sampling = Full()) isa Probabilities - @test distribution(x, y, Diagonal(Standard(0.27; metric = Cityblock()), 2)) isa Probabilities - @test distribution(x, y, Diagonal(Standard(0.27; metric = Cityblock()), 2); rate = 0.1) isa Probabilities - @test distribution(x, y, Diagonal(Standard(0.27; metric = Cityblock()), 2); sampling = Full()) isa Probabilities - @test distribution(x, y, Rect(Corridor(0.05, 0.27), 2)) isa Probabilities - @test distribution(x, y, Rect(Corridor(0.05, 0.27), 2); rate = 0.1) isa Probabilities - @test distribution(x, y, Rect(Corridor(0.05, 0.27), 2); sampling = Full()) isa Probabilities - @test distribution(x, y, Rect(Corridor(0.05, 0.27; metric = Cityblock()), 2)) isa Probabilities - @test distribution(x, y, Rect(Corridor(0.05, 0.27; metric = Cityblock()), 2); rate = 0.1) isa Probabilities - @test distribution(x, y, Rect(Corridor(0.05, 0.27; metric = Cityblock()), 2); sampling = Full()) isa 
Probabilities - @test distribution(x, y, Triangle(Corridor(0.05, 0.27), 2)) isa Probabilities - @test distribution(x, y, Triangle(Corridor(0.05, 0.27), 2); rate = 0.1) isa Probabilities - @test distribution(x, y, Triangle(Corridor(0.05, 0.27), 2); sampling = Full()) isa Probabilities - @test distribution(x, y, Triangle(Corridor(0.05, 0.27; metric = Cityblock()), 2)) isa Probabilities - @test distribution(x, y, Triangle(Corridor(0.05, 0.27; metric = Cityblock()), 2); rate = 0.1) isa Probabilities - @test distribution(x, y, Triangle(Corridor(0.05, 0.27; metric = Cityblock()), 2); sampling = Full()) isa Probabilities - @test distribution(x, y, Diagonal(Corridor(0.05, 0.27), 2)) isa Probabilities - @test distribution(x, y, Diagonal(Corridor(0.05, 0.27), 2); rate = 0.1) isa Probabilities - @test distribution(x, y, Diagonal(Corridor(0.05, 0.27), 2); sampling = Full()) isa Probabilities - @test distribution(x, y, Diagonal(Corridor(0.05, 0.27; metric = Cityblock()), 2)) isa Probabilities - @test distribution(x, y, Diagonal(Corridor(0.05, 0.27; metric = Cityblock()), 2); rate = 0.1) isa Probabilities - @test distribution(x, y, Diagonal(Corridor(0.05, 0.27; metric = Cityblock()), 2); sampling = Full()) isa Probabilities + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), RectMicrostate(3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), RectMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), RectMicrostate(3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), RectMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), RectMicrostate(3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), 
RectMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), RectMicrostate(3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), RectMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), RectMicrostate(2, 3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), RectMicrostate(2, 3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), RectMicrostate(2, 3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), RectMicrostate(2, 3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), RectMicrostate(2, 3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), RectMicrostate(2, 3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), RectMicrostate(2, 3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), RectMicrostate(2, 3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), DiagonalMicrostate(3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), DiagonalMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), DiagonalMicrostate(3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test 
abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), DiagonalMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), DiagonalMicrostate(3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), DiagonalMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), DiagonalMicrostate(3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), DiagonalMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), TriangleMicrostate(3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), TriangleMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), TriangleMicrostate(3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), TriangleMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), TriangleMicrostate(3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), TriangleMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), TriangleMicrostate(3); sampling = Full()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), TriangleMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 end @testset "spatial data" begin x = rand(1, 20, 20) y = rand(1, 10, 10) - core 
= CPUCore(Rect(Standard(0.27), (2, 1, 1, 2)), SRandom(0.05)) - @test distribution(x, Rect(Standard(0.27), (2, 1, 1, 2))) isa Probabilities - @test distribution(x, Rect(Standard(0.27), (2, 1, 1, 2)); rate = 0.1) isa Probabilities - @test distribution(x, Rect(Standard(0.27; metric = Cityblock()), (2, 1, 1, 2))) isa Probabilities - @test distribution(x, Rect(Standard(0.27; metric = Cityblock()), (2, 1, 1, 2)); rate = 0.1) isa Probabilities - @test distribution(x, Diagonal(Standard(0.27), 2)) isa Probabilities - @test distribution(x, Diagonal(Standard(0.27), 2); rate = 0.1) isa Probabilities - @test distribution(x, Diagonal(Standard(0.27; metric = Cityblock()), 2)) isa Probabilities - @test distribution(x, Diagonal(Standard(0.27; metric = Cityblock()), 2); rate = 0.1) isa Probabilities - @test distribution(x, Rect(Corridor(0.05, 0.27), (2, 1, 1, 2))) isa Probabilities - @test distribution(x, Rect(Corridor(0.05, 0.27), (2, 1, 1, 2)); rate = 0.1) isa Probabilities - @test distribution(x, Rect(Corridor(0.05, 0.27; metric = Cityblock()), (2, 1, 1, 2))) isa Probabilities - @test distribution(x, Rect(Corridor(0.05, 0.27; metric = Cityblock()), (2, 1, 1, 2)); rate = 0.1) isa Probabilities - @test distribution(x, Diagonal(Corridor(0.05, 0.27), 2)) isa Probabilities - @test distribution(x, Diagonal(Corridor(0.05, 0.27), 2); rate = 0.1) isa Probabilities - @test distribution(x, Diagonal(Corridor(0.05, 0.27; metric = Cityblock()), 2)) isa Probabilities - @test distribution(x, Diagonal(Corridor(0.05, 0.27; metric = Cityblock()), 2); rate = 0.1) isa Probabilities + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, (2, 1, 2, 1)), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, (2, 1, 2, 1); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, (2, 1, 2, 1); metric = Cityblock()), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, (2, 1, 2, 1); sampling_ratio = 0.1, metric = 
Cityblock()), x)) - 1) <= 1e-3 + + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), (2, 1, 2, 1); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), (2, 1, 2, 1); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), (2, 1, 2, 1); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), (2, 1, 2, 1); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), RectMicrostate((2, 1, 2, 1)); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), RectMicrostate((2, 1, 2, 1)); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), RectMicrostate((2, 1, 2, 1)); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), RectMicrostate((2, 1, 2, 1)); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), DiagonalMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), DiagonalMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), DiagonalMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), DiagonalMicrostate(3); sampling_ratio = 0.1), x)) - 1) <= 1e-3 + + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, (2, 1, 2, 1)), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, (2, 1, 2, 1); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test 
abs(sum(probabilities(RecurrenceMicrostates(0.27, (2, 1, 2, 1); metric = Cityblock()), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(0.27, (2, 1, 2, 1); sampling_ratio = 0.1, metric = Cityblock()), x, y)) - 1) <= 1e-3 + + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), (2, 1, 2, 1); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), (2, 1, 2, 1); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), (2, 1, 2, 1); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), (2, 1, 2, 1); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), RectMicrostate((2, 1, 2, 1)); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), RectMicrostate((2, 1, 2, 1)); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), RectMicrostate((2, 1, 2, 1)); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), RectMicrostate((2, 1, 2, 1)); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27), DiagonalMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Standard(0.27; metric = Cityblock()), DiagonalMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27), DiagonalMicrostate(3); sampling_ratio = 0.1), x, y)) - 1) <= 1e-3 + @test abs(sum(probabilities(RecurrenceMicrostates(Corridor(0.05, 0.27; metric = Cityblock()), DiagonalMicrostate(3); sampling_ratio = 0.1), x, 
y)) - 1) <= 1e-3 end \ No newline at end of file diff --git a/test/rqa/det.jl b/test/rqa/det.jl index 6c99fd2..596d25f 100644 --- a/test/rqa/det.jl +++ b/test/rqa/det.jl @@ -4,8 +4,9 @@ using RecurrenceAnalysis using RecurrenceMicrostatesAnalysis @testset "invalid distribution" begin - dist = distribution(rand(Uniform(0, 1), 100) |> StateSpaceSet, 0.27, 2) - @test_throws ArgumentError measure(Determinism(), dist) + rmspace = RecurrenceMicrostates(0.27, 2) + dist = probabilities(rmspace, rand(Uniform(0, 1), 100)) + @test_throws ArgumentError measure(Determinism(), rmspace, dist) end ## We use a tolerance of 10% here. @@ -17,9 +18,13 @@ end @test measure(Determinism(), x) isa Real @test (abs(det_l2 - measure(Determinism(), x)) / det_l2) ≤ 0.1 - dist_square = distribution(x, 0.27, 3) - dist_diagonal = distribution(x, Diagonal(Standard(0.27), 3)) - @test (abs(det_l2 - measure(Determinism(), dist_square)) / det_l2) ≤ 0.1 - @test (abs(det_l2 - measure(Determinism(), dist_diagonal)) / det_l2) ≤ 0.1 + rms_square = RecurrenceMicrostates(0.27, 3) + rms_diagonal = RecurrenceMicrostates(0.27, DiagonalMicrostate(3)) + + dist_square = probabilities(rms_square, x) + dist_diagonal = probabilities(rms_diagonal, x) + + @test (abs(det_l2 - measure(Determinism(), rms_square, dist_square)) / det_l2) ≤ 0.1 + @test (abs(det_l2 - measure(Determinism(), rms_diagonal, dist_diagonal)) / det_l2) ≤ 0.1 end \ No newline at end of file diff --git a/test/rqa/entropy.jl b/test/rqa/entropy.jl index 4710ace..b1decd0 100644 --- a/test/rqa/entropy.jl +++ b/test/rqa/entropy.jl @@ -3,7 +3,7 @@ using Distributions using RecurrenceMicrostatesAnalysis x = StateSpaceSet(rand(Uniform(0, 1), 1000)) -dist = distribution(x, 0.27, 4) +dist = probabilities(RecurrenceMicrostates(0.27, 4), x) @test 0 ≤ measure(RecurrenceEntropy(), dist) ≤ log(length(dist)) @test 0 ≤ measure(RecurrenceEntropy(), x; N = 4) ≤ log(length(dist)) diff --git a/test/rqa/lam.jl b/test/rqa/lam.jl index 855aa8b..224d13a 100644 --- 
a/test/rqa/lam.jl +++ b/test/rqa/lam.jl @@ -4,8 +4,9 @@ using RecurrenceAnalysis using RecurrenceMicrostatesAnalysis @testset "invalid distribution" begin - dist = distribution(rand(Uniform(0, 1), 100) |> StateSpaceSet, 0.27, 2) - @test_throws ArgumentError measure(Laminarity(), dist) + rmspace = RecurrenceMicrostates(0.27, 2) + dist = probabilities(rmspace, rand(100)) + @test_throws ArgumentError measure(Laminarity(), rmspace, dist) end ## We use a tolerance of 10% here. @@ -17,9 +18,12 @@ end @test measure(Laminarity(), x) isa Real @test (abs(det_l2 - measure(Laminarity(), x)) / det_l2) ≤ 0.1 - dist_square = distribution(x, 0.27, 3) - dist_diagonal = distribution(x, Diagonal(Standard(0.27), 3)) + rms_square = RecurrenceMicrostates(0.27, 3) + rms_line = RecurrenceMicrostates(0.27, RectMicrostate(1, 3)) - @test (abs(det_l2 - measure(Laminarity(), dist_square)) / det_l2) ≤ 0.1 - @test (abs(det_l2 - measure(Laminarity(), dist_diagonal)) / det_l2) ≤ 0.1 + dist_square = probabilities(rms_square, x) + dist_line = probabilities(rms_line, x) + + @test (abs(det_l2 - measure(Laminarity(), rms_square, dist_square)) / det_l2) ≤ 0.1 + @test (abs(det_l2 - measure(Laminarity(), rms_line, dist_line)) / det_l2) ≤ 0.1 end \ No newline at end of file diff --git a/test/rqa/rr.jl b/test/rqa/rr.jl index eb3077c..1aa07b2 100644 --- a/test/rqa/rr.jl +++ b/test/rqa/rr.jl @@ -3,7 +3,7 @@ using Distributions using RecurrenceMicrostatesAnalysis x = StateSpaceSet(rand(1000)) -dist = distribution(x, 0.27, 4) +dist = probabilities(RecurrenceMicrostates(0.27, 4), x) @test 0 ≤ measure(RecurrenceRate(), dist) ≤ 1 @test 0 ≤ measure(RecurrenceRate(), x) ≤ 1 diff --git a/test/sampling/full.jl b/test/sampling/full.jl index 1402708..92c8d3e 100644 --- a/test/sampling/full.jl +++ b/test/sampling/full.jl @@ -5,11 +5,11 @@ using KernelAbstractions using RecurrenceMicrostatesAnalysis ## Full not implemented for spatial data: -@test_throws ArgumentError distribution(rand(2, 100), Rect(Standard(0.27), (2, 
2)); sampling = Full()) +@test_throws ArgumentError probabilities(RecurrenceMicrostates(0.27, (2, 1, 2, 1); sampling = Full()), rand(1, 10, 10)) @testset "num samples" begin data = rand(100) |> StateSpaceSet - space = SamplingSpace(Rect(2, 2), data, data) + space = SamplingSpace(RectMicrostate(2), data, data) @test RecurrenceMicrostatesAnalysis.get_num_samples(Full(), space) == space.W * space.H @test RecurrenceMicrostatesAnalysis.get_num_samples(Full(), space) isa Integer @@ -19,12 +19,12 @@ end @testset "CPU" begin data_1 = rand(50) |> StateSpaceSet data_2 = rand(20) |> StateSpaceSet - space = SamplingSpace(Rect(2, 2), data_1, data_2) - core = CPUCore(Rect(Standard(0.27), 3), Full()) + space = SamplingSpace(RectMicrostate(2), data_1, data_2) + core = RecurrenceMicrostatesAnalysis.CPUCore() @test RecurrenceMicrostatesAnalysis.get_sample(core, Full(), space, nothing, 10) isa Tuple{<: Integer, <: Integer} - samples = RecurrenceMicrostatesAnalysis.get_num_samples(core.sampling, space) + samples = RecurrenceMicrostatesAnalysis.get_num_samples(Full(), space) for m ∈ 1:samples i, j = RecurrenceMicrostatesAnalysis.get_sample(core, Full(), space, nothing, m) @test 1 ≤ i ≤ space.W @@ -35,12 +35,12 @@ end @testset "GPU" begin data_1 = rand(50) |> StateSpaceSet data_2 = rand(20) |> StateSpaceSet - space = SamplingSpace(Rect(2, 2), data_1, data_2) - core = GPUCore(CPU(), Rect(Standard(0.27f0), 3), Full()) + space = SamplingSpace(RectMicrostate(2), data_1, data_2) + core = RecurrenceMicrostatesAnalysis.GPUCore() @test RecurrenceMicrostatesAnalysis.get_sample(core, Full(), space, nothing, 10) isa Tuple{<: Integer, <: Integer} - samples = RecurrenceMicrostatesAnalysis.get_num_samples(core.sampling, space) + samples = RecurrenceMicrostatesAnalysis.get_num_samples(Full(), space) for m ∈ 1:samples i, j = RecurrenceMicrostatesAnalysis.get_sample(core, Full(), space, nothing, m) @test 1 ≤ i ≤ space.W @@ -52,7 +52,8 @@ end @testset "values" begin x = StateSpaceSet(rand(100)) y = 
StateSpaceSet(rand(50)) + rmspace = RecurrenceMicrostates(0.27, 3; sampling = Full()) - @test js_divergence(distribution(x, 0.27, 3; sampling = Full()), distribution(x, 0.27, 3; sampling = Full())) == 0 - @test js_divergence(distribution(x, y, 0.27, 3; sampling = Full()), distribution(x, y, 0.27, 3; sampling = Full())) == 0 + @test js_divergence(probabilities(rmspace, x), probabilities(rmspace, x)) == 0 + @test js_divergence(probabilities(rmspace, x, y), probabilities(rmspace, x, y)) == 0 end \ No newline at end of file diff --git a/test/sampling/random.jl b/test/sampling/random.jl index 375b353..9793921 100644 --- a/test/sampling/random.jl +++ b/test/sampling/random.jl @@ -13,7 +13,7 @@ using RecurrenceMicrostatesAnalysis @testset "num samples" begin @testset "time series" begin data = rand(100) |> StateSpaceSet - space = SamplingSpace(Rect(2, 2), data, data) + space = SamplingSpace(RectMicrostate(2), data, data) sampling = SRandom(0.05) @test RecurrenceMicrostatesAnalysis.get_num_samples(sampling, space) == ceil(Int, sampling.sampling_factor * space.W * space.H) @@ -26,7 +26,7 @@ using RecurrenceMicrostatesAnalysis @testset "spatial data" begin data = rand(2, 100) - space = SamplingSpace(Rect(Standard(0.27), (2, 2)), data, data) + space = SamplingSpace(RectMicrostate((2, 2)), data, data) sampling = SRandom(0.05) @test RecurrenceMicrostatesAnalysis.get_num_samples(sampling, space) == ceil(Int, sampling.sampling_factor * reduce(*, space.space)) @@ -42,13 +42,13 @@ end @testset "CPU" begin data_1 = rand(50) |> StateSpaceSet data_2 = rand(20) |> StateSpaceSet - space = SamplingSpace(Rect(2, 2), data_1, data_2) + space = SamplingSpace(RectMicrostate(2), data_1, data_2) sampling = SRandom(50) - core = CPUCore(Rect(Standard(0.27), 3), sampling) + core = RecurrenceMicrostatesAnalysis.CPUCore() @test RecurrenceMicrostatesAnalysis.get_sample(core, sampling, space, TaskLocalRNG(), nothing) isa Tuple{<: Integer, <: Integer} - samples = 
RecurrenceMicrostatesAnalysis.get_num_samples(core.sampling, space) + samples = RecurrenceMicrostatesAnalysis.get_num_samples(SRandom(0.05), space) for m ∈ 1:samples i, j = RecurrenceMicrostatesAnalysis.get_sample(core, sampling, space, TaskLocalRNG(), nothing) @test 1 ≤ i ≤ space.W @@ -59,13 +59,13 @@ end @testset "GPU" begin data_1 = rand(50) |> StateSpaceSet data_2 = rand(20) |> StateSpaceSet - space = SamplingSpace(Rect(2, 2), data_1, data_2) + space = SamplingSpace(RectMicrostate(2), data_1, data_2) sampling = SRandom(50) - core = GPUCore(CPU(), Rect(Standard(0.27f0), 3), sampling) + core = RecurrenceMicrostatesAnalysis.GPUCore() @test RecurrenceMicrostatesAnalysis.get_sample(core, sampling, space, 2) isa Vector{SVector{2, Int32}} - num_samples = RecurrenceMicrostatesAnalysis.get_num_samples(core.sampling, space) + num_samples = RecurrenceMicrostatesAnalysis.get_num_samples(SRandom(0.05), space) samples = RecurrenceMicrostatesAnalysis.get_sample(core, sampling, space, num_samples) for m ∈ samples i, j = m @@ -77,14 +77,14 @@ end @testset "spatial data" begin data_1 = rand(2, 50, 50) data_2 = rand(2, 20, 20) - space = SamplingSpace(Rect(Standard(0.27), (2, 1, 1, 2)), data_1, data_2) + space = SamplingSpace(RectMicrostate((2, 1, 1, 2)), data_1, data_2) sampling = SRandom(50) - core = CPUCore(Rect(Standard(0.27), 3), sampling) + core = RecurrenceMicrostatesAnalysis.CPUCore() idx = zeros(Int, 4) @test RecurrenceMicrostatesAnalysis.get_sample(core, sampling, space, idx, TaskLocalRNG(), nothing) isa Nothing - samples = RecurrenceMicrostatesAnalysis.get_num_samples(core.sampling, space) + samples = RecurrenceMicrostatesAnalysis.get_num_samples(SRandom(0.05), space) for m ∈ 1:samples RecurrenceMicrostatesAnalysis.get_sample(core, sampling, space, idx, TaskLocalRNG(), nothing) for i ∈ eachindex(idx) @@ -99,17 +99,19 @@ end @testset "time series" begin x = rand(Uniform(0, 1), 1000) |> StateSpaceSet y = rand(Uniform(0, 1), 1000) |> StateSpaceSet + rmspace = 
RecurrenceMicrostates(0.27, 3; sampling_ratio = 0.1) - @test sqrt(js_divergence(distribution(x, 0.27, 3; sampling = SRandom(0.1)), distribution(x, 0.27, 3; sampling = SRandom(0.1)))) / log(2) ≤ 0.1 - @test sqrt(js_divergence(distribution(x, y, 0.27, 3; sampling = SRandom(0.1)), distribution(x, y, 0.27, 3; sampling = SRandom(0.1)))) / log(2) ≤ 0.1 + @test sqrt(js_divergence(probabilities(rmspace, x), probabilities(rmspace, x))) / log(2) ≤ 0.1 + @test sqrt(js_divergence(probabilities(rmspace, x, y), probabilities(rmspace, x, y))) / log(2) ≤ 0.1 end @testset "spatial data" begin x = rand(Uniform(0, 1), (1, 50, 50)) y = rand(Uniform(0, 1), (1, 50, 50)) + rmspace = RecurrenceMicrostates(0.27, (2, 1, 2, 1); sampling_ratio = 0.1) - @test sqrt(js_divergence(distribution(x, Rect(Standard(0.27), (2, 1, 2, 1)); sampling = SRandom(0.05)), distribution(x, Rect(Standard(0.27), (2, 1, 2, 1)); sampling = SRandom(0.05)))) / log(2) ≤ 0.1 - @test sqrt(js_divergence(distribution(x, y, Rect(Standard(0.27), (2, 1, 2, 1)); sampling = SRandom(0.05)), distribution(x, y, Rect(Standard(0.27), (2, 1, 2, 1)); sampling = SRandom(0.05)))) / log(2) ≤ 0.1 + @test sqrt(js_divergence(probabilities(rmspace, x), probabilities(rmspace, x))) / log(2) ≤ 0.1 + @test sqrt(js_divergence(probabilities(rmspace, x, y), probabilities(rmspace, x, y))) / log(2) ≤ 0.1 end end \ No newline at end of file diff --git a/test/shapes/diagonal.jl b/test/shapes/diagonal.jl index 20fa2dd..4f17973 100644 --- a/test/shapes/diagonal.jl +++ b/test/shapes/diagonal.jl @@ -9,22 +9,24 @@ using RecurrenceMicrostatesAnalysis @testset "time series" begin x = rand(Uniform(0, 1), 1000) |> StateSpaceSet y = rand(Uniform(0, 1), 1000) |> StateSpaceSet + rmspace = RecurrenceMicrostates(0.27, DiagonalMicrostate(3); sampling_ratio = 0.1) - @test sqrt(js_divergence(distribution(x, Diagonal(Standard(0.27), 3); sampling = SRandom(0.1)), distribution(x, Diagonal(Standard(0.27), 3); sampling = SRandom(0.1)))) / log(2) ≤ 0.1 - @test 
sqrt(js_divergence(distribution(x, y, Diagonal(Standard(0.27), 3); sampling = SRandom(0.1)), distribution(x, y, Diagonal(Standard(0.27), 3); sampling = SRandom(0.1)))) / log(2) ≤ 0.1 + @test sqrt(js_divergence(probabilities(rmspace, x), probabilities(rmspace, x))) / log(2) ≤ 0.1 + @test sqrt(js_divergence(probabilities(rmspace, x, y), probabilities(rmspace, x, y))) / log(2) ≤ 0.1 end @testset "spatial data" begin x = rand(Uniform(0, 1), (1, 50, 50)) y = rand(Uniform(0, 1), (1, 50, 50)) + rmspace = RecurrenceMicrostates(0.27, DiagonalMicrostate(3); sampling_ratio = 0.1) - @test sqrt(js_divergence(distribution(x, Diagonal(Standard(0.27), 3); sampling = SRandom(0.05)), distribution(x, Diagonal(Standard(0.27), 3); sampling = SRandom(0.05)))) / log(2) ≤ 0.1 - @test sqrt(js_divergence(distribution(x, y, Diagonal(Standard(0.27), 3); sampling = SRandom(0.05)), distribution(x, y, Diagonal(Standard(0.27), 3); sampling = SRandom(0.05)))) / log(2) ≤ 0.1 + @test sqrt(js_divergence(probabilities(rmspace, x), probabilities(rmspace, x))) / log(2) ≤ 0.1 + @test sqrt(js_divergence(probabilities(rmspace, x, y), probabilities(rmspace, x, y))) / log(2) ≤ 0.1 end @testset "GPU" begin - shape = Diagonal(Standard(0.27f0), 3) - core = GPUCore(CPU(), shape, Full()) + shape = DiagonalMicrostate(3) + core = RecurrenceMicrostatesAnalysis.GPUCore() @test RecurrenceMicrostatesAnalysis.get_power_vector(core, shape) isa SVector{3, Int32} @test RecurrenceMicrostatesAnalysis.get_offsets(core, shape) isa SVector{3, SVector{2, Int32}} diff --git a/test/shapes/rect.jl b/test/shapes/rect.jl index 086efe3..7218ca5 100644 --- a/test/shapes/rect.jl +++ b/test/shapes/rect.jl @@ -9,22 +9,24 @@ using RecurrenceMicrostatesAnalysis @testset "time series" begin x = rand(Uniform(0, 1), 1000) |> StateSpaceSet y = rand(Uniform(0, 1), 1000) |> StateSpaceSet + rmspace = RecurrenceMicrostates(0.27, 3; sampling_ratio = 0.1) - @test sqrt(js_divergence(distribution(x, Rect(Standard(0.27), 3); sampling = SRandom(0.1)), 
distribution(x, Rect(Standard(0.27), 3); sampling = SRandom(0.1)))) / log(2) ≤ 0.1 - @test sqrt(js_divergence(distribution(x, y, Rect(Standard(0.27), 3); sampling = SRandom(0.1)), distribution(x, y, Rect(Standard(0.27), 3); sampling = SRandom(0.1)))) / log(2) ≤ 0.1 + @test sqrt(js_divergence(probabilities(rmspace, x), probabilities(rmspace, x))) / log(2) ≤ 0.1 + @test sqrt(js_divergence(probabilities(rmspace, x, y), probabilities(rmspace, x, y))) / log(2) ≤ 0.1 end @testset "spatial data" begin x = rand(Uniform(0, 1), (1, 50, 50)) y = rand(Uniform(0, 1), (1, 50, 50)) + rmspace = RecurrenceMicrostates(0.27, (2, 1, 2, 1); sampling_ratio = 0.1) - @test sqrt(js_divergence(distribution(x, Rect(Standard(0.27), (2, 1, 2, 1)); sampling = SRandom(0.05)), distribution(x, Rect(Standard(0.27), (2, 1, 2, 1)); sampling = SRandom(0.05)))) / log(2) ≤ 0.1 - @test sqrt(js_divergence(distribution(x, y, Rect(Standard(0.27), (2, 1, 2, 1)); sampling = SRandom(0.05)), distribution(x, y, Rect(Standard(0.27), (2, 1, 2, 1)); sampling = SRandom(0.05)))) / log(2) ≤ 0.1 + @test sqrt(js_divergence(probabilities(rmspace, x), probabilities(rmspace, x))) / log(2) ≤ 0.1 + @test sqrt(js_divergence(probabilities(rmspace, x, y), probabilities(rmspace, x, y))) / log(2) ≤ 0.1 end @testset "GPU" begin - shape = Rect(Standard(0.27f0), 3) - core = GPUCore(CPU(), shape, Full()) + shape = RectMicrostate(3) + core = RecurrenceMicrostatesAnalysis.GPUCore() @test RecurrenceMicrostatesAnalysis.get_power_vector(core, shape) isa SVector{9, Int32} @test RecurrenceMicrostatesAnalysis.get_offsets(core, shape) isa SVector{9, SVector{2, Int32}} diff --git a/test/shapes/triangle.jl b/test/shapes/triangle.jl index 752112a..bbcb003 100644 --- a/test/shapes/triangle.jl +++ b/test/shapes/triangle.jl @@ -7,19 +7,20 @@ using Random using RecurrenceMicrostatesAnalysis ## Triangle not implemented for spatial data: -@test_throws ArgumentError distribution(rand(2, 100), Triangle(Standard(0.27), 3)) +@test_throws ArgumentError 
probabilities(RecurrenceMicrostates(0.27, TriangleMicrostate(3)), rand(1, 10, 10)) @testset "time series" begin x = rand(Uniform(0, 1), 1000) |> StateSpaceSet y = rand(Uniform(0, 1), 1000) |> StateSpaceSet + rmspace = RecurrenceMicrostates(0.27, TriangleMicrostate(3); sampling_ratio = 0.1) - @test sqrt(js_divergence(distribution(x, Triangle(Standard(0.27), 3); sampling = SRandom(0.1)), distribution(x, Triangle(Standard(0.27), 3); sampling = SRandom(0.1)))) / log(2) ≤ 0.1 - @test sqrt(js_divergence(distribution(x, y, Triangle(Standard(0.27), 3); sampling = SRandom(0.1)), distribution(x, y, Triangle(Standard(0.27), 3); sampling = SRandom(0.1)))) / log(2) ≤ 0.1 + @test sqrt(js_divergence(probabilities(rmspace, x), probabilities(rmspace, x))) / log(2) ≤ 0.1 + @test sqrt(js_divergence(probabilities(rmspace, x, y), probabilities(rmspace, x, y))) / log(2) ≤ 0.1 end @testset "GPU" begin - shape = Triangle(Standard(0.27f0), 3) - core = GPUCore(CPU(), shape, Full()) + shape = TriangleMicrostate(3) + core = RecurrenceMicrostatesAnalysis.GPUCore() @test RecurrenceMicrostatesAnalysis.get_power_vector(core, shape) isa SVector{6, Int32} @test RecurrenceMicrostatesAnalysis.get_offsets(core, shape) isa SVector{6, SVector{2, Int32}} diff --git a/test/utils/operations.jl b/test/utils/operations.jl index 42139ef..2ad84e7 100644 --- a/test/utils/operations.jl +++ b/test/utils/operations.jl @@ -1,6 +1,6 @@ using Test using RecurrenceMicrostatesAnalysis -@test operate(PermuteColumns(Rect(3, 3)), 237, 2) == 347 -@test operate(PermuteRows(Rect(3, 3)), 237, [1, 3, 2]) == 349 -@test operate(Transpose(Rect(3, 3)), 237) == 231 \ No newline at end of file +@test operate(PermuteColumns(RectMicrostate(3, 3)), 237, 2) == 347 +@test operate(PermuteRows(RectMicrostate(3, 3)), 237, [1, 3, 2]) == 349 +@test operate(Transpose(RectMicrostate(3, 3)), 237) == 231 \ No newline at end of file From 5bb4fc49755a734de7aded6147f90ddc487c14be Mon Sep 17 00:00:00 2001 From: Gabriel Ferreira Date: Mon, 2 Feb 2026 
14:24:01 -0300 Subject: [PATCH 07/19] fix recurrence test --- test/core/recurrence.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/core/recurrence.jl b/test/core/recurrence.jl index 8bd5de4..85403af 100644 --- a/test/core/recurrence.jl +++ b/test/core/recurrence.jl @@ -1,7 +1,7 @@ using Test using RecurrenceMicrostatesAnalysis -struct TestExpression <: RecurrenceExpression end +struct TestExpression <: RecurrenceExpression{Float64, Euclidean} end @test_throws ArgumentError recurrence(TestExpression(), rand(100) |> StateSpaceSet, rand(100) |> StateSpaceSet, 1, 1) @test_throws ArgumentError recurrence(TestExpression(), rand(2, 100), rand(2, 100), (1, ), (1, )) \ No newline at end of file From 84e2be0a57fd75442f160ec295e72634d30313ab Mon Sep 17 00:00:00 2001 From: Gabriel Ferreira Date: Mon, 2 Feb 2026 17:47:25 -0300 Subject: [PATCH 08/19] fix cpu memory allocation --- src/core/cpu_core.jl | 25 ++++++++++++++----------- src/core/gpu/gpu_core.jl | 2 +- src/shapes/rect.jl | 18 ++++++++++-------- 3 files changed, 25 insertions(+), 20 deletions(-) diff --git a/src/core/cpu_core.jl b/src/core/cpu_core.jl index 7c8326d..62d3276 100644 --- a/src/core/cpu_core.jl +++ b/src/core/cpu_core.jl @@ -43,16 +43,17 @@ function histogram( rmspace::RecurrenceMicrostates{MS, RE, SM}, x::Union{StateSpaceSet, Vector{<: Real}}, y::Union{StateSpaceSet, Vector{<: Real}} = x; - threads = Threads.nthreads() + threads::Int = 0 ) where {MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode} - if (x isa Vector); x = StateSpaceSet(x); end - if (y isa Vector); y = StateSpaceSet(y); end - + # Threads, input and core + threads = threads <= 0 ? Threads.nthreads() : threads + x_input = x isa Vector ? x |> StateSpaceSet : x + y_input = y isa Vector ? 
y |> StateSpaceSet : y core = CPUCore() # Info - space = SamplingSpace(rmspace.shape, x, y) + space = SamplingSpace(rmspace.shape, x_input, y_input) samples = get_num_samples(rmspace.sampling, space) # Allocate memory @@ -73,7 +74,7 @@ function histogram( for m in start:stop i, j = get_sample(core, rmspace.sampling, space, local_rng, m) - idx = compute_motif(rmspace.expr, x, y, i, j, pv, offsets) + idx = compute_motif(rmspace.expr, x_input, y_input, i, j, pv, offsets) @inbounds local_hist[idx] += 1 end @@ -81,7 +82,7 @@ function histogram( end end - res = reduce(+, fetch.(tasks)) + res::Vector{Int} = reduce(+, fetch.(tasks)) return res end #......................................................................................... @@ -91,16 +92,18 @@ function histogram( rmspace::RecurrenceMicrostates{MS, RE, SM}, x::AbstractArray{<: Real}, y::AbstractArray{<: Real} = x; - threads = Threads.nthreads() + threads::Int = 0 ) where {MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode} + # Core and threads + threads = threads <= 0 ? 
Threads.nthreads() : threads + core = CPUCore() + # Info space = SamplingSpace(rmspace.shape, x, y) samples = get_num_samples(rmspace.sampling, space) dim_x = ndims(x) - 1 dim_y = ndims(y) - 1 - core = CPUCore() - # Allocate memory pv = get_power_vector(core, rmspace.shape) @@ -129,7 +132,7 @@ function histogram( end end - res = reduce(+, fetch.(tasks)) + res::Vector{Int} = reduce(+, fetch.(tasks)) return res end diff --git a/src/core/gpu/gpu_core.jl b/src/core/gpu/gpu_core.jl index af4c3b5..42fa8a4 100644 --- a/src/core/gpu/gpu_core.jl +++ b/src/core/gpu/gpu_core.jl @@ -70,7 +70,7 @@ function histogram( end KernelAbstractions.synchronize(backend) - res = hist |> Vector + res::Vector{Int} = hist |> Vector return res end diff --git a/src/shapes/rect.jl b/src/shapes/rect.jl index 4e7363b..3b8beb5 100644 --- a/src/shapes/rect.jl +++ b/src/shapes/rect.jl @@ -58,11 +58,14 @@ RectMicrostate(rows::Int, cols::Int; B = 2) = Rect2Microstate{rows, cols, B}() #......................................................................................... # Based on spatial data: (CPU only) #......................................................................................... -struct RectNMicrostate{D, B} <: RectMicrostate +struct RectNMicrostate{D, B, N} <: RectMicrostate structure::NTuple{D, Int} end -RectMicrostate(structure::NTuple{D, Int}; B = 2) where {D} = RectNMicrostate{D, B}(structure) +function RectMicrostate(structure::NTuple{D, Int}; B = 2) where {D} + N = prod(structure) + RectNMicrostate{D, B, N}(structure) +end ########################################################################################## # Implementations: SamplingSpace @@ -142,7 +145,7 @@ end @generated function get_power_vector(::CPUCore, ::Rect2Microstate{W, H, B}) where {W, H, B} N = W * H - expr = :(SVector{$N}( $([:(B^$i) for i in 0:(N-1)]... ) )) + expr = :(SVector{$N, Int}( $([:(B^$i) for i in 0:(N-1)]... 
) )) return expr end @@ -154,7 +157,7 @@ end @generated function get_power_vector(::GPUCore, ::Rect2Microstate{W, H, B}) where {W, H, B} N = W * H - expr = :(SVector{$N}( $([:(Int32(B^$i)) for i in 0:(N-1)]... ) )) + expr = :(SVector{$N, Int32}( $([:(Int32(B^$i)) for i in 0:(N-1)]... ) )) return expr end @@ -164,12 +167,11 @@ end return :( SVector{$N, $(SVector{2, Int32})}( $(elems...) ) ) end -function get_histogram_size(shape::RectNMicrostate{D, B}) where {D, B} +function get_histogram_size(shape::RectNMicrostate{D, B, N}) where {D, B, N} size = B^(prod(shape.structure)) return size end -function get_power_vector(::CPUCore, shape::RectNMicrostate{D, B}) where {D, B} - N = prod(shape.structure) - return SVector{N}((B^i for i in 0:(N-1))...) +function get_power_vector(::CPUCore, shape::RectNMicrostate{D, B, N}) where {D, B, N} + return SVector{N, Int}((B^i for i in 0:(N-1))...) end \ No newline at end of file From 5a942a4340e8e8031085c38a790e84f46075671f Mon Sep 17 00:00:00 2001 From: Gabriel Ferreira Date: Wed, 18 Feb 2026 14:48:34 -0300 Subject: [PATCH 09/19] update docs; add todo: write RecurrenceMicrostates docs --- README.md | 6 +- src/core/abstract_core.jl | 31 ++--- src/core/complexity_measures_interface.jl | 4 +- src/core/cpu_core.jl | 133 ---------------------- src/core/gpu/gpu_core.jl | 125 -------------------- src/core/recurrence_microstates.jl | 1 + src/recurrences/corridor.jl | 4 +- src/rqa/det.jl | 1 + src/rqa/lam.jl | 9 +- src/shapes/diagonal.jl | 2 +- src/utils/operations/permute_rows.jl | 2 +- src/utils/operations/transpose.jl | 2 +- 12 files changed, 26 insertions(+), 294 deletions(-) diff --git a/README.md b/README.md index 149934e..b01ff91 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # RecurrenceMicrostatesAnalysis.jl -[![CI](https://github.com/DynamicsUFPR/RecurrenceMicrostatesAnalysis.jl/workflows/CI/badge.svg)](https://github.com/DynamicsUFPR/RecurrenceMicrostatesAnalysis.jl/actions) 
-[![codecov](https://codecov.io/gh/DynamicsUFPR/RecurrenceMicrostatesAnalysis.jl/graph/badge.svg?token=NR3S4JOE4R)](https://codecov.io/gh/DynamicsUFPR/RecurrenceMicrostatesAnalysis.jl) +[![CI](https://github.com/JuliaDynamics/RecurrenceMicrostatesAnalysis.jl/workflows/CI/badge.svg)](https://github.com/JuliaDynamics/RecurrenceMicrostatesAnalysis.jl/actions) +[![codecov](https://codecov.io/gh/JuliaDynamics/RecurrenceMicrostatesAnalysis.jl/graph/badge.svg?token=NR3S4JOE4R)](https://codecov.io/gh/JuliaDynamics/RecurrenceMicrostatesAnalysis.jl) [![Package Downloads](https://img.shields.io/badge/dynamic/json?url=http%3A%2F%2Fjuliapkgstats.com%2Fapi%2Fv1%2Ftotal_downloads%2FRecurrenceMicrostatesAnalysis&query=total_requests&label=Downloads)](http://juliapkgstats.com/pkg/RecurrenceMicrostatesAnalysis) [![Publication](https://img.shields.io/badge/publication-Chaos-blue.svg)](https://doi.org/10.1063/5.0293708) @@ -24,5 +24,5 @@ import Pkg Pkg.add("RecurrenceMicrostatesAnalysis") ``` -The package documentation is available [online](https://dynamicsufpr.github.io/RecurrenceMicrostatesAnalysis.jl/), or you can build it +The package documentation is available [online](https://juliadynamics.github.io/RecurrenceMicrostatesAnalysis.jl/stable/), or you can build it locally by running `julia docs/make.jl`. diff --git a/src/core/abstract_core.jl b/src/core/abstract_core.jl index 93de4b1..a61d2b7 100644 --- a/src/core/abstract_core.jl +++ b/src/core/abstract_core.jl @@ -3,23 +3,6 @@ export histogram ########################################################################################## # RMACore ########################################################################################## -""" - RMACore - -Abstract supertype that defines the execution pipeline of the package. - -An instance of **RMACore** must be provided to the [`histogram`](@ref) function to determine how the -histogram computation is performed. 
- -Concrete implementations of `RMACore` are [`CPUCore`](@ref) and [`GPUCore`](@ref), which target CPU -and GPU execution, respectively. Implementing custom subtypes of `RMACore` is **strongly discouraged**, -as doing so requires reimplementing several internal utilities for the package ecosystem to function -correctly. - -# Implementations -- [`CPUCore`](@ref) -- [`GPUCore`](@ref) -""" abstract type RMACore end ########################################################################################## @@ -34,7 +17,7 @@ from the input data `[x]` and `[y]`. If `[x]` and `[y]` are identical, the result corresponds to a Recurrence Plot (RP); otherwise, it corresponds to a Cross-Recurrence Plot (CRP). -The result is returned as a `Vector{Int}` representing the histogram of recurrence +The result is returned as a `Vector{Int}` that is the histogram of recurrence microstates for the given input data. ### Arguments @@ -52,17 +35,18 @@ microstates for the given input data. a `Vector{Real}` as input it is accepted and converted internally to a [`StateSpaceSet`](@ref). ### Keyword Arguments -If using a [`CPUCore`](@ref): +If using CPU: - `threads`: Number of threads used to compute the histogram. By default, this is set to `Threads.nthreads()`, which can be specified at Julia startup using `--threads N` or via the `JULIA_NUM_THREADS` environment variable. -If using a [`GPUCore`](@ref): +If using GPU: - `groupsize`: Number of threads per GPU workgroup. 
-### Examples using [`CPUCore`](@ref) +### Examples using CPU - Time series: ```julia +using RecurrenceMicrostatesAnalysis ssset = StateSpaceSet(rand(Float64, (1000))) rmspace = RecurrenceMicrostates(0.27, 3) dist = histogram(rmspace, ssset) @@ -70,14 +54,15 @@ dist = histogram(rmspace, ssset) - Spatial data: ```julia +using RecurrenceMicrostatesAnalysis spatialdata = rand(Float64, (3, 50, 50)) rmspace = RecurrenceMicrostates(0.27, RectMicrostate((2, 1, 2, 1))) dist = histogram(rmspace, spatialdata) ``` -### Examples using [`GPUCore`](@ref) +### Examples using GPU ```julia -using CUDA +using CUDA, RecurrenceMicrostatesAnalysis gpudata = StateSpaceSet(Float32.(data)) |> CuVector core = GPUCore(CUDABackend(), Rect(Standard(0.27f0; metric = GPUEuclidean()), 2), SRandom(0.05)) dist = histogram(core, gpudata, gpudata) diff --git a/src/core/complexity_measures_interface.jl b/src/core/complexity_measures_interface.jl index 004b6d3..e059078 100644 --- a/src/core/complexity_measures_interface.jl +++ b/src/core/complexity_measures_interface.jl @@ -16,8 +16,8 @@ function ComplexityMeasures.outcome_space(rmspace::RecurrenceMicrostates, x, y = return eachindex(1:get_histogram_size(rmspace.shape)) end -## -## Needed to CRP +# +# Needed to CRP function ComplexityMeasures.probabilities(o::RecurrenceMicrostates, x, y) return first(probabilities_and_outcomes(o, x, y)) end diff --git a/src/core/cpu_core.jl b/src/core/cpu_core.jl index 62d3276..b4c7241 100644 --- a/src/core/cpu_core.jl +++ b/src/core/cpu_core.jl @@ -1,12 +1,6 @@ ########################################################################################## # RMACore: CPU ########################################################################################## -""" - CPUCore <: RMACore - -Type which represents the pipeline executed by **RecurrenceMicrostatesAnalysis.jl** on -central processing units. 
-""" struct CPUCore <: RMACore end ########################################################################################## @@ -136,131 +130,4 @@ function histogram( return res end -########################################################################################## -# Implementation: distribution -########################################################################################## -distribution( - x::StateSpaceSet, - y::StateSpaceSet, - shape::MicrostateShape; - rate::Float64 = 0.05, - sampling::SamplingMode = SRandom(rate), - threads::Int = Threads.nthreads() -) = distribution(CPUCore(shape, sampling), x, y; threads = threads) -#......................................................................................... -distribution( - x::StateSpaceSet, - y::StateSpaceSet, - expr::RecurrenceExpression, - n::Int; - rate::Float64 = 0.05, - sampling::SamplingMode = SRandom(rate), - threads::Int = Threads.nthreads() -) = distribution(CPUCore(Rect(expr, n), sampling), x, y; threads = threads) -#......................................................................................... -distribution( - x::StateSpaceSet, - y::StateSpaceSet, - ε::Float64, - n::Int; - rate::Float64 = 0.05, - sampling::SamplingMode = SRandom(rate), - threads::Int = Threads.nthreads(), - metric::Metric = DEFAULT_METRIC -) = distribution(x, y, Standard(ε; metric = metric), n; rate = rate, sampling = sampling, threads = threads) -#......................................................................................... -distribution( - x::StateSpaceSet, - shape::MicrostateShape; - rate::Float64 = 0.05, - sampling::SamplingMode = SRandom(rate), - threads::Int = Threads.nthreads() -) = distribution(x, x, shape; rate = rate, sampling = sampling, threads = threads) -#......................................................................................... 
-distribution( - x::StateSpaceSet, - expr::RecurrenceExpression, - n::Int; - rate::Float64 = 0.05, - sampling::SamplingMode = SRandom(rate), - threads::Int = Threads.nthreads() -) = distribution(x, x, expr, n; rate = rate, sampling = sampling, threads = threads) -#......................................................................................... -distribution( - x::StateSpaceSet, - ε::Float64, - n::Int; - rate::Float64 = 0.05, - sampling::SamplingMode = SRandom(rate), - threads::Int = Threads.nthreads(), - metric::Metric = DEFAULT_METRIC -) = distribution(x, x, ε, n; rate = rate, sampling = sampling, threads = threads, metric = metric) -#......................................................................................... -distribution( - x::AbstractArray{<:Real}, - y::AbstractArray{<:Real}, - shape::MicrostateShape; - rate::Float64 = 0.05, - sampling::SamplingMode = SRandom(rate), - threads::Int = Threads.nthreads() -) = distribution(CPUCore(shape, sampling), x, y; threads = threads) -#......................................................................................... -distribution( - x::AbstractArray{<:Real}, - shape::MicrostateShape; - rate::Float64 = 0.05, - sampling::SamplingMode = SRandom(rate), - threads::Int = Threads.nthreads() -) = distribution(x, x, shape; rate = rate, sampling = sampling, threads = threads) -#......................................................................................... -distribution( - core::CPUCore, - x; - threads = Threads.nthreads() -) = distribution(core, x, x; threads = threads) -#......................................................................................... -""" - distribution(core::CPUCore, [x], [y]; kwargs...) - -Compute an RMA distribution for the input data `[x]` and `[y]` using a CPU backend configuration -defined by `core`, which must be a [`CPUCore`](@ref). - -For time-series analysis, the inputs `[x]` and `[y]` must be provided as [`StateSpaceSet`](@ref) -objects. 
For spatial analysis, the inputs must be provided as `AbstractArray`s. - -### Arguments -- `core`: A [`CPUCore`](@ref) defining the [`MicrostateShape`](@ref), - [`RecurrenceExpression`](@ref), and [`SamplingMode`](@ref) used in the computation. -- `[x]`: Input data provided as a [`StateSpaceSet`](@ref) or an `AbstractArray`. -- `[y]`: Input data provided as a [`StateSpaceSet`](@ref) or an `AbstractArray`. - -### Keyword Arguments -- `threads`: Number of threads used to compute the distribution. By default, this is set to - `Threads.nthreads()`, which can be specified at Julia startup using `--threads N` or via the - `JULIA_NUM_THREADS` environment variable. - -### Examples -- Time series: -```julia -ssset = StateSpaceSet(rand(Float64, (1000))) -core = CPUCore(Rect(Standard(0.27), 2), SRandom(0.05)) -dist = distribution(core, ssset, ssset) -``` - -- Spatial data: -```julia -spatialdata = rand(Float64, (3, 50, 50)) -core = CPUCore(Rect(Standard(0.5), (2, 2, 1, 1)), SRandom(0.05)) -dist = distribution(core, spatialdata, spatialdata) -``` -""" -function distribution( - core::CPUCore, - x, - y; - threads = Threads.nthreads() -) - hist = histogram(core, x, y; threads = threads) - return Probabilities(hist) -end ########################################################################################## \ No newline at end of file diff --git a/src/core/gpu/gpu_core.jl b/src/core/gpu/gpu_core.jl index 42fa8a4..3520791 100644 --- a/src/core/gpu/gpu_core.jl +++ b/src/core/gpu/gpu_core.jl @@ -1,18 +1,6 @@ ########################################################################################## # RMACore: GPU ########################################################################################## -""" - GPUCore{B} <: RMACore - -Type which represents the pipeline executed by **RecurrenceMicrostatesAnalysis.jl** on -graphical processing units. 
- -It is initialized using: -```julia -GPUCore(backend) -``` -Here, `backend` is the GPU device backend, e.g., `MetalBackend`, `CUDABackend`. -""" struct GPUCore <: RMACore end ########################################################################################## @@ -95,118 +83,5 @@ end Atomix.@atomic hist[idx] += one(Int32) end end -########################################################################################## -# Implementation: distribution -########################################################################################## -distribution( - x::AbstractGPUVector{SVector{N, Float32}}, - y::AbstractGPUVector{SVector{N, Float32}}, - shape::MicrostateShape; - rate::Float32 = 0.05f0, - sampling::SamplingMode = SRandom(rate), - groupsize::Int = 256, - backend = get_backend(x) -) where {N} = distribution(GPUCore(backend, shape, sampling), x, y; groupsize = groupsize) -#......................................................................................... -distribution( - x::AbstractGPUVector{SVector{N, Float32}}, - y::AbstractGPUVector{SVector{N, Float32}}, - expr::RecurrenceExpression, - n::Int; - rate::Float32 = 0.05f0, - sampling::SamplingMode = SRandom(rate), - groupsize::Int = 256, - backend = get_backend(x) -) where {N} = distribution(GPUCore(backend, Rect(expr, n), sampling), x, y; groupsize = groupsize) -#......................................................................................... -distribution( - x::AbstractGPUVector{SVector{N, Float32}}, - y::AbstractGPUVector{SVector{N, Float32}}, - ε::Float32, - n::Int; - rate::Float32 = 0.05f0, - sampling::SamplingMode = SRandom(rate), - groupsize::Int = 256, - backend = get_backend(x), - metric::GPUMetric = GPUEuclidean() -) where {N} = distribution(x, y, Standard(ε; metric = metric), n; rate = rate, sampling = sampling, groupsize = groupsize, backend = backend) -#......................................................................................... 
-distribution( - x::AbstractGPUVector{SVector{N, Float32}}, - shape::MicrostateShape; - rate::Float32 = 0.05f0, - sampling::SamplingMode = SRandom(rate), - groupsize::Int = 256, - backend = get_backend(x), -) where{N} = distribution(x, x, shape; rate = rate, sampling = sampling, groupsize = groupsize, backend = backend) -#......................................................................................... -distribution( - x::AbstractGPUVector{SVector{N, Float32}}, - expr::RecurrenceExpression, - n::Int; - rate::Float32 = 0.05f0, - sampling::SamplingMode = SRandom(rate), - groupsize::Int = 256, - backend = get_backend(x), -) where{N} = distribution(x, x, expr, n; rate = rate, sampling = sampling, groupsize = groupsize, backend = backend) -#......................................................................................... -distribution( - x::AbstractGPUVector{SVector{N, Float32}}, - ε::Float32, - n::Int; - rate::Float32 = 0.05f0, - sampling::SamplingMode = SRandom(rate), - groupsize::Int = 256, - backend = get_backend(x), - metric::GPUMetric = GPUEuclidean() -) where {N} = distribution(x, x, ε, n; rate = rate, sampling = sampling, groupsize = groupsize, backend = backend, metric = metric) -#......................................................................................... -distribution( - core::GPUCore, - x; - groupsize::Int = 256, -) = distribution(core, x, x; groupsize = groupsize) -#......................................................................................... -""" - distribution(core::GPUCore, [x], [y]; kwargs...) - -Compute an RMA distribution for the input data `[x]` and `[y]` using a GPU backend configuration -defined by `core`, which must be a [`GPUCore`](@ref). - -The inputs `[x]` and `[y]` must be vectors of type `AbstractGPUVector`. This method supports -time-series analysis only. - -!!! note - The resulting distribution is copied from GPU memory back to the CPU. 
- -### Arguments -- `core`: A [`GPUCore`](@ref) defining the [`MicrostateShape`](@ref), - [`RecurrenceExpression`](@ref), and [`SamplingMode`](@ref) used in the computation. -- `[x]`: Input data provided as an `AbstractGPUVector`. -- `[y]`: Input data provided as an `AbstractGPUVector`. - -### Keyword Arguments -- `groupsize`: Number of threads per GPU workgroup. - -### Examples -```julia -using CUDA -gpudata = StateSpaceSet(Float32.(data)) |> CuVector -core = GPUCore(CUDABackend(), Rect(Standard(0.27f0; metric = GPUEuclidean()), 2), SRandom(0.05)) -dist = distribution(core, gpudata, gpudata) -``` - -!!! warning - Spatial data are not supported by [`GPUCore`](@ref). -""" -function distribution( - core::GPUCore, - x, - y; - groupsize::Int = 256, -) - hist = histogram(core, x, y; groupsize = groupsize) - return Probabilities(hist) -end ########################################################################################## \ No newline at end of file diff --git a/src/core/recurrence_microstates.jl b/src/core/recurrence_microstates.jl index d5aab27..8f0d0d8 100644 --- a/src/core/recurrence_microstates.jl +++ b/src/core/recurrence_microstates.jl @@ -3,6 +3,7 @@ export RecurrenceMicrostates ########################################################################################## # Recurrence Microstate ########################################################################################## +# TODO: Write RecurrenceMicrostates documentation. """ RecurrenceMicrostates """ diff --git a/src/recurrences/corridor.jl b/src/recurrences/corridor.jl index d2358f2..d6b7b27 100644 --- a/src/recurrences/corridor.jl +++ b/src/recurrences/corridor.jl @@ -29,8 +29,8 @@ Corridor(0.05, 0.27) Corridor(0.05, 0.27; metric = Cityblock()) ``` -The recurrence evaluation is performed via the [`recurrence`](@ref) function. For GPU -execution, the corresponding implementation is provided by `gpu_recurrence`. +The recurrence evaluation is performed via the [`recurrence`](@ref) function. 
+For GPU execution, the corresponding implementation is provided by `gpu_recurrence`. """ struct Corridor{T <: Real, M <: Metric} <: RecurrenceExpression{T, M} ε_min::T diff --git a/src/rqa/det.jl b/src/rqa/det.jl index 9569a3f..15170be 100644 --- a/src/rqa/det.jl +++ b/src/rqa/det.jl @@ -18,6 +18,7 @@ measure(::Determinism, rmspace::RecurrenceMicrostates, dist::Probabilities) ``` ## Arguments +- `rmspace`: A recurrence outcome space. - `dist`: A distribution of recurrence microstates. The distribution must be computed from **square** or **diagonal** microstates of size 3. diff --git a/src/rqa/lam.jl b/src/rqa/lam.jl index 3d4702a..3039b29 100644 --- a/src/rqa/lam.jl +++ b/src/rqa/lam.jl @@ -14,10 +14,11 @@ function. # Using a distribution ```julia -measure(::Laminarity, dist::Probabilities) +measure(::Laminarity, rmspace::RecurrenceMicrostates, dist::Probabilities) ``` ## Arguments +- `rmspace`: A recurrence outcome space. - `dist`: A distribution of recurrence microstates. The distribution must be computed from **square** or **line** microstates of size 3. @@ -29,7 +30,8 @@ A `Float64` corresponding to the estimated laminarity. ```julia using RecurrenceMicrostatesAnalysis, Distributions data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -dist = distribution(data, 0.27, 3) +rmspace = RecurrenceMicrostates(0.27, 3) +dist = probabilities(rmspace, data) lam = measure(Laminarity(), dist) ``` @@ -37,7 +39,8 @@ lam = measure(Laminarity(), dist) ```julia using RecurrenceMicrostatesAnalysis, Distributions data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -dist = distribution(data, Rect(Standard(0.27); rows = 1, cols = 3)) +rmspace = RecurrenceMicrostates(0.27, RectMicrostate(1, 3)) +dist = probabilities(rmspace, data) lam = measure(Laminarity(), dist) ``` diff --git a/src/shapes/diagonal.jl b/src/shapes/diagonal.jl index 45e0b73..892d5f9 100644 --- a/src/shapes/diagonal.jl +++ b/src/shapes/diagonal.jl @@ -17,7 +17,7 @@ where `N` defines the length of the diagonal microstate. 
# Example ```julia -diagonal = DiagonalMicrostate(expr, 3) +diagonal = DiagonalMicrostate(3) ``` !!! info diff --git a/src/utils/operations/permute_rows.jl b/src/utils/operations/permute_rows.jl index a6789e2..04f5f95 100644 --- a/src/utils/operations/permute_rows.jl +++ b/src/utils/operations/permute_rows.jl @@ -11,7 +11,7 @@ Operation that permutes the rows of a microstate \$\\mathbf{M}\$. To initialize a `PermuteRows` operation, a rectangular microstate shape must be provided via a [`Rect`](@ref) structure: ```julia -PermuteRows(::Rect2Microstate{R, C, B, E}) +PermuteRows(::Rect2Microstate{R, C, B}) ``` # Examples diff --git a/src/utils/operations/transpose.jl b/src/utils/operations/transpose.jl index fcdb5ab..a438d0e 100644 --- a/src/utils/operations/transpose.jl +++ b/src/utils/operations/transpose.jl @@ -11,7 +11,7 @@ Operation that transposes a microstate \$\\mathbf{M}\$. To initialize a `Transpose` operation, a rectangular microstate shape must be provided via a [`Rect`](@ref) structure: ```julia -Transpose(::Rect2Microstate{R, C, B, E}) +Transpose(::Rect2Microstate{R, C, B}) ``` # Examples From 302a73d052c2c0f8858ee77c8deef41b99eacfae Mon Sep 17 00:00:00 2001 From: Datseris Date: Fri, 20 Mar 2026 14:37:13 +0000 Subject: [PATCH 10/19] add complexity measuresnote --- docs/src/tutorial.jl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/src/tutorial.jl b/docs/src/tutorial.jl index 2f5f30e..f99ddef 100644 --- a/docs/src/tutorial.jl +++ b/docs/src/tutorial.jl @@ -5,7 +5,10 @@ # how to optimize our choices regarding the distribution generation, # and how to perform Recurrence Microstate Analysis (RMA). -# But first, we'll start with a crash course! +# !!! info "ComplexityMeasures.jl" +# RecurrenceMicrostatesAnalysis.jl interfaces with, and extends, ComplexityMeasures.jl. +# It can enhance your understanding if you first view the tutorial of +# ComplexityMeasures.jl. Regardless, the current tutorial is written to be self-contained.
# ## Crash-course into RMA From 36adda12735e417235c3836d8089e1aaf4c5f505 Mon Sep 17 00:00:00 2001 From: Datseris Date: Fri, 20 Mar 2026 14:38:02 +0000 Subject: [PATCH 11/19] delete tutorial.md --- docs/src/tutorial.md | 14 -------------- 1 file changed, 14 deletions(-) delete mode 100644 docs/src/tutorial.md diff --git a/docs/src/tutorial.md b/docs/src/tutorial.md deleted file mode 100644 index c0f1a68..0000000 --- a/docs/src/tutorial.md +++ /dev/null @@ -1,14 +0,0 @@ -```@meta -EditURL = "tutorial.jl" -``` - -````@example tutorial -using Distributions, RecurrenceMicrostatesAnalysis -data = rand(Uniform(0, 1), 10_000); -ssset = StateSpaceSet(data) - -ε = 0.27 -N = 2 -dist = distribution(ssset, ε, N) -```` - From c2f02a757f3a417b96807309b85f6735daf7968b Mon Sep 17 00:00:00 2001 From: Datseris Date: Fri, 20 Mar 2026 14:38:15 +0000 Subject: [PATCH 12/19] add tutorial.md in gitignore --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 60e1c2a..1d3ba3a 100644 --- a/.gitignore +++ b/.gitignore @@ -9,4 +9,5 @@ Manifest.toml *.scss test/*.png test/*.bson -*style.jl \ No newline at end of file +*style.jl +tutorial.md \ No newline at end of file From d8fc97b4404c199245937cef02e091170a02147b Mon Sep 17 00:00:00 2001 From: Datseris Date: Fri, 20 Mar 2026 14:46:27 +0000 Subject: [PATCH 13/19] add examples and API page --- docs/make.jl | 2 ++ docs/src/api.md | 3 --- docs/src/examples.md | 3 +++ docs/src/index.md | 3 +++ docs/src/tutorial.jl | 2 +- 5 files changed, 9 insertions(+), 4 deletions(-) create mode 100644 docs/src/examples.md diff --git a/docs/make.jl b/docs/make.jl index 27d42e2..3ac66b5 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -12,6 +12,7 @@ Literate.markdown("src/tutorial.jl", "src"; credit = false) pages = [ "Welcome" => "index.md", "Tutorial" => "tutorial.md", + "API" => "api.md", # "API" => "api.md", # "Tutorial" => [ # "Distributions" => "tutorial/distributions.md", @@ -21,6 +22,7 @@ pages = [ # 
"GPU" => "tutorial/gpu.md", # "Utils" => "tutorial/utils.md", # ], + "Examples" => "examples.md", # "Ecosystem Integration" => [ # "Machine Learning" => "examples/ml.md", # ], diff --git a/docs/src/api.md b/docs/src/api.md index 7ac6d30..5e4278f 100644 --- a/docs/src/api.md +++ b/docs/src/api.md @@ -74,6 +74,3 @@ The following must be in the GPUCore struct: !!! warning GPU backends require inputs of type `Float32`. `Float64` inputs are not supported on GPU. - - -# ## \ No newline at end of file diff --git a/docs/src/examples.md b/docs/src/examples.md new file mode 100644 index 0000000..c08ddca --- /dev/null +++ b/docs/src/examples.md @@ -0,0 +1,3 @@ +# Examples for RecurrenceMicrostateAnalysis.jl + +TODO: Move here all code examples that do not need to be in the main tutorial. \ No newline at end of file diff --git a/docs/src/index.md b/docs/src/index.md index c35e9e1..4eec86e 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -8,6 +8,9 @@ RecurrenceMicrostatesAnalysis If you find this package useful, please consider giving it a star on GitHub and don't forget to cite [our work](https://doi.org/10.1063/5.0293708). 😉 +To learn how to use the package, go first through the [Tutorial](@ref). +The full functionality is listed in [API](@ref). + ## About the documentation The documentation of **RecurrenceMicrostatesAnalysis.jl** is designed to explain how to use the package while also introducing the theoretical background of the RMA framework. The bibliography used throughout the documentation is listed in the [References](@ref) section; **please remember to cite the appropriate works if you use them**. diff --git a/docs/src/tutorial.jl b/docs/src/tutorial.jl index f99ddef..ce6fcaf 100644 --- a/docs/src/tutorial.jl +++ b/docs/src/tutorial.jl @@ -13,7 +13,7 @@ # ## Crash-course into RMA # Recurrence Plots (RPs) were introduced in 1987 by Eckmann et al. 
-# [Eckmann1987RP](@cite) as a method for analyzing dynamical systems through recurrence +# [Eckmann1987RP](@cite) as a method for analyzing dynamical systems through recurrence # properties. # Consider a time series $\vec{x}_i \in \mathbb{R}^d$, $i \in \{1, 2, \dots, K\}$, From 5c1dcb417c64a824aebfdbf54c63ba2f82f0f1c Mon Sep 17 00:00:00 2001 From: Datseris Date: Sun, 22 Mar 2026 13:16:58 +0000 Subject: [PATCH 14/19] add some more stuff in tutorial --- docs/src/tutorial.jl | 30 ++++++++++++++++-------------- src/core/recurrence_microstates.jl | 4 +--- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/docs/src/tutorial.jl b/docs/src/tutorial.jl index ce6fcaf..bb0afbe 100644 --- a/docs/src/tutorial.jl +++ b/docs/src/tutorial.jl @@ -3,7 +3,7 @@ # In this tutorial we go through a typical usage of **RecurrenceMicrostatesAnalysis.jl**. # We'll see how to calculate distributions of recurrence microstates, # how to optimize our choices regarding the distribution generation, -# and how to perform Recurrence Microstate Analysis (RMA). +# and how to perform Recurrence Microstate Analysis (**RMA**). # !!! info "ComplexityMeasures.jl" # RecurrenceMicrostatesAnalysis.jl interfaces with, and extends, ComplexityMeasures.jl. @@ -44,11 +44,12 @@ # ## Probability distributions of recurrence microstates -# Finding and counting microstates in data is straightforward. -# It amounts to passing the input data to the `probabilities` function, -# while specifying the options of the `RecurrenceMicrostates` estimator, +# Extracting probabilities corresponding to recurrence microstates +# is done via the ComplexityMeasures.jl interface and it is relatively straightforward. +# We first specify the options of [`RecurrenceMicrostates`](@ref), # which essentially means e.g., what sort of distance threshold defines a recurrence, -# and what is maximum microstate size to consider. +# and what is maximum microstate size to consider.
Then we pass this to functions +# like `probabilities`, `entropy`, etc. # Let's first generate some data of a chaotic map using **DynamicalSystems.jl**: @@ -64,11 +65,8 @@ end u0 = [0.2, 0.3] p0 = [1.4, 0.3] - henon = DeterministicIteratedMap(henon_rule, u0, p0) - -total_time = 10_000 -X, t = trajectory(henon, total_time) +X, t = trajectory(henon, 10_000) X # Notice that `X` is already a [`StateSpaceSet`](@ref). Because **RecurrenceMicrostatesAnalysis.jl** @@ -84,7 +82,7 @@ rmspace = RecurrenceMicrostates(ε, N) # and finally call -probs = probabilities(ospace, X) +probs = probabilities(rmspace, X) # The [`probability`](@ref) function is the same function as in [`ComplexityMeasures`](@ref). # Given an outcome space, that is a way to _symbolize_ input data into discrete outcomes, @@ -126,11 +124,11 @@ entropy(Tsallis(), rmspace, X) # such as laminarity that fundamentally relate with the context of recurrences. # For example, -# These quantities are listed in XXX. - -# Note that if instead of - +# XXX TODO. +# All of these quantities like laminarity are in fact _complexity measures_ +# which is why RecurrenceMicrostatesAnalysis.jl fits so well within the +# interface of ComplexityMeasures.jl. # ## Optimizing recurrence specification @@ -144,6 +142,8 @@ rmspace = RecurrenceMicrostates(ε, N) h = entropy(Shannon(), rmspace, X) (h, S) +# TODO: The two numbers reported above are not the same. +# Perhaps the logarithm base is off? # ## Custom specification of recurrence microstates @@ -177,5 +177,7 @@ probabilities(rmspace, X, Y) # This augmentation from one to two input data # works for all functions discussed in this tutorial. +# Coincidentally, the same extension of `probabilities` to multivariate data +# is done in [Associations.jl](https://juliadynamics.github.io/Associations.jl/stable/).
# ## Spatial data diff --git a/src/core/recurrence_microstates.jl b/src/core/recurrence_microstates.jl index 8f0d0d8..041dfef 100644 --- a/src/core/recurrence_microstates.jl +++ b/src/core/recurrence_microstates.jl @@ -7,7 +7,7 @@ export RecurrenceMicrostates """ RecurrenceMicrostates """ -struct RecurrenceMicrostates{MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode} <: ComplexityMeasures.CountBasedOutcomeSpace +struct RecurrenceMicrostates{MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode} <: ComplexityMeasures.CountBasedOutcomeSpace shape::MS expr::RE sampling::SM @@ -63,5 +63,3 @@ function RecurrenceMicrostates(ε_min::Real, ε_max::Real, shape::MicrostateShap expr = Corridor(ε_min, ε_max; metric = metric) return RecurrenceMicrostates(shape, expr, sampling) end - -########################################################################################## \ No newline at end of file From c1a28f32999ed65f2325650451741d92907ddeb5 Mon Sep 17 00:00:00 2001 From: Gabriel Ferreira Date: Sat, 4 Apr 2026 14:35:41 -0300 Subject: [PATCH 15/19] update rqa backend (it'll break CI) --- Project.toml | 4 +- src/RecurrenceMicrostatesAnalysis.jl | 2 +- src/core/measures.jl | 39 --- src/core/optimize.jl | 14 +- src/rqa/det.jl | 179 ++++++------ src/rqa/disorder.jl | 401 ++++++++++++++++++--------- src/rqa/entropy.jl | 70 ----- src/rqa/lam.jl | 174 ++++++------ src/rqa/rqa.jl | 60 +++- src/rqa/rr.jl | 116 ++++---- src/utils/opt/threshold.jl | 98 +++---- 11 files changed, 617 insertions(+), 540 deletions(-) delete mode 100644 src/core/measures.jl delete mode 100644 src/rqa/entropy.jl diff --git a/Project.toml b/Project.toml index 2bb9193..4a52c7b 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "RecurrenceMicrostatesAnalysis" uuid = "cb83a08b-85c6-4e94-91aa-4e946c7d4f0c" -repo = "https://github.com/DynamicsUFPR/RecurrenceMicrostatesAnalysis.jl" version = "0.4.0" +repo = 
"https://github.com/DynamicsUFPR/RecurrenceMicrostatesAnalysis.jl" [deps] Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" @@ -15,6 +15,7 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" StateSpaceSets = "40b095a5-5852-4c12-98c7-d43bf788e795" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" +Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" [compat] Adapt = "4.4.0" @@ -28,6 +29,7 @@ Random = "1.10.9" Reexport = "1.2.2" StateSpaceSets = "2.5.3" StaticArrays = "1.9.15" +Statistics = "1.11.1" julia = "1.8" [extras] diff --git a/src/RecurrenceMicrostatesAnalysis.jl b/src/RecurrenceMicrostatesAnalysis.jl index b87e8bf..45ed34b 100644 --- a/src/RecurrenceMicrostatesAnalysis.jl +++ b/src/RecurrenceMicrostatesAnalysis.jl @@ -17,6 +17,7 @@ using KernelAbstractions using Random using Reexport using StaticArrays +using Statistics @reexport using Adapt @reexport using ComplexityMeasures @@ -37,7 +38,6 @@ include("core/recurrence_microstates.jl") include("core/cpu_core.jl") include("core/gpu/gpu_core.jl") -include("core/measures.jl") include("core/optimize.jl") include("core/operation.jl") diff --git a/src/core/measures.jl b/src/core/measures.jl deleted file mode 100644 index 5b75c00..0000000 --- a/src/core/measures.jl +++ /dev/null @@ -1,39 +0,0 @@ -export QuantificationMeasure, measure - -########################################################################################## -# Type: Quantification Measure -########################################################################################## -""" - QuantificationMeasure - -Abstract supertype defining an RQA or RMA quantification measure. - -All quantifiers implemented in the package subtype `QuantificationMeasure` and define their -computation via the [`measure`](@ref) function. 
- -# Implementations -- [`Determinism`](@ref) -- [`Disorder`](@ref) -- [`Laminarity`](@ref) -- [`RecurrenceEntropy`](@ref) -- [`RecurrenceRate`](@ref) -""" -abstract type QuantificationMeasure end - -########################################################################################## -# Implementation: measure -########################################################################################## -""" - measure(qm::QuantificationMeasure, [...]) - -Compute the quantification measure defined by the given [`QuantificationMeasure`](@ref) instance. - -The accepted arguments (`[...]`) depend on the specific quantifier implementation. -""" -function measure(ms::QuantificationMeasure) - T = typeof(ms) - msg = "`measures` not implemented without arguments for $T." - throw(ArgumentError(msg)) -end - -########################################################################################## \ No newline at end of file diff --git a/src/core/optimize.jl b/src/core/optimize.jl index b4b5a87..952f29b 100644 --- a/src/core/optimize.jl +++ b/src/core/optimize.jl @@ -6,9 +6,9 @@ export Parameter, optimize """ Parameter -Abstract supertype for free parameters that can be optimized using RMA. +Abstract supertype for free parameters that can be optimized using **Recurrence Microstates Analysis**. -# Implementations +## Implementations - [`Threshold`](@ref) """ abstract type Parameter end @@ -17,17 +17,17 @@ abstract type Parameter end # Implementation: optimize ########################################################################################## """ - optimize(param::Parameter, qm::QuantificationMeasure, args...) + optimize(param::Parameter, args...) -Optimize a free [`Parameter`](@ref) using the specified [`QuantificationMeasure`](@ref). +Optimize a free [`Parameter`](@ref) using a specific complexity measure. -!!! warning +!!! warning "Performance" The `optimize` function may compute multiple distributions and can be computationally expensive. 
Avoid calling it inside performance-critical loops. """ -function optimize(param::Parameter, qm::QuantificationMeasure) +function optimize(param::Parameter) T = typeof(param) - msg = "`optimize` not implemented without arguments for $T using the quantification measure $(typeof(qm))." + msg = "`optimize` not implemented without arguments for $T." throw(ArgumentError(msg)) end diff --git a/src/rqa/det.jl b/src/rqa/det.jl index 15170be..4251c9e 100644 --- a/src/rqa/det.jl +++ b/src/rqa/det.jl @@ -1,125 +1,138 @@ -export Determinism +export RecurrenceDeterminism ########################################################################################## -# Quantification Measure: RecurrenceRate +# Quantification Measure: Determinism +# Complexity Measure Implementation ########################################################################################## """ - Determinism <: QuantificationMeasure + RecurrenceDeterminism <: ComplexityEstimator + RecurrenceDeterminism(ε::Float64; kwargs...) -Define the *Determinism* (DET) quantification measure. +An estimator of recurrence determinism, used with [complexity](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/complexitymeasures/stable/complexity/#ComplexityMeasures.complexity). +Determinism is estimated for a threshold `ε`. -DET can be computed either from a distribution of recurrence microstates or directly from -time-series data. In both cases, the computation is performed via the [`measure`](@ref) -function. +## Keyword arguments +- `metric::Metric`: The metric used to compute recurrence. +- `ratio`: The sampling ratio. The default is `0.1`. +- `sampling`: The sampling mode. The default is [`SRandom`](@ref). 
-# Using a distribution -```julia -measure(::Determinism, rmspace::RecurrenceMicrostates, dist::Probabilities) +## Description + +Recurrence determinism (DET) is defined as [Webber2015Recurrence](@cite) +```math +DET = \\frac{\\sum_{l=l_{min}}^{K} l H_D(l)}{\\sum_{i,j=1}^{K} r_{i,j}}, +``` +where \$H_D(l)\$ is the histogram of diagonal line lengths: +```math +H_D(l) = \\sum_{i,j=1}^{K} (1 - r_{i-1, j-1})(1 - r_{i+l,j+l})\\prod_{k=0}^{l-1} r_{i+k,j+k}. +``` +By inverting the determinism expression, we can rewrite it as [daCruz2025RQAMeasures](@cite) +```math +DET = 1 - \\frac{1}{K^2 \\sum_{i,j=1}^{K} r_{i,j}} \\sum_{l=1}^{l_{min} - 1} l H_D(l). ``` -## Arguments -- `rmspace`: A recurrence outcome space. -- `dist`: A distribution of recurrence microstates. The distribution must be computed from - **square** or **diagonal** microstates of size 3. - -## Returns -A `Float64` corresponding to the estimated determinism. - -## Examples -### Using square microstates -```julia -using RecurrenceMicrostatesAnalysis, Distributions -data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -rmspace = RecurrenceMicrostates(0.27, 3) -dist = probabilities(rmspace, data) -det = measure(Determinism(), rmspace, dist) +An approximate value for DET can be estimated using recurrence microstates, as introduced by +da Cruz et al. [daCruz2025RQAMeasures](@cite). From an input dataset `x`, we estimate a +recurrence microstate distribution \$\\vec{p}\$. This distribution must be defined over +square microstates of size \$3 \\times 3\$. Here, we use the relation: +```math +\\frac{H_D(l)}{(K-l-1)^2} = \\vec{d}^{(l)} \\cdot \\mathcal{R}^{(l + 2)}\\vec{p}^{(l + 2)}. 
``` -### Using diagonal microstates -```julia -using RecurrenceMicrostatesAnalysis, Distributions -data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -rmspace = RecurrenceMicrostates(0.27, Diagonal(3)) -dist = probabilities(rmspace, data) -det = measure(Determinism(), rmspace, dist) +For the commonly used case \$l_{min} = 2\$, this leads to the approximation +```math +DET \\approx 1 - \\frac{\\vec{d}^{(1)}\\cdot\\mathcal{R}^{(3)}\\vec{p}^{(3)}}{\\sum_{i,j=1}^{K} r_{i,j}}. ``` -# Using a time series -```julia -measure(::Determinism, [x]; kwargs...) +The correlation term \$\\vec{d}^{(1)} \\cdot \\mathcal{R}^{(3)} \\vec{p}^{(3)}\$ can be +simplified by explicitly identifying the microstates selected by \$\\vec{d}^{(1)}\$. This corresponds +to selecting only microstates of the form: +```math +\\begin{pmatrix} +\\xi & \\xi & 0 \\\\ +\\xi & 1 & \\xi \\\\ +0 & \\xi & \\xi +\\end{pmatrix}, +``` +where \$\\xi\$ denotes an unconstrained entry. There are 64 microstates with this structure among +the 512 possible \$3 \\times 3\$ microstates. Defining the class \$C_D\$ as the set of microstates +with this structure, DET can be estimated as: +```math +DET \\approx 1 - \\frac{\\sum_{i\\in C_D} p_i^{(3)}}{\\sum_{i,j=1}^{K} r_{i,j}}. ``` -## Arguments -- `[x]`: Time-series data provided as a [`StateSpaceSet`](@ref). +The implementation used by [complexity](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/complexitymeasures/stable/complexity/#ComplexityMeasures.complexity) +is an optimized version of this process using [`DiagonalMicrostate`](@ref) [Ferreira2025RMALib](@cite). +Since this microstate shape is symmetric with respect to the desired information, it is not necessary to account for +\$\\xi\$ values as in the square microstate case. Thus, determinism can be estimated as +```math +DET \\approx 1 - \\frac{p_3^{(3)}}{\\sum_{i,j=1}^{K} r_{i,j}}, +``` +where \$p_3^{(3)}\$ is the probability of observing the microstate \$0~1~0\$. 
-## Returns -A `Float64` corresponding to the estimated determinism. +!!! note "Performance" + Although estimating DET using RMA is faster than typical RQA computation, + the precision depends on the time series length. Therefore, for small time series, + i.e., \$K \\leq 1000\$, we strongly recommend using standard RQA with + [RecurrenceAnalysis.jl](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/recurrenceanalysis/stable/quantification/#RecurrenceAnalysis.determinism). +""" +struct RecurrenceDeterminism <: ComplexityEstimator + ε::Float64 + metric::Metric + sampling::SamplingMode +end -## Keyword Arguments -- `threshold`: Threshold used to compute the RMA distribution. By default, this is chosen as - the threshold that maximizes the recurrence microstate entropy (RME). +function complexity( + c::RecurrenceDeterminism, + x::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{SVector}}, + y::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{SVector}} = x; + ) -### Examples -```julia -using RecurrenceMicrostatesAnalysis, Distributions -data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -det = measure(Determinism(), data) -``` + rmspace = RecurrenceMicrostates(c.ε, DiagonalMicrostate(3); metric = c.metric, sampling = c.sampling) + probs = probabilities(rmspace, x, y) + return measure(c, rmspace, probs) +end -!!! note - When time-series data are provided directly, RecurrenceMicrostatesAnalysis.jl uses [`Diagonal`](@ref) microstates by default. 
-""" -struct Determinism <: QuantificationMeasure end +# -- Constructors +RecurrenceDeterminism(ε; metric::Metric = DEFAULT_METRIC, ratio::Float64 = 0.1, sampling::SamplingMode = SRandom(ratio)) = RecurrenceDeterminism(ε, metric, sampling) ########################################################################################## -# Implementation: measure +# Internal: measure from probabilities ########################################################################################## -# Using as input a RMA distribution. -#......................................................................................... -function measure( - ::Determinism, - rmspace::RecurrenceMicrostates, - dist::Probabilities - ) - if (rmspace.shape isa Rect2Microstate{3, 3, 2} && length(dist) == 512) - rr = measure(RecurrenceRate(), dist) +# This is an internal function which estimates the determinism from a recurrence microstate +# outcome space, using a given probability distribution that was computed from this +# outcome space. + +# This function works for [`DiagonalMicrostate`](@ref) with length 3, +# or \$3 \\times 3\$ [`RectMicrostate`](@ref). Any other input will returns an error. 
+########################################################################################## +function measure(c::RecurrenceDeterminism, rmspace::RecurrenceMicrostates, probs::Probabilities) + rrc = RecurrenceRate(c.ε; metric = c.metric, sampling = c.sampling) + if (rmspace.shape isa Rect2Microstate{3, 3, 2} && length(probs) == 512) + rr = measure(rrc, probs) values = zeros(Int, 64) v_idx = 1 - for a1 in 0:1, a2 in 0:1, a3 in 0:1, a4 in 0:1, a5 in 0:1, a6 in 0:1 + for a1 ∈ 0:1, a2 ∈ 0:1, a3 ∈ 0:1, a4 ∈ 0:1, a5 ∈ 0:1, a6 ∈ 0:1 I_1 = 2 * a1 + 4 * a2 + 8 * a3 + 16 + 32 * a4 + 64 * a5 + 128 * a6 values[v_idx] = I_1 + 1 v_idx += 1 end pl = 0.0 - for i in values - pl += dist[i] + for i ∈ values + pl += probs[i] end return 1 - ((1/rr) * pl) - elseif (rmspace.shape isa DiagonalMicrostate{3, 2} && length(dist) == 8) - rr = measure(RecurrenceRate(), dist) - return 1 - ((1/rr) * dist[3]) + elseif (rmspace.shape isa DiagonalMicrostate{3, 2} && length(probs) == 8) + rr = measure(rrc, probs) + return 1 - ((1/rr) * probs[3]) else msg = "Determinism must be computed using square or diagonal microstates with n = 3." throw(ArgumentError(msg)) end end -#......................................................................................... -# Using as input a time series -#......................................................................................... 
-function measure( - op::Determinism, - x::Union{StateSpaceSet, Vector{<:Real}}, - y::Union{StateSpaceSet, Vector{<:Real}} = x; - threshold::Real = optimize(Threshold(), RecurrenceEntropy(), x, 3)[1], - metric::Metric = DEFAULT_METRIC - ) - rmspace = RecurrenceMicrostates(threshold, DiagonalMicrostate(3); metric = metric) - dist = probabilities(rmspace, x, y) - measure(op, rmspace, dist) -end ########################################################################################## \ No newline at end of file diff --git a/src/rqa/disorder.jl b/src/rqa/disorder.jl index cd53a99..284700e 100644 --- a/src/rqa/disorder.jl +++ b/src/rqa/disorder.jl @@ -1,191 +1,318 @@ -export Disorder +export Disorder, WindowedDisorder ########################################################################################## -# Quantification Measure: Disorder +# Quantification Measure: Determinism +# Complexity Measure Implementation +########################################################################################## +# Here we have four disorder definitions: +# - `Disorder`: it is the typical Disorder quantifier (https://doi.org/10.1103/1y98-x33s). +# Computed by maximizing the `PartialDisorder`. +# +# - `PartialDisorder`: it is an internal definition, which computes the entropy associated +# wit disorder for an specific threshold. It is computed using the disorder for all +# classes, which are computed using `ClassPartialDisorder`. +# +# - `ClassPartialDisorder`: it is the entropy quantity associated with disorder for an +# specific threshold and class of microstates. We use it to compute `PartialDisorder` disorder. +# +# - `WindowedDisorder`: it is same that `Disorder`, but spliting data in windows with +# a fixed length. ########################################################################################## """ - Disorder{N} <: QuantificationMeasure - -Define the *Disorder* quantification measure for microstates of size `N` [Flauzino2025Disorder](@cite). 
+ Disorder{N} <: ComplexityEstimator + Disorder(N::Int = 4; kwargs...) + +An estimator of a disorder measure, introduced by [Flauzino2025Disorder](@cite), used with [complexity](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/complexitymeasures/stable/complexity/#ComplexityMeasures.complexity). +It uses \$N \\times N\$ microstates and a specified `metric` to compute the disorder. + +## Keyword arguments +- `metric::Metric`: The metric used to compute recurrence. +- `range_len::Int`: The number of threshold values used to estimate the disorder. + +## Description +Disorder, or the disorder index via symmetry in recurrence microstates (DISREM), is based on the +implications of the disorder condition in microstates. It quantifies disorder through the +information entropy of classes of recurrence microstates, which are required to be equiprobable +within the same class according to the disorder condition. + +Let \$\\sigma\$ be a permutation, and \$S_N\$ the set of all permutations of \$N\$ elements, with +\$\\sigma \\in S_N\$. Also, let \$\\mathcal{L}_\\sigma\$ be the operator that permutes the rows of a +matrix, and \$\\mathcal{T}\$ the operator that transposes a matrix. A recurrence microstate class +is defined as [Flauzino2025Disorder](@cite): +```math +\\mathscr{M}_a (\\mathbf{M}) = \\bigcup_{\\sigma_i,\\sigma_j \\in S_N} \\{ \\mathcal{L}_{\\sigma_j}\\mathcal{T}\\mathcal{L}_{\\sigma_i}\\mathbf{M},\\quad\\mathcal{T}\\mathcal{L}_{\\sigma_j}\\mathcal{T}\\mathcal{L}_{\\sigma_i}\\mathbf{M} \\}. +``` -The `Disorder` struct stores a set of `labels` that identify the microstates belonging to -each equivalence class \$\\mathcal{M}_a\$. +Let \$p(\\mathbf{M})\$ be the probability of the microstate \$\\mathbf{M}\$. We renormalize this probability +with respect to its class: +```math +p^{(a)}(\\mathbf{M}) = \\frac{p(\\mathbf{M})}{\\sum_{\\mathbf{M}' \\in \\mathscr{M}_a} p(\\mathbf{M}')}. 
+``` -# Constructor -```julia -Disorder(N) +The normalized information entropy associated with the probability distribution of microstates in the class +\$\\mathscr{M}_a\$ is then defined as [Flauzino2025Disorder](@cite): +```math +\\xi_a(\\varepsilon) = \\frac{1}{m_a} \\sum_{\\mathbf{M} \\in \\mathscr{M}_a} p^{(a)}(\\mathbf{M}) \\ln p^{(a)}(\\mathbf{M}), ``` -Here, \$N\$ must be equal to 2, 3, 4, or 5. Computing disorder for larger values of \$N\$ is -currently not supported, as it would require a prohibitive amount of memory with the -current implementation. +where \$m_a\$ is the number of microstates in the class \$\\mathscr{M}_a\$. -The computation of *Disorder* is performed via the [`measure`](@ref) function: -```julia -measure(settings::Disorder{N}, [x]; kwargs...) +By summing the entropy over all classes and normalizing by its maximum amplitude \$A\$, we obtain the total entropy +across all classes: +```math +\\xi(\\varepsilon) = \\frac{1}{A}\\sum_{a = 1}^{A} \\xi_a(\\varepsilon). ``` -# Arguments -- `[x]`: Time-series data provided as a [`StateSpaceSet`](@ref). - -# Returns -A `Float64` corresponding to the disorder value (\$\\Xi\$). - -# Keyword Arguments -- `th`: Reference threshold used to maximize disorder. To improve computational performance, - this value limits the search range of thresholds. By default, it is set to the threshold - that maximizes disorder for a sampling rate of \$5%\$. -- `th_min`: Minimum threshold defining the search range. By default, this is set to `0.85 * th`. -- `th_max`: Maximum threshold defining the search range. By default, this is set to `1.25 * th`. -- `num_tests`: Number of threshold values evaluated within the specified range. The default value is `40`. 
- -# Examples -```julia -using RecurrenceMicrostatesAnalysis, Distributions -data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -disrem = measure(Disorder(4), data) +The disorder measure is then defined as the maximum value of \$\\xi(\\varepsilon)\$ [Flauzino2025Disorder](@cite): +```math +\\Xi = \\max_{\\varepsilon} \\xi(\\varepsilon). ``` + +!!! compat "Microstate size and shape" + Disorder is only defined for square microstates, with computations available for + \$N \\in \\{2, 3, 4, 5\\}\$ due to computational limitations. In particular, + computations for \$N = 5\$ are computationally expensive. """ -struct Disorder{N} <: QuantificationMeasure +struct Disorder{N} <: ComplexityEstimator labels::Vector{Vector{Int}} + metric::Metric + range_len::Int end -#......................................................................................... -Disorder(N::Int = 3) = 1 < N < 6 ? Disorder{N}(compute_labels(N)) : throw(ArgumentError("Disorder not implemented for N ≤ 1 or N ≥ 6.")) -########################################################################################## -# Implementation: measure -########################################################################################## -function measure(settings::Disorder{N}, class::Int, probs::Probabilities) where {N} - norm_factor = 0.0 - labels = settings.labels[class] - @inbounds @simd for i in labels - norm_factor += probs[i] +""" + WindowedDisorder{N, W} <: ComplexityEstimator + WindowedDisorder(W::Int, N::Int = 4; kwargs...) + +This estimator is equivalent to [`Disorder`](@ref), but computes it by splitting the data into windows of +length `W`, returning a vector of measured disorder values for each window. + +## Keyword arguments +- `metric::Metric`: The metric used to compute recurrence. +- `range_len::Int`: The number of threshold values used to estimate disorder. +- `step::Int`: The step between windows. The default is `W`. 
+""" +struct WindowedDisorder{W, N} <: ComplexityEstimator + labels::Vector{Vector{Int}} + metric::Metric + range_len::Int + step::Int +end + +struct PartialDisorder{N} <: ComplexityEstimator + labels::Vector{Vector{Int}} + rmspace::RecurrenceMicrostates +end + +struct ClassPartialDisorder <: ComplexityEstimator + labels::Vector{Int} + rmspace::RecurrenceMicrostates +end + +function complexity( + c::Disorder{N}, + x::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{<:SVector}} + ) where {N} + + data = x isa Vector ? StateSpaceSet(x) : x + data_cpu = data isa AbstractGPUVector ? data |> Vector |> StateSpaceSet : data + + # Define the disorder range. + th = optimize(Threshold(), c, data_cpu)[1] + th_min = 0.7 * th + th_max = 1.3 * th + + # Prepare to compute. + ξ = zeros(Float64, c.range_len) + th_range = range(th_min, th_max, c.range_len) + + # For GPU + if (data isa AbstractGPUVector) + th_range = Float32.(th_range) end - - if (norm_factor == 0.0) - return 0.0 + + # Compute disorder for each threshold. + for i ∈ eachindex(th_range) + rmspace = RecurrenceMicrostates(th_range[i], N; sampling = Full(), metric = c.metric) + partial = PartialDisorder{N}(c.labels, rmspace) + ξ[i] = complexity(partial, data) end - s = 0.0 - @inbounds @simd for i in labels - p = probs[i] / norm_factor - s += p * log(p + eps()) + return maximum(ξ) +end + +function complexity( + c::WindowedDisorder{W, N}, + x::Union{StateSpaceSet, Vector{<:Real}} + ) where {N, W} + + windowed_data = [ StateSpaceSet(x[(i + 1):(i + W)]) for i ∈ 0:c.step:(size(x, 1) - W) ] + + # We need to define the threshold range here. 
+ s = ceil(Int, length(windowed_data) * 0.1) + opt_ths = zeros(Float64, s) + + for i ∈ eachindex(s) + idx = rand(1:length(windowed_data)) + opt_ths[i] = optimize(Threshold(), Disorder{N}(c.labels, c.metric, c.range_len), windowed_data[idx])[1] end - s *= -1 - s /= log(length(labels)) + μ_th = mean(opt_ths) + σ_th = std(opt_ths) - return s -end -#......................................................................................... -function measure(settings::Disorder{N}, probs::Probabilities, norm_param::Int) where {N} - total_entropy = 0.0 - for c in 2:length(settings.labels) - 1 - total_entropy += measure(settings, c, probs) + th_min = μ_th - 1.5 * σ_th + th_min = th_min <= 0 ? 1e-16 : th_min + th_max = μ_th + 1.5 * σ_th + if th_max < th_min + a = th_min + th_min = th_max + th_max = a + end + + # Prepare to compute. + ξ = zeros(Float64, length(windowed_data), c.range_len) + th_range = range(th_min, th_max, c.range_len) + + # Finally, compute disorder for each window (note that it isn't just use "complexity(disorder, x)") + for j ∈ eachindex(th_range) + rmspace = RecurrenceMicrostates(th_range[j], N; sampling = Full(), metric = c.metric) + partial = PartialDisorder{N}(c.labels, rmspace) + + for i ∈ eachindex(windowed_data) + ξ[i, j] = complexity(partial, windowed_data[i]) + end end - return total_entropy / norm_param + return [ maximum(ξ[i, :]) for i ∈ eachindex(windowed_data) ] end -#......................................................................................... -function measure( - settings::Disorder{N}, - x::Union{StateSpaceSet, Vector{<:Real}}, - y::Union{StateSpaceSet, Vector{<:Real}} = x; - th::Float64 = optimize(Threshold(), Disorder(N), x)[1], - th_min::Float64 = 0.85 * th, - th_max::Float64 = 1.25 * th, - num_tests::Int = 40, - metric::Metric = DEFAULT_METRIC - ) where {N} - A = get_disorder_norm_factor(settings, x) - Ay = get_disorder_norm_factor(settings, y) - A = Ay > A ? 
Ay : A - values = zeros(typeof(th), num_tests) - th_range = range(th_min, th_max, num_tests) +function complexity( + c::WindowedDisorder{W, N}, + x::AbstractGPUVector{SVector{D, Float32}} + ) where {N, W, D} + + # GPU Settings + backend = KernelAbstractions.get_backend(x) + + # Here we have a small issue: we need to move data from GPU to CPU again, split it and + # then move back to the GPU... I really don't have any idea about how to create windows + # directly in GPU >.< + # And we also need to compute threshold range =/ + data = x |> Vector + windowed_data = [ StateSpaceSet(data[(i + 1):(i + W)]) for i ∈ 0:c.step:(size(x, 1) - W) ] + windowed_gpu = map(windowed_data) do w + w_vec = w |> Vector + gw = KernelAbstractions.allocate(backend, eltype(w_vec), size(w_vec)) + copyto!(gw, w_vec) + gw + end + + # We need to define the threshold range here. + s = ceil(Int, length(windowed_data) * 0.1) + opt_ths = zeros(Float64, s) - for i in eachindex(th_range) - rmspace = RecurrenceMicrostates(th_range[i], N; sampling = Full(), metric = metric) - probs = probabilities(rmspace, x, y) - values[i] = measure(settings, probs, A) + for i ∈ eachindex(s) + idx = rand(1:length(windowed_data)) + opt_ths[i] = optimize(Threshold(), Disorder{N}(c.labels, c.metric, c.range_len), windowed_data[idx])[1] end - return maximum(values) -end -#......................................................................................... 
-function measure( - settings::Disorder{N}, - dataset::Vector{<:AbstractGPUVector{SVector{D, Float32}}}, - th_min::Float32, - th_max::Float32; - num_tests::Int = 40, - metric::GPUMetric = GPUEuclidean() - ) where {N, D} - A = _norm_factor(Val(N), Val(D)) - values = zeros(Float32, num_tests, length(dataset)) - th_range = Float32.(range(th_min, th_max, num_tests)) - backend = get_backend(dataset[1]) + μ_th = mean(opt_ths) + σ_th = std(opt_ths) - for i ∈ eachindex(th_range) - rmspace = RecurrenceMicrostates(th_range[i], N; sampling = Full(), metric = metric, core = GPUCore(backend)) - for j in eachindex(dataset) - probs = probabilities(rmspace, dataset[j]) - values[i, j] = measure(settings, probs, A) + th_min = μ_th - σ_th + th_min = th_min <= 0 ? 1e-16 : th_min + th_max = μ_th + σ_th + if th_max < th_min + a = th_min + th_min = th_max + th_max = a + end + + # Prepare to compute. + ξ = zeros(Float64, length(windowed_gpu), c.range_len) + th_range = Float32.(range(th_min, th_max, c.range_len)) + + # Finally, compute disorder for each window (note that it isn't just use "complexity(disorder, x)") + for j ∈ eachindex(th_range) + rmspace = RecurrenceMicrostates(th_range[j], N; sampling = Full(), metric = c.metric) + partial = PartialDisorder{N}(c.labels, rmspace) + + for i ∈ eachindex(windowed_gpu) + ξ[i, j] = complexity(partial, windowed_gpu[i]) end end - results = zeros(Float32, length(dataset)) + return [ maximum(ξ[i, :]) for i ∈ eachindex(windowed_data) ] +end + +function complexity( + c::PartialDisorder{N}, + x::Union{StateSpaceSet{D, T, V}, <:AbstractGPUVector{SVector{D, Float32}}} + ) where {N, D, T, V} - for i ∈ eachindex(dataset) - results[i] = maximum(values[:, i]) + A = _norm_factor(Val(N), Val(D)) + probs = probabilities(c.rmspace, x) + ξ = 0.0 + for i ∈ 2:(length(c.labels) - 1) + cpartial = ClassPartialDisorder(c.labels[i], c.rmspace) + ξ += measure(cpartial, probs) end - return results + return ξ / A end 
-#......................................................................................... -function measure( - settings::Disorder{N}, - dataset::Vector{StateSpaceSet}, - th_min::Float64, - th_max::Float64; - num_tests::Int = 40, - metric::Metric = DEFAULT_METRIC - ) where {N} - A = get_disorder_norm_factor(settings, dataset[1]) - values = zeros(Float64, num_tests, length(dataset)) - th_range = range(th_min, th_max, num_tests) - for i ∈ eachindex(th_range) - rmspace = RecurrenceMicrostates(th_range[i], N; sampling = Full(), metric = metric) - for j in eachindex(dataset) - probs = probabilities(rmspace, dataset[j]) - values[i, j] = measure(settings, probs, A) - end +function complexity( + c::ClassPartialDisorder, + x::Union{StateSpaceSet, <:AbstractGPUVector{<:SVector}} + ) + probs = probabilities(c.rmspace, x) + return measure(c, probs) +end + +# -- Constructors +Disorder(N::Int = 4; metric::Metric = DEFAULT_METRIC, range_len::Int = 40) = Disorder{N}(compute_labels(N), metric, range_len) +WindowedDisorder(W::Int, N::Int = 4; metric::Metric = DEFAULT_METRIC, range_len::Int = 40, step::Int = W) = WindowedDisorder{W,N}(compute_labels(N), metric, range_len, step) +PartialDisorder(rexpr::RecurrenceExpression, N::Int = 4) = PartialDisorder{N}(compute_labels(N), RecurrenceMicrostates(rexpr, N; sampling = Full())) +ClassPartialDisorder(rexpr::RecurrenceExpression, c::Int, N::Int = 4) = ClassPartialDisorder(compute_labels(N)[c], RecurrenceMicrostates(rexpr, N; sampling = Full())) + +########################################################################################## +# Internal: measure from probabilities +########################################################################################## +# This is an internal function which estimates the determinism from a recurrence microstate +# outcome space, using a given probability distribution that was computed from this +# outcome space. 
+ +# This function works for [`DiagonalMicrostate`](@ref) with length 3, +# or \$3 \\times 3\$ [`RectMicrostate`](@ref). Any other input will returns an error. +########################################################################################## +function measure(c::ClassPartialDisorder, probs::Probabilities) + + norm_factor = 0.0 + @inbounds @simd for i ∈ c.labels + norm_factor += probs[i] end - results = zeros(Float32, length(dataset)) + if (norm_factor <= 0.0) + return 0.0 + end - for i ∈ eachindex(dataset) - results[i] = maximum(values[:, i]) + ξ = 0.0 + @inbounds @simd for i ∈ c.labels + p = probs[i] / norm_factor + ξ += p * log(p + eps()) end - return results + ξ *= -1 / log(length(c.labels)) + return ξ end ########################################################################################## -# Utils +# Internal: Aux. functions ########################################################################################## -get_disorder_norm_factor(::Disorder{N}, ::StateSpaceSet{D, T, V}) where {N, D, T, V} = _norm_factor(Val(N), Val(D)) -#......................................................................................... _norm_factor(::Val{2}, ::Val{D}) where D = 4 _norm_factor(::Val{3}, ::Val{D}) where D = D > 1 ? 24 : 23 _norm_factor(::Val{4}, ::Val{D}) where D = D > 1 ? 190 : 145 _norm_factor(::Val{5}, ::Val{D}) where D = D > 1 ? 
throw(ArgumentError("Disorder not implemented using N = 5 for data with more than one dimension.")) : 1173 -########################################################################################## -# Compute labels -########################################################################################## -function compute_labels(N::Int) - S = collect(permutations(1:N)) +function compute_labels(N::Int; S = collect(permutations(1:N))) shape = RectMicrostate(N) row_permutation = PermuteRows(shape) diff --git a/src/rqa/entropy.jl b/src/rqa/entropy.jl deleted file mode 100644 index f91e537..0000000 --- a/src/rqa/entropy.jl +++ /dev/null @@ -1,70 +0,0 @@ -export RecurrenceEntropy - -########################################################################################## -# Quantification Measure: RecurrenceRate -########################################################################################## -""" - RecurrenceEntropy <: QuantificationMeasure - -Define the *Recurrence Microstates Entropy* (RME) quantification measure [Corso2018Entropy](@cite). - -RME can be computed either from a distribution of recurrence microstates or directly from -time-series data. In both cases, the computation is performed via the [`measure`](@ref) -function. - -# Using a distribution -```julia -measure(::RecurrenceEntropy, dist::Probabilities) -``` -## Arguments -- `dist`: A distribution of recurrence microstates. - -## Returns -A `Float64` corresponding to the RME computed using the Shannon entropy. - -### Examples -```julia -using RecurrenceMicrostatesAnalysis, Distributions -data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -dist = distribution(data, 0.27, 3) -rme = measure(RecurrenceEntropy(), dist) -``` - -# Using a time series -```julia -measure(::RecurrenceEntropy, [x]; kwargs...) -``` -## Arguments -- `[x]`: Time-series data provided as an [`StateSpaceSet`](@ref). - -## Returns -A `Float64` corresponding to the **maximum** RME computed using the Shannon entropy. 
- -## Keyword Arguments -- `N`: Integer defining the microstate size. The default value is `3`. - -### Examples -```julia -using RecurrenceMicrostatesAnalysis, Distributions -data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -rme = measure(RecurrenceEntropy(), data; N = 4) -``` -""" -struct RecurrenceEntropy <: QuantificationMeasure end - -########################################################################################## -# Implementation: measure -########################################################################################## -# Using as input a RMA distribution. -#......................................................................................... -function measure(::RecurrenceEntropy, dist::Probabilities) - return entropy(Shannon(; base = MathConstants.e), dist) -end -#......................................................................................... -# Using as input a time series -#......................................................................................... -function measure(::RecurrenceEntropy, x::Union{StateSpaceSet, Vector{<:Real}}; N::Integer = 3) - return optimize(Threshold(), RecurrenceEntropy(), x, N)[2] -end - -########################################################################################## \ No newline at end of file diff --git a/src/rqa/lam.jl b/src/rqa/lam.jl index 3039b29..6d1873c 100644 --- a/src/rqa/lam.jl +++ b/src/rqa/lam.jl @@ -1,89 +1,115 @@ -export Laminarity +export RecurrenceLaminarity ########################################################################################## # Quantification Measure: Laminarity +# Complexity Measure Implementation ########################################################################################## """ - Laminarity <: QuantificationMeasure + RecurrenceLaminarity <: ComplexityEstimator + RecurrenceLaminarity(ε::Float64; kwargs...) -Define the *Laminarity* (LAM) quantification measure. 
+An estimator of recurrence laminarity, used with [complexity](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/complexitymeasures/stable/complexity/#ComplexityMeasures.complexity). +Laminarity is estimated for a threshold `ε`. -LAM can be computed either from a distribution of recurrence microstates or directly from -time-series data. In both cases, the computation is performed via the [`measure`](@ref) -function. +## Keyword arguments +- `metric::Metric`: The metric used to compute recurrence. +- `ratio`: The sampling ratio. The default is `0.1`. +- `sampling`: The sampling mode. The default is [`SRandom`](@ref). -# Using a distribution -```julia -measure(::Laminarity, rmspace::RecurrenceMicrostates, dist::Probabilities) +## Description + +Recurrence laminarity (LAM) is defined as [Webber2015Recurrence](@cite) +```math +LAM = \\frac{\\sum_{l=l_{min}}^{K} l H_V(l)}{\\sum_{i,j=1}^{K} r_{i,j}}, +``` +where \$H_V(l)\$ is the histogram of vertical line lengths: +```math +H_V(l) = \\sum_{i,j=1}^{K} (1 - r_{i, j-1})(1 - r_{i,j+l})\\prod_{k=0}^{l-1} r_{i,j+k}. +``` +By inverting the laminarity expression, we can rewrite it as [daCruz2025RQAMeasures](@cite) +```math +LAM = 1 - \\frac{1}{K^2 \\sum_{i,j=1}^{K} r_{i,j}} \\sum_{l=1}^{l_{min} - 1} l H_V(l). ``` -## Arguments -- `rmspace`: A recurrence outcome space. -- `dist`: A distribution of recurrence microstates. The distribution must be computed from - **square** or **line** microstates of size 3. - -## Returns -A `Float64` corresponding to the estimated laminarity. - -## Examples -### Using square microstates -```julia -using RecurrenceMicrostatesAnalysis, Distributions -data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -rmspace = RecurrenceMicrostates(0.27, 3) -dist = probabilities(rmspace, data) -lam = measure(Laminarity(), dist) +An approximate value for LAM can be estimated using recurrence microstates, as introduced by +da Cruz et al. [daCruz2025RQAMeasures](@cite). 
From an input dataset `x`, we estimate a +recurrence microstate distribution \$\\vec{p}\$. This distribution must be defined over +square microstates of size \$3 \\times 3\$. Here, we use the relation: +```math +\\frac{H_V(l)}{(K-l-1)^2} = \\vec{v}^{(l)} \\cdot \\mathcal{R}^{(l + 2)}\\vec{p}^{(l + 2)}. ``` -### Using line microstates: -```julia -using RecurrenceMicrostatesAnalysis, Distributions -data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -rmspace = RecurrenceMicrostates(0.27, RectMicrostate(1, 3)) -dist = probabilities(rmspace, data) -lam = measure(Laminarity(), dist) +For the commonly used case \$l_{min} = 2\$, this leads to the approximation +```math +LAM \\approx 1 - \\frac{\\vec{v}^{(1)}\\cdot\\mathcal{R}^{(3)}\\vec{p}^{(3)}}{\\sum_{i,j=1}^{K} r_{i,j}}. ``` -# Using a time series -```julia -measure(::Laminarity, [x]; kwargs...) +The correlation term \$\\vec{v}^{(1)} \\cdot \\mathcal{R}^{(3)} \\vec{p}^{(3)}\$ can be +simplified by explicitly identifying the microstates selected by \$\\vec{v}^{(1)}\$. This corresponds +to selecting only microstates of the form: +```math +\\begin{pmatrix} +0 & 1 & 0 \\\\ +\\xi & \\xi & \\xi \\\\ +\\xi & \\xi & \\xi +\\end{pmatrix}, +``` +where \$\\xi\$ denotes an unconstrained entry. There are 64 microstates with this structure among +the 512 possible \$3 \\times 3\$ microstates. Defining the class \$C_V\$ as the set of microstates +with this structure, LAM can be estimated as: +```math +LAM \\approx 1 - \\frac{\\sum_{i\\in C_V} p_i^{(3)}}{\\sum_{i,j=1}^{K} r_{i,j}}. ``` -## Arguments -- `[x]`: Time-series data provided as a [`StateSpaceSet`](@ref). +The implementation used by [complexity](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/complexitymeasures/stable/complexity/#ComplexityMeasures.complexity) +is an optimized version of this process using \$1 \\times 3\$ [`RectMicrostate`](@ref) [Ferreira2025RMALib](@cite). 
+Since this microstate shape is symmetric with respect to the desired information, it is not necessary to account for +\$\\xi\$ values as in the square microstate case. Thus, laminarity can be estimated as +```math +LAM \\approx 1 - \\frac{p_3^{(3)}}{\\sum_{i,j=1}^{K} r_{i,j}}, +``` +where \$p_3^{(3)}\$ is the probability of observing the microstate \$0~1~0\$. -## Returns -A `Float64` corresponding to the estimated laminarity. +!!! note "Performance" + Although estimating LAM using RMA is faster than typical RQA computation, + the precision depends on the time series length. Therefore, for small time series, + i.e., \$K \\leq 1000\$, we strongly recommend using standard RQA with + [RecurrenceAnalysis.jl](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/recurrenceanalysis/stable/quantification/#RecurrenceAnalysis.laminarity). +""" +struct RecurrenceLaminarity <: ComplexityEstimator + ε::Float64 + metric::Metric + sampling::SamplingMode +end -## Keyword Arguments -- `threshold`: Threshold used to compute the RMA distribution. By default, this is chosen as - the threshold that maximizes the recurrence microstate entropy (RME). +function complexity( + c::RecurrenceLaminarity, + x::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{SVector}}, + y::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{SVector}} = x; + ) -### Examples -```julia -using RecurrenceMicrostatesAnalysis, Distributions -data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -lam = measure(Laminarity(), data) -``` + rmspace = RecurrenceMicrostates(c.ε, RectMicrostate(3, 1); metric = c.metric, sampling = c.sampling) + probs = probabilities(rmspace, x, y) + return measure(c, rmspace, probs) +end -!!! note - When time-series data are provided directly, RecurrenceMicrostatesAnalysis.jl uses - line microstates by default. 
-""" -struct Laminarity <: QuantificationMeasure end +# -- Constructors +RecurrenceLaminarity(ε; metric::Metric = DEFAULT_METRIC, ratio::Float64 = 0.1, sampling::SamplingMode = SRandom(ratio)) = RecurrenceLaminarity(ε, metric, sampling) ########################################################################################## -# Implementation: measure +# Internal: measure from probabilities ########################################################################################## -# Using as input a RMA distribution. -#......................................................................................... -function measure( - ::Laminarity, - rmspace::RecurrenceMicrostates, - dist::Probabilities - ) - if (rmspace.shape isa Rect2Microstate{3, 3, 2} && length(dist) == 512) - rr = measure(RecurrenceRate(), dist) +# This is an internal function which estimates the determinism from a recurrence microstate +# outcome space, using a given probability distribution that was computed from this +# outcome space. + +# This function works for [`DiagonalMicrostate`](@ref) with length 3, +# or \$3 \\times 3\$ [`RectMicrostate`](@ref). Any other input will returns an error. 
+########################################################################################## +function measure(c::RecurrenceLaminarity, rmspace::RecurrenceMicrostates, probs::Probabilities) + rrc = RecurrenceRate(c.ε; metric = c.metric, sampling = c.sampling) + if (rmspace.shape isa Rect2Microstate{3, 3, 2} && length(probs) == 512) + rr = measure(rrc, probs) values = zeros(Int, 64) v_idx = 1 @@ -96,32 +122,18 @@ function measure( pl = 0.0 for i in values - pl += dist[i] + pl += probs[i] end return 1 - ((1/rr) * pl) - elseif (rmspace.shape isa Rect2Microstate{1, 3} && length(dist) == 8) - rr = measure(RecurrenceRate(), dist) - return 1 - ((1/rr) * dist[3]) + elseif (rmspace.shape isa Rect2Microstate{3, 1} && length(probs) == 8) + rr = measure(rrc, probs) + return 1 - ((1/rr) * probs[3]) else msg = "Laminarity must be computed using square or line microstates with n = 3." throw(ArgumentError(msg)) end end -#......................................................................................... -# Using as input a time series -#......................................................................................... -function measure( - op::Laminarity, - x::Union{StateSpaceSet, Vector{<:Real}}, - y::Union{StateSpaceSet, Vector{<:Real}} = x; - threshold::Real = optimize(Threshold(), RecurrenceEntropy(), x, 3)[1], - metric::Metric = DEFAULT_METRIC - ) - rmspace = RecurrenceMicrostates(threshold, RectMicrostate(1, 3); metric = metric) - dist = probabilities(rmspace, x, y) - measure(op, rmspace, dist) -end ########################################################################################## \ No newline at end of file diff --git a/src/rqa/rqa.jl b/src/rqa/rqa.jl index a46e5d4..0c2b268 100644 --- a/src/rqa/rqa.jl +++ b/src/rqa/rqa.jl @@ -1,9 +1,61 @@ +export rma + ########################################################################################## -# Include typicall RQA estimators. 
+# Include typicall RQA estimators (complexity measures) ########################################################################################## +include("rr.jl") include("det.jl") -include("disorder.jl") -include("entropy.jl") include("lam.jl") -include("rr.jl") +include("disorder.jl") +########################################################################################## +""" + rma(ε::Float64, [x]; kwargs...) → Dict{Symbol, Float64} + +Calculate all RMA estimations for a threshold `ε` and a time series `x`. +All values are estimated using \$3\\times 3\$ square microstates and a +[`Full`](@ref) sampling mode. + +## Return +The returned value constains the following entries, +which can be retrieved as a dictionary (e.g. `results[:RR]`, etc.): + +* `:RR`: recurrence rate (see [`RecurrenceRate`](@ref)) +* `:DET`: determinism (see [`RecurrenceDeterminism`](@ref)) +* `:LAM`: laminarity (see [`RecurrenceLaminarity`](@ref)) +* `:DISREM`: disorder (see [`Disorder`](@ref)) +* `:RENT`: recurrence entropy. + +All the parameters returned by `rma` are `Float64` numbers. + +## Keyword arguments +- `metric::Metric`: The metric used to compute recurrence. 
+""" +function rma( + ε::Float64, + x::StateSpaceSet; + metric::Metric = DEFAULT_METRIC, + ) + + # Compute a recurrence distribution (3 × 3) + rmspace = RecurrenceMicrostates(ε, 3; metric = metric, sampling = Full()) + probs = probabilities(rmspace, x) + + # Compute quantifiers + S = entropy(Shannon(), probs) + rr = measure(RecurrenceRate(ε, 3), probs) + det = measure(RecurrenceDeterminism(ε), rmspace, probs) + lam = measure(RecurrenceLaminarity(ε), rmspace, probs) + Ξ = complexity(PartialDisorder{3}(compute_labels(3), rmspace), x) + + # Construct a dict + dict = Dict{Symbol, Float64}( + :RR => rr, + :DET => det, + :LAM => lam, + :RENT => S, + :DISREM => Ξ + ) + + return dict +end ########################################################################################## \ No newline at end of file diff --git a/src/rqa/rr.jl b/src/rqa/rr.jl index f57b7d2..8759182 100644 --- a/src/rqa/rr.jl +++ b/src/rqa/rr.jl @@ -2,91 +2,79 @@ export RecurrenceRate ########################################################################################## # Quantification Measure: RecurrenceRate +# Complexity Measure Implementation ########################################################################################## """ - RecurrenceRate <: QuantificationMeasure + RecurrenceRate <: ComplexityEstimator + RecurrenceRate(ε::Float64, N::Int = 1; kwargs...) -Define the *Recurrence Rate* (RR) quantification measure. +An estimator of the recurrence rate, used with [complexity](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/complexitymeasures/stable/complexity/#ComplexityMeasures.complexity). +It uses a \$1 \\times 1\$ microstate by default, but you can set a different size +via the `N` parameter. The recurrence rate is estimated for a threshold `ε`. -RR can be computed either from a distribution of recurrence microstates or directly from -time-series data. In both cases, the computation is performed via the [`measure`](@ref) -function. 
+## Keyword arguments +- `metric::Metric`: The metric used to compute recurrence. +- `ratio`: The sampling ratio. The default is `0.1`. +- `sampling`: The sampling mode. The default is [`SRandom`](@ref). -# Using a distribution -```julia -measure(::RecurrenceRate, dist::Probabilities) -``` - -## Arguments -- `dist`: A distribution of recurrence microstates. - -## Returns -A `Float64` corresponding to the estimated recurrence rate. +## Description -## Examples -```julia -using RecurrenceMicrostatesAnalysis, Distributions -data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -dist = distribution(data, 0.27, 3) -rr = measure(RecurrenceRate(), dist) +Recurrence rate (RR) is defined as [Webber2015Recurrence](@cite) +```math +RR = \\frac{1}{K^2}\\sum_{i,j=1}^{K} R_{i,j}. ``` -# Using a time series -```julia -measure(::RecurrenceRate, [x]; kwargs...) +When estimating it using RMA, the recurrence rate is defined as the expected value +over the microstate distribution: +```math +RR \\approx \\sum_{i=1}^{2^\\sigma} p_i^{(N)}RR_i^{(N)}, ``` -## Arguments -- `[x]`: Time-series data provided as a [`StateSpaceSet`](@ref). +where \$RR_i^{(N)}\$ denotes the recurrence rate of the \$i\$-th microstate. -## Returns -A `Float64` corresponding to the estimated recurrence rate. +!!! note "Performance" + Although estimating RR using RMA is faster than typical RQA computation, + the precision depends on the time series length. Therefore, for small time series, + i.e., \$K \\leq 1000\$, we strongly recommend using standard RQA with + [RecurrenceAnalysis.jl](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/recurrenceanalysis/stable/quantification/#RecurrenceAnalysis.recurrencerate). +""" +struct RecurrenceRate{N} <: ComplexityEstimator + ε::Float64 + metric::Metric + sampling::SamplingMode +end -## Keyword Arguments -- `N`: Integer defining the microstate size. The default value is `3`. -- `threshold`: Threshold used to compute the RMA distribution. 
By default, this is chosen as - the threshold that maximizes the recurrence microstate entropy (RME). +function complexity( + c::RecurrenceRate{N}, + x::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{SVector}}, + y::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{SVector}} = x; + ) where {N} + + probs = probabilities(RecurrenceMicrostates(c.ε, N; metric = c.metric, sampling = c.sampling), x, y) + return measure(c, probs) +end -## Examples -```julia -using RecurrenceMicrostatesAnalysis, Distributions -data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -rme = measure(RecurrenceRate(), data; N = 4) -``` -""" -struct RecurrenceRate <: QuantificationMeasure end +# -- Constructors +RecurrenceRate(ε::Float64, N::Int = 1; metric::Metric = DEFAULT_METRIC, ratio::Float64 = 0.1, sampling::SamplingMode = SRandom(ratio)) = RecurrenceRate{N}(ε, metric, sampling) ########################################################################################## -# Implementation: measure +# Internal: measure from probabilities ########################################################################################## -# Using as input a RMA distribution. -#......................................................................................... -function measure( - ::RecurrenceRate, - dist::Probabilities - ) +# This is an internal function which estimates the recurrence rate from a recurrence microstate +# outcome space, using a given probability distribution that was computed from this +# outcome space. + +# This function works for any recurrence microstates probabilities. 
+########################################################################################## +function measure(::RecurrenceRate, probs::Probabilities) result = 0.0 - hv = Int(log2(length(dist))) + hv = Int(log2(length(probs))) - for i in eachindex(dist) + for i in eachindex(probs) rr = count_ones(i - 1) / hv - result += rr * dist[i] + result += rr * probs[i] end return result end -#......................................................................................... -# Using as input a time series -#......................................................................................... -function measure( - ::RecurrenceRate, - x::Union{StateSpaceSet, Vector{<:Real}}, - y::Union{StateSpaceSet, Vector{<:Real}} = x; - n::Integer = 3, - threshold::Real = optimize(Threshold(), RecurrenceEntropy(), x, n)[1], - metric::Metric = DEFAULT_METRIC - ) - dist = probabilities(RecurrenceMicrostates(threshold, n; metric = metric), x, y) - return measure(RecurrenceRate(), dist) -end ########################################################################################## \ No newline at end of file diff --git a/src/utils/opt/threshold.jl b/src/utils/opt/threshold.jl index 88554e1..83af2d7 100644 --- a/src/utils/opt/threshold.jl +++ b/src/utils/opt/threshold.jl @@ -6,45 +6,32 @@ export Threshold """ Threshold <: Parameter -Threshold parameter used to classify two states as recurrent or non-recurrent. +The threshold is a free parameter used to classify two states as recurrent or non-recurrent. +This parameter can be optimized using the [`optimize`](@ref) function in +combination with specific __complexity measures__, e.g., **Recurrence Entropy** or +[`Disorder`](@ref). -The `Threshold` parameter can be optimized using the [`optimize`](@ref) function in -combination with specific [`QuantificationMeasure`](@ref)s: +Use: ```julia -optimize(::Threshold, qm::RecurrenceEntropy, [x], n::int; kwargs...) -optimize(::Threshold, qm::Disorder{N}, [x]; kwargs...) 
+optimize(p::Threshold, q::Entropy, N::Int, [x]; kwargs...) +optimize(p::Threshold, q::Disorder{N}, [x]; kwargs...) ``` -!!! compat - Threshold optimization using RMA is currently supported only for the - [`RecurrenceEntropy`](@ref) and [`Disorder`](@ref) quantification measures. - -# Arguments -- `qm`: A [`QuantificationMeasure`](@ref) used to determine the optimal threshold. Supported measures are [`RecurrenceEntropy`](@ref) and [`Disorder`](@ref). -- `[x]`: Input data used to estimate the optimal threshold. -- `n`: Size of the square microstate used in the optimization. - -# Returns -A `Tuple{Float64, Float64}`, where: -- the first element is the optimal threshold value, and -- the second element is the value of the corresponding [`QuantificationMeasure`](@ref) at the optimum. - -# Keyword Arguments -- `rate`: Sampling rate. Default is `0.05`. -- `sampling`: Sampling mode. Default is [`SRandom`](@ref). -- `th_max_range`: Fraction of the maximum distance defining the upper bound of the threshold search range. Default is `0.5`. -- `th_start`: Initial value of the threshold search range. Default is `1e-6`. -- `fraction`: Interaction fraction controlling the refinement process. Default is `5`. - -# Example -```julia -using Distributions, RecurrenceMicrostatesAnalysis -data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -th, s = optimize(Threshold(), RecurrenceEntropy(), data, 3) -``` +## Arguments +- `n`: The size of the square microstate used in the optimization. +- `[x]`: The input data. + +## Keyword arguments +- `ratio`: The sampling ratio. The default is `0.1`. +- `sampling`: The sampling mode. The default is [`SRandom`](@ref). +- `th_max_range`: The fraction of the maximum distance defining the upper bound of the threshold search range. The default is `0.5`. +- `th_start`: The initial value of the threshold search range. The default is `1e-6`. +- `fraction`: The interaction fraction controlling the refinement process. The default is `5`. 
+- `metric::Metric`: The metric used to compute recurrence. """ struct Threshold <: Parameter end + ########################################################################################## # Implementation: optimize ########################################################################################## @@ -52,23 +39,28 @@ struct Threshold <: Parameter end #......................................................................................... function optimize( ::Threshold, - qm::RecurrenceEntropy, - x, - N::Int; - rate::Float64 = 0.05, - sampling::SamplingMode = SRandom(rate), + q::ComplexityMeasures.Entropy, + N::Int, + x::Union{StateSpaceSet, <:AbstractGPUVector{<:SVector}}, + y::Union{StateSpaceSet, <:AbstractGPUVector{<:SVector}} = x; + ratio::Float64 = 0.1, + sampling::SamplingMode = SRandom(ratio), th_max_range::Float64 = 0.5, th_start::Float64 = 1e-6, fraction::Int = 5, + metric::Metric = DEFAULT_METRIC ) + data_x = x isa AbstractGPUVector ? x |> Vector |> StateSpaceSet : x + data_y = y isa AbstractGPUVector ? y |> Vector |> StateSpaceSet : y + ε = th_start εopt = 0.0 if length(x) <= 1000 - εopt = maximum(pairwise(Euclidean(), x)) * (th_max_range - ε) + εopt = maximum(pairwise(metric, x, y)) * (th_max_range - ε) else - εopt = ((maximum(x) - minimum(x)))[1] * size(x, 2) + εopt = ((th_max_range - ε) / 2) * (((maximum(data_x) - minimum(data_x)))[1] * size(data_x, 2) + ((maximum(data_y) - minimum(data_y)))[1] * size(data_y, 2)) end Δε = (εopt - ε) / fraction @@ -77,7 +69,7 @@ function optimize( for _ ∈ 1:fraction rmspace = RecurrenceMicrostates(ε, N; sampling = sampling) probs = probabilities(rmspace, x) - f = measure(qm, probs) + f = entropy(q, probs) if f > fmax fmax = f @@ -99,32 +91,32 @@ end #......................................................................................... 
function optimize( ::Threshold, - qm::Disorder{N}, - x; - rate::Float64 = 0.05, - sampling::SamplingMode = SRandom(rate), - th_max_range::Float64 = 0.5, + q::Disorder{N}, + x::StateSpaceSet; + ratio::Float64 = 0.1, + sampling::SamplingMode = SRandom(ratio), + th_max_range::Float64 = 0.67, th_start::Float64 = 1e-6, fraction::Int = 5, + metric::Metric = DEFAULT_METRIC ) where {N} - ε = th_start εopt = 0.0 + ε = th_start - if length(x) <= 1000 - εopt = maximum(pairwise(Euclidean(), x)) * (th_max_range - ε) + if (length(x) <= 1000) + εopt = maximum(pairwise(metric, x)) * (th_max_range - ε) else - εopt = ((maximum(x) - minimum(x)))[1] * size(x, 2) + εopt = (th_max_range - ε) * ((maximum(x) - minimum(x)))[1] * size(x, 2) end - A = get_disorder_norm_factor(qm, x) Δε = (εopt - ε) / fraction fmax = 0.0 for _ ∈ 1:fraction for _ ∈ 1:fraction - rmspace = RecurrenceMicrostates(ε, N; sampling = sampling) - probs = probabilities(rmspace, x) - f = measure(qm, probs, A) + rmspace = RecurrenceMicrostates(ε, N; sampling = sampling, metric = metric) + partial = PartialDisorder{N}(q.labels, rmspace) + f = complexity(partial, x) if f > fmax fmax = f From af4b690ec0753512b61fdea4e54a442879129c65 Mon Sep 17 00:00:00 2001 From: Gabriel Ferreira Date: Fri, 10 Apr 2026 23:26:12 -0300 Subject: [PATCH 16/19] update docs: finalize API, add tutorial --- docs/Project.toml | 4 + docs/make.jl | 13 +- docs/refs.bib | 1 - docs/src/api.md | 99 ++--- docs/src/assets/example.jpg | Bin 0 -> 11479 bytes docs/src/dev.md | 292 +-------------- docs/src/examples.md | 4 +- docs/src/examples/ml.md | 180 --------- docs/src/gpu.md | 1 + docs/src/index.md | 56 ++- docs/src/tutorial.jl | 194 +++++++--- docs/src/tutorial/distributions.md | 60 --- docs/src/tutorial/gpu.md | 72 ---- docs/src/tutorial/quantifiers.md | 343 ------------------ docs/src/tutorial/recurrences.md | 31 -- docs/src/tutorial/shapes_and_sampling.md | 45 --- docs/src/tutorial/utils.md | 174 --------- src/core/recurrence.jl | 34 +- 
src/core/recurrence_microstates.jl | 159 ++++++-- src/core/shape.jl | 6 +- src/recurrences/corridor.jl | 40 +- src/recurrences/recurrences.jl | 2 +- src/recurrences/{standard.jl => threshold.jl} | 40 +- src/rqa/disorder.jl | 15 + src/rqa/lam.jl | 4 +- src/utils/operations/permute_cols.jl | 2 +- src/utils/operations/permute_rows.jl | 2 +- src/utils/operations/transpose.jl | 4 +- 28 files changed, 438 insertions(+), 1439 deletions(-) create mode 100644 docs/src/assets/example.jpg delete mode 100644 docs/src/examples/ml.md create mode 100644 docs/src/gpu.md delete mode 100644 docs/src/tutorial/distributions.md delete mode 100644 docs/src/tutorial/gpu.md delete mode 100644 docs/src/tutorial/quantifiers.md delete mode 100644 docs/src/tutorial/recurrences.md delete mode 100644 docs/src/tutorial/shapes_and_sampling.md delete mode 100644 docs/src/tutorial/utils.md rename src/recurrences/{standard.jl => threshold.jl} (67%) diff --git a/docs/Project.toml b/docs/Project.toml index bd86c98..a415af2 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -8,11 +8,15 @@ Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" DocumenterCitations = "daee34ce-89f3-4625-b898-19384cb65244" DocumenterTools = "35a29f4d-8980-5a13-9543-d66fff28ecb8" DynamicalSystems = "61744808-ddfa-5f27-97ff-6e42cc95d634" +DynamicalSystemsBase = "6e36e845-645a-534a-86f2-f5d4aa5a06b4" +FileIO = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" +Images = "916415d5-f1e6-5110-898d-aaa5f9f070e0" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +RecurrenceAnalysis = "639c3291-70d9-5ea2-8c5b-839eba1ee399" RecurrenceMicrostatesAnalysis = "cb83a08b-85c6-4e94-91aa-4e946c7d4f0c" StateSpaceSets = "40b095a5-5852-4c12-98c7-d43bf788e795" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" diff --git a/docs/make.jl 
b/docs/make.jl index 3ac66b5..c6bf61a 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -13,19 +13,8 @@ pages = [ "Welcome" => "index.md", "Tutorial" => "tutorial.md", "API" => "api.md", - # "API" => "api.md", - # "Tutorial" => [ - # "Distributions" => "tutorial/distributions.md", - # "Quantifiers" => "tutorial/quantifiers.md", - # "Recurrence Functions" => "tutorial/recurrences.md", - # "Shapes and Sampling" => "tutorial/shapes_and_sampling.md", - # "GPU" => "tutorial/gpu.md", - # "Utils" => "tutorial/utils.md", - # ], + "GPU" => "gpu.md", "Examples" => "examples.md", - # "Ecosystem Integration" => [ - # "Machine Learning" => "examples/ml.md", - # ], "Developers docs" => "dev.md", "References" => "refs.md", ] diff --git a/docs/refs.bib b/docs/refs.bib index 2e4f91a..7b69dd8 100644 --- a/docs/refs.bib +++ b/docs/refs.bib @@ -137,6 +137,5 @@ @article{Ferreira2025RMALib month = {11}, title = {RecurrenceMicrostatesAnalysis.jl: A Julia library for analyzing dynamical systems with recurrence microstates}, volume = {35}, - url = {https://pubs.aip.org/cha/article/35/11/113123/3372778/RecurrenceMicrostatesAnalysis-jl-A-Julia-library}, year = {2025} } diff --git a/docs/src/api.md b/docs/src/api.md index 5e4278f..a8afe26 100644 --- a/docs/src/api.md +++ b/docs/src/api.md @@ -1,76 +1,77 @@ # API -# ## Probabilities and counts - +## Recurrence Microstates ```@docs RecurrenceMicrostates -RecurrenceExpression -CorridorRecurrence -ThresholdRecurrence # What uised to be called Standard ``` -# ## Specification of recurrence microstates - +### Recurrence expressions ```@docs RecurrenceExpression -MicrostateShape -``` - - - - -Alternatively, a [`RecurrenceExpression`](@ref) can be specified directly: -```julia -distribution([x], expr::RecurrenceExpression, n::Int; kwargs...) 
-``` -**Example:** -```@example quick_example -expr = Corridor(0.05, 0.27) -dist = distribution(ssset, expr, 2) +recurrence +ThresholdRecurrence +CorridorRecurrence ``` -If a custom [`MicrostateShape`](@ref) is required, the call simplifies to: -```julia -distribution([x], shape::MicrostateShape; kwargs...) -``` -**Example:** -```@example quick_example -shape = Triangle(Standard(0.27), 3) -dist = distribution(ssset, shape) +### Microstate shapes +```@docs +MicrostateShape +RectMicrostate +DiagonalMicrostate +TriangleMicrostate ``` - -# ## Sampling modes - +### Sampling modes ```@docs +SamplingMode SRandom -what else? +Full ``` -# ## Computational specification - +### Sampling space +!!! todo "Future implementation" + We pretend to expand the [`RecurrenceMicrostates`](@ref) structure to also consider + a setted space from the recurrence plot as source of information to construct + the RMA distribution. + ```@docs -CPUCore -GPUCore +SamplingSpace ``` -The following needs to change: +## Recurrence Quantification Analysis +```@docs +RecurrenceRate +RecurrenceDeterminism +RecurrenceLaminarity +Disorder +WindowedDisorder +rma ``` -This method automatically selects a [`CPUCore`](@ref) when `x` is a [`StateSpaceSet`](@ref) -and a [`GPUCore`](@ref) when `x` is an `AbstractGPUVector`. By default, square microstates of size `n` are used. -Additional keyword arguments include: -- `rate::Float64`: Sampling rate (default: `0.05`). -- `sampling::SamplingMode`: Sampling mode (default: [`SRandom`](@ref)). -- `metric::Metric`: Distance metric from [Distances.jl](https://github.com/JuliaStats/Distances.jl). When using a [`GPUCore`](@ref), a [`GPUMetric`](@ref) must be provided. +## Optimization +```@docs +Parameter +optimize +Threshold ``` -all these keyword arguments should not be given to the -`probabilities` function. THey should be given to a generic computation type that is a field of the RecurrenceMicrostates!!! 
+## Operations +```@docs +Operation +operate +PermuteRows +PermuteColumns +Transpose +``` -The following must be in the GPUCore struct: +## Utils -!!! warning - GPU backends require inputs of type `Float32`. `Float64` inputs are not supported on GPU. +### GPU Metrics +**Distances.jl** is not compatible with GPU execution; therefore, distance evaluations must be reimplemented +for GPU usage. This is done using a [`GPUMetric`](@ref). +```@docs +GPUMetric +GPUEuclidean +``` \ No newline at end of file diff --git a/docs/src/assets/example.jpg b/docs/src/assets/example.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a1a1465d29c80ed06228f250c48314b7557250ae GIT binary patch literal 11479 zcmb7q1zeO*_wTcGgVGHm(jZcj5=sh4cf-=%DIy4hbT_#4(%qc`(nurSof2{v^!NV% z?|c9E-p`$9clOztXXl(bGkebWoHKV*cRv9fX>loW00aU65bOf(mH@^#ZsuSBkdk5q zP=Nbpb`TtZ47($Nz3+>Jt*xn*3GCTfz#o8zgG2dKuuzauuu)Kukx`If4al(TzJ9oY zwLQGh?psk&QIU|*G0{*kVfQE?cmxC_L?kpMBs4TkG)ycM%=-eXQSL8Td8qDh{txA@ z6?lXKq5@IDgB}5Jk3jH`KzE%0`TcYdLHG0er+^TVUci~e;1M1nQX?ThW*1RF!F^(2{{i)dsA6O$9*v=2S=-1{4kd@ELwuvJ?PAfRT#imL zkn+y4B~I92WH2iCwEiIk`$Pi4MngnKf%OW*xB|gQAtS*fAp`LD55YPgA+RGprWR2! 
zz_tGnnb|gi^yGzN*`a9E5)Fr;!`D$fe82Mcv7@K2mF{K$40sT1V(^cESHN}@;7f%9 z2lyg@0AZXz74XpTr~0o}5ODw4->!c>3+w-{Ryg4A2=~9be-Mr(7tR8M6WjqAN0ireRRm~Q=n$+$K9<-7dIEIBF1^t68B`H~}z^?Q&BIW(7@ z<5JmZN2 zT0E%BUg<6Rrm2Q6f3#Rcw~|;RFTHI3o9S%&avi(}oPGpP8Y+8IMPR<+=)}0E70N>m&3H1HCR3O7^I0r zQxe0Mc|>lZDBmCZSr9{(aYr*H`goT$yV5G+VZ)#SzA>=X0ipbl`+>#UXooQxEyr8M zIQdcaH4AaG8s3Ws@%Op29zF6}3A5mJ$HU5;^f7d zu^}3n;4yg|XYB>%029U=jjLCjv&`pM#bs57Q#4TX{*mjJ ziAVP1Gg*qE;iUHt3s|oTN+cdPkx6^Y6+?s|{qpF(601HIKcooiC2QDaA0;uLyUZT& z@VkXB8N_mbQOp@xf_Nw>NpP5ECl0@DNAa>{c{WD|s+e&rYyH^AFrLcG&{p{aTZk!o zN;OqVF|LwSK7L`MFL`mKKLR*+Q?G{5WVE+Z|0!|;VP;zUo`{$i3 z!*WTW&aYRi3HH!5?N-V#T>F`(dVeB~?K!w(#3!t;?oVY6G_|%z0;;1@##Y%@ z;hv1K4}62@(QPH2c%%H*F+cMMn#_0F3GU$K=SGITlc%8=M29$sPMxPAb!FL5eQ6_Y zL$s*0SRFjw^d6P`QECu7)MUj%DG}SlmzTg|ircITu-dx9VS%@uGf$%n>q$x$G+rgL z{bIRsq$8n|JcVZFA2Fv7#3vnbMx=<6_ zODopfxS(6-FePOiEU!-fLhr(Hc-a*`%_a;VqK(1bnzgIxeX1l=!cpZ-qr)B{+taJ0 zJK*aSvhK|t;4({?=H+{&a4De?c`3TvHzE!I=%5IAU#(BmR}bV+P=!U|7DwmFaXHK( zI~obsVv#p-yzRGO-tNuP!?zZi8q0YBv$(BGPi;tN)(;P#?9oIrY2wB_%_SSF-#zO) zel9AP60?#t(|p4h2HiH&uwR~X7BqU{*lM+V@k}31b(SCf{F4vV?~Iv%*3hi`>)z@G zWUSOJHYlrrbwL)ahwe82hmrr$9qsw8(U!`O^i%2>1VGqFj!nh*>2e7&*D-yAy8_J|-I&W!qlOOc0#=K|_h!d>Ay& z>DUTho64)$Lx|YASeicfZ-)|(1hbaA^#Cs69~~JVA-V*;#h;Fy+^QA*YZD*rAWM@# z*33uC5ELO`^*wEH!IV*6HQu1q7CWTtaQnlqba$;IF0!!5J~{Zu{9AI}kVkS}Pg|h# zcR)nj;8>?pIx6eDWzYB9Kuwz_{WIaeyA3%`~%~}8%D45SrKQGl8MB6nE-)PnRp1?Q{e$^1q-AH;&{d<^gzW-Nz zz)h&iFfof@UDYBje)iEL3wiU%$&vDZ00hFl?U5OEx0Ywvujvdh{!O68oEXOGfLV|P zz8-B|+>ZTVA!fDQ5{%v~MRI!nohij9Z&whHpV`QJc;paZ(+@6}Jc}tTglVCF2M{pH z!hp&@pa`kXX=ivv=_Bgwj)LTLk@cP%yQCM%4?=hP^;Aen;E!LL-WMVJvgLv~n>)jk zuy5CtA9;U$m^^b7zn+3)`{|m8p_63%n025(qiDsB>L0CwgMgS{$T%xLY46_DkbG6h zrQ1(bAQ&gq99mCvI_HS(AJY9JF=yWx!?Eo+M&d~0)R{FNSB}5Lbpmhkx)3~anyUSl zrvAcPin|4>yq$DLGuD1D?0+%-pW;RnN;K+_rz^KGP{N>sA<1+GMi+4 zO-_SzY;Ve-NJ{0Sf4O<_N#vCVL{VXUNb31?95Npd_8RY_xg|qvk7JL@d4_K(6>+VIrth znNPjq(soVx5U6Khy>f%8LeZgt2YKWUfGSQZFGulQq*shaaZKAdzVwcdn!M~*ZV@&p 
z%+@J9XDvF&Z^>jGR`+C3^*m=2x&vtJ$6Mq4VLRzwalXU(A22L|uz8D|w|M-AFJmwE z^)X8<#Z5wi7mhG(jF=h^O|u}+wbQ#zU~XnD8odMLeygp%c3MEY18#!Y){o=Czr~Y( zURGqfdHNjwTlc=@rnXnagfe8zPqJdZKKI!@bBZu?#iS#8b7 zz>6+>8alEtlGrZ+AI;w?q3fC4s2}w1Gudz`azi-34aTl5KNw?yf`XY=?PC&T+}k;b z%K+?Eb{Z&$UfkhIysCZyrgBb7*!h}+0HM-3~g~H zuIjX^BfS>i2#wD&+oL3V%DtCdwHUP;{(9sRWyzpho@cd=hRwHrL;XQ^_Yy_8uK*l$ zm_+~6bAoSg!8R^*awwG|ZL^#@59Au;Z!;&L)}qg65(qKDhsicO>0*=bJdv@~W>#)& zQFsG*Gq@+W0-p7TxD(VAW+v>ROl%w60c_`u4WuDi++>3ROV;lMW0=RE8Q9!cv^BfGWcS~5eZPS1~7LR zb&++{sF0+vz^xyq^y?}lFjw=>(Iv*(#YbjYj$ImVUp{gD*gDqUJu-7qlYFZEC8^$2 z^4_Zar|4lC=f3ryRVjy?7)wakTg*` z4uvWQzxR^$7(I}fzzAc7K%-#D?Qj({v1Xs!GKs%JVY_)~)aG=Jw%_O~1+5+_+0{R3 zr-I#wW%GAelDv`T5hFiEbP9z6I}|Kb8&+7V zw|}~rnWYGlS2rx5Cw@h9smd$@6%iv<0>CQJ%Q~t$71pjg75+vs6L`=H|Dd7&=D*|J zcuMMLe;!koo-zR2L`i(AbWfx6C5|=D$ta;SHid|0yB9cpU8AeB=F-zxh+tILzAXLb zsC*QWA8}^q8uw+*yJ2eBy_nXTiD);cWXLsti-dnFc;ayys|EU+mdWg;nx|K&{pf!D zO#FX`?i2DP$G=S!KU%v@>+1mG3c^X5bw+QtN!(WZG@rykn6)QQ4y& zR|;L{iK|vi181(Rjw1J2@Kw0?U9`|N$-{rjDMGW;+tM{p7i3LReLf%SP#N|lM}DxA zkuh3>XQ6-1Kac%6oZDbicg2zdzs=V`kVzp)XSwIDbwVMg+HP`Bm z95>DMJh_ghli~)!*uhNv8pnF^Ep-V32VT|;QtP;)y5@C%re{DB&$;f(TzyPs#kVX^ zHOLvY+kwH?17>oW_ZYvEw!pu#>XL0UGzT(aK6YLKDgTQZOm0uMwxeXhz)m}zE0s(Y zU93C63!A-r^F|!aBO&Nz3-b7}cfgQy!Uk+R|5+UDfhYj=Su0qI8^Bym1E`);RMwj; zxw*3o%Q_3!viOGOhf&;QeXw1fl z&1pYZ=yr8x-;*9`buOvB1IBc&G@tnkG4-AF*LkF>>oZ93w=`4chp>BFEIak z3#|_<$aM}-GYdt%3Tq)xwXJ1y<&oj{@Rpr3-YTa-9qe|90`oc|i!xk^&kqS%-V1f4 zu0m>4FtuNGR@-a%q~<=UCUL6JdI@<}?4E~%0Q?mm42N`iTx=mreGsW)o1Eyy9@!#V zs-Y&f<`?QT3JLeE(|lQ|{rsq5$YJH`u`FiB&X#oCr(U*TTiQPR=(&dPwmb6L37cU#ow2S5?7@4^l;&2frw$jsQAiW`{LPOS{6e6to%I*3Dv)9WNdWNZCNU^HC zGD44B_MY%@@q{>Ql9!ZAu<-k(+17}pyj)o!x(>CB$nP4u?BiRcPMr# zIYGOm*&L7EbH|;qA@VW__SV6Cvm&1Xc-am^C>7YKZnxJ;DkN@aDqj_y0_qt(&357J zK|!;JT!GuF+e~o`S&{eiQr)w;CVfJv@9Y1djS-P$E0>e8a-s@~MabCwr06P-dDuf2 zE*k9224a&!iB+Qf@n^61U)-DLi7->as;$we8bZhA&)xA@1Kdc-LJD4~9Npma=2mV( z-dOIo&22Yl*J^DyXSNqof$idPKkn~|z=7CafhM6df*sV$Zf>!YR*QAc23Jk?-z8>u 
zYdF?jz3{_KYMAUcWnZ349oaL^gB5qr`Do>D*pC>K26WPCRsDpQrW>%I@7#n**AHmd z9I+t8#xwI@9D$FbzN}O#TIz;*uJk~MR}AeKuE(#N1TiOb@-1h6lm@c5 z@)n!dI!e}3eF>uB&x5E=eaBT5#D^jEe`2Lbyg#bzGTP|%lxR~qiodCI`#O!zQVI3j zkgc?`2&g{eq%J&}tF36ttdy0d3sTN3XZ)`Ci;$uwYWch7R$RjPGX9H+2}zSQMeSu* zPTXlDr1i3of=QSud4p03eV-OlD+ai9id`)8AIs(fv9tA-g<6A1}UF^i#im-C8r z_uI$p%Z#ekz40;jz5raS4G7HXlAZ-ySy+6PVExrs*{J>S3OZJ-_dD$52kZPuR8#|xYUAt z=~+Wc)PR6-?^1Y?h=jk9L8!uQl)zwdiW2N@D>AE7YsYd>2%hPO5I*}_!5dpL?=BxY z5g(-?@#slZIulnzeb4M<#jjWJ6FVkoZ!TjmMqW-XuwlEG}p{;~b3YhsfHfc}z z^Mcgi93w9^R4-O$w?hFk-hf2@M1@Ln>??`-G*ttYQQagN?l9BHGi;u#+?Utbz1$n+ zHZKFnMiUUZ#+0;FEyIPH>1ap;CUu^2EDojTH-TQGT>iDo@JGy<8MVn=8BuaSk&M%U z07dI*vWhL&=(G^Rl7I$(!l3(rwBeSr{_Q zhLi@gcpg{gT+FXgp61$=e;d9*wmK^zkGp~_2;9Jp`@5#jOt9>K{e{jx1S9(n;r`!D zFd45;YQ00kZ++H0d@I^RKQ{wW{655%DKhNMnT`eZt4^KeP^uMBzuKF-96YAprLCKT z6Zwf>Ss)z)i$onQHz-6C`{#y081dB_Xl;tNbs2Zcv zb7JYV)FLy>H%Tu*2JC5Z$_xf3iq!<7AJC>W&=44z1DKV{2JkCNE3Z1Z`6F-fmC78Z z45z<3*-)M9b~GA`B{@)CkhN)iNlc~QOfSYh(&j3P{ZR;+{^TIp=!*@FPq+TsnYwQo zgDqv?fmWl3K2v9}og!;2z(dLzxYx(w`t9*-#r3dM%HU3_T-5MtM`whm%nJ$i(1az-5@;(%#Z zt*r`^@0rew?T1^;$p%$Z)tD-XqoX&4X8TNV35Mrq_Vlb))TV^PNy*7$OKlSC`U6I( zuNJ$pJd+KM0@1T6n7Q(V_(>D-nhha4=FKY(G5*I9@9_4X_K?cT3yR%J68+L!B9#gM zS(1y)OJB6ddc4`AkyVotIa>6@+vM$;qA9J|B2DvAFE&WOi8TfPB|Xcx&hL*Df`jMu zNT>sx-}y;c-{$&=^)gD$!w{ZIb`2G^q9}Sq0IFyWBAPb!y8sl??C?Si#;)=YE#-Fr z1@e$%-vgJN)W=IF4sDS5N3_aSFl z<=GZbll2+jTRe=AKUNDV%tPG%g4phx);W;$bMgkg|MMJGavvzO3UcfcqXl zQsxd|4|QE{{ANE99gg(F9zCkUvhaE$AoEY^0n6FxwwSdWVla>Q7IS|G#r{`bSgdPd zJX`+$LT8%0n&t>v?JE$&-6naak(@}wudFsOzH*Na*Xhl6WOFN~s` zxs~jbxxjLx65TtMd32FZ0b%53PhszHOJcuOGyjF}Lp)Q-Hxlx;O9o!!JwhY%O;}z2 z)(KD!uhvAx^`=&bMttM@uonU@{}D*i=}D8ud$uILD=wdrGk1s-r&LW|_&~mYIsX9- zeXM`CYy?gKfT9cojd&=^$yC&!4Ml#|!&iRr7;-OMlcvOtopY6)N!@hGR$B6MC{fg3 z(XQ9VT2$=BkAIq+R@AX-u&VmO~z6oq9^Vxd(H1(7o2j0|6rXz6kVi`1B;!EEzSA zH3@iEt%h~)0Cz!(Jia+cbo~VrooUiDR@F~t3PAnMgc@$XNuP_C|5bgRxzj#c8dB@(M<#UXtbK@Ee=ybLkr zPno|37_cJ%)WQLxuxtoSDdJ?JC?Az1n}pNZA>2d|If&Rvyc)D3{$6YGJ=zE>!?XeO 
zxsad?+lj7ukSmn#yPm2TW5ahHt2))Xpa=LpwkR1E7(e#z2dB^4{wJlrXi(X@F+hJF zpX=7`MV@Y>OP7C*8ms_SgE>Ku6?N=~u58S0f+u(7W>?9lg)DfLcJvq)G=&0Cb2xNZL?` zX|qv~DSYo6!u9L-mw(!CX(>9-N?_v#+K8@ zjdjSoUYC8`!$@C#`RhOE#J8Q<6w$M#}_Ms?|l9g#GJsM^<+Bwe3&~U>*$QKy><6cHx?`Pm7M7`szYY!A zB!zzD(!PwIb;}P_LVa?PB;p#`+dY_a2MjR2%CX#TQ`BfG$v0W}9mpg(QK`b!VhdjJ z*w0+``s;mSqHhi5IU?KvD|iF9XC|nuti$?}>@ayD?Q40n zbN;slm3KhvgwLrD`fu8*+Or;4mQRKHZXKCE!-KhWH3_$(OLRt~79z6heagnK&%(|3`9G?2JY8!@%GNB)M&>nc z9>q=a*G`k%_JLM;6A+5YzaiL6;$G~Q!g3IRxpl>!0fC4cZo&Ii5n<@2STFU2`8WoK-#3&(1I2R1pz6iVkJGWYMGjjFFzd$L_QxvYETHe2lqA#t2lLE!p93a;>F;4a)YAbzliPgb3@D5l+3v zI$zg(-Oy29g@UW}=wnky%g(J*15eK7 z(-A*+mVI3}Y9j{o2ACUM3B!!qaY3Oa(%2jcZ?rL^M#6atp-#jq4>8us4J0tQ7t-0p zN?Z3)e@uCxmnw8jo&nR?8u7bhiWQI75TeGQh^5ptH3L1gF)Y{#ytzSviLE1;{#lOn zhk$l9()w$KMXEQu7rxv+bX@jz=gL zNzhn)-x`Pfj^}VR6s!91@aT{I5 z)uEDIhwk~S?DDu7v2}dBMvi3IAK+JOAAk9}_jF8j^{gH)g)&Z{Di0-w$k!9n9c2 znlx7j3Sv~3s$uk$WjBCTig9irG!NQ9I~Q-sO{&<0ZjbT0++x^_z8LlK z^V?^tw?TvTrEY>hYWe+kmMWd48%+0mx~`lfyxI0_NRd+c2=bMpFARURblITIgKsTP zR8QS>_|5Y&ABhVZA}-Pt8UCX1l(%1(QY_#!kmf5@)l=$XpAl{5gFTTqHojpLtc z@34RW<4vPbgNpcmZCcCg>Zt@q%Bb6H^w{R**$+e^*4L!Ff9*_;Padx~5a2#s18chE8!6hMnpD3L2~Bzdng<(fYc z@W!p-_K<%3WoP77I4na;iPM9h8sr^SI=L zp>lLC%S8Oi?-L1CCd5?mG?RJ+=J#0z*SB{FiD^{4$?XL+I`aKHumn&psmZw zi;7+7m%^1;r#wqXE>@AnmA%0%p)6i}Y#VUCLWmi=gj)Ox{c!rHk;jk(zVkIv;cM># zRNLbMO?k2L`Io=dZae*i7M5BFC~}Qg?f``~WJowpizU84);20k{W2)sq?HU&e!Bx| z>vb2FTn)B;+}@AtUeRvuW`muphEl=}Q97B0_{Ak|$Ot9`AK@SMH5a87t}Hjz7qKLG zD1`G0f*WGgJUG@%>R%ZU=_AF3`iUf@iJw*ICB(_**0g0IPQc=1GE2cbiwnJ~{IVNXb!bC1JmCZQH(C+cu0+3wY? zLa;rhM9PJr+_S+nIBA}Et;TA7ArU7?&dO!RHOVOtO%|k?t>sh*=}Hh<0!t*lvArQn zzlCLoPU_!3ZTN8>e+MLrt$FVZ*FMLV$it$P?yOGisyv<73SWpk#y%~?9ee+jfByGp zqU@|@^^2C*Uj{SgaZ5EHQ_SdPNek9TPyPy;cWXPq)GnBjZqf-;)vk71H&gCbNxOC? 
z_8}q}hFv2OW&rFUFOrBOX)`$TS{dQ*LC?Ri2Npm2;}gRa9n7Qo+X@H#SL?$;j{B@M zEQ5Mq{W-q&aCi;w{sh~f^Zh literal 0 HcmV?d00001 diff --git a/docs/src/dev.md b/docs/src/dev.md index 9524913..5fa737b 100644 --- a/docs/src/dev.md +++ b/docs/src/dev.md @@ -1,291 +1 @@ -# RecurrenceMicrostatesAnalysis.jl for Devs -!!! tip - All pull requests that introduce new functionality must be thoroughly tested and documented. Tests are required only for methods that you extend. We recommend reading the [Good Scientific Code Workshop](https://github.com/JuliaDynamics/GoodScientificCodeWorkshop). - - Always remember to add docstrings to your implementations, as well as tests to validate them. - -## RecurrenceMicrostatesAnalysis.jl backend -**RecurrenceMicrostatesAnalysis.jl** supports multiple backends, depending on the usage context. Each backend is implemented based on an [`RMACore`](@ref), which defines how the package computes a [`histogram`](@ref). - -There are two main backend implementations: - -- [`CPUCore`](@ref): defines how distributions are computed on the CPU. The default implementation is [`StandardCPUCore`](@ref). -- [`GPUCore`](@ref): defines how distributions are computed on the GPU. The default implementation is [`StandardGPUCore`](@ref). - -```@docs -RMACore -CPUCore -GPUCore -StandardCPUCore -StandardGPUCore -``` -!!! info - Backend implementations are located in `src/core/cpu_core.jl` and `src/core/gpu/gpu_core.jl`. If you plan to implement a new backend, we recommend opening an [issue](https://github.com/DynamicsUFPR/RecurrenceMicrostatesAnalysis.jl/issues) on GitHub beforehand to discuss the design. - -### Implementing an RMACore -Although it is possible to implement a custom [`RMACore`](@ref) directly, we **do not recommend** doing so. Instead, we strongly suggest implementing either a [`CPUCore`](@ref) or a [`GPUCore`](@ref). - -This approach allows you to reuse utility functions such as `get_offsets` and `get_power_vector`, which expect an [`RMACore`](@ref) as input. 
Since these functions have different implementations for [`CPUCore`](@ref) and [`GPUCore`](@ref), writing a custom `RMACore` would require reimplementing them. - -To avoid this, define a new struct that subtypes [`CPUCore`](@ref) or [`GPUCore`](@ref). In this case, the only required method to implement is [`histogram`](@ref). For example: -```@example mycore -using Random -using ComplexityMeasures -import RecurrenceMicrostatesAnalysis as rma -struct MyCore{M<:rma.MicrostateShape, S<:rma.SamplingMode} <: rma.CPUCore{M, S} - shape::M - sampling::S -end -``` - -```@example mycore -function histogram( - core::MyCore, - x::rma.StateSpaceSet, - y::rma.StateSpaceSet -) - - # Construct the sampling space and determine the number of samples - space = rma.SamplingSpace(core.shape, x, y) - samples = rma.get_num_samples(core.sampling, space) - - # Precompute power vector and offsets - pv = rma.get_power_vector(core, core.shape) - offsets = rma.get_offsets(core, core.shape) - - # Allocate histogram - hist = zeros(Int, rma.get_histogram_size(core.shape)) - - # Task-local RNG (ignored for Full sampling) - local_rng = TaskLocalRNG() - - # Histogram computation - for m in 1:samples - # Get the sample. - i, j = rma.get_sample(core, core.sampling, space, local_rng, m) - # Compute the microstate index. - idx = rma.compute_motif(core.shape.expr, x, y, i, j, pv, offsets) - @inbounds hist[idx] += 1 - end - - return Counts(hist, eachindex(hist)) -end -``` - -!!! info - To ensure compatibility with the internal API, custom backends must support the keyword argument `threads` (for [`CPUCore`](@ref)) or `groupsize` (for [`GPUCore`](@ref)), as required by the [`distribution`](@ref) overloads. - -```@example mycore -data = rma.StateSpaceSet(rand(1000)) -histogram(MyCore(rma.Rect(rma.Standard(0.27), 2), rma.SRandom(0.05)), data, data) -``` - -!!! warning - CPU and GPU backends differ significantly in their execution models. 
In the GPU backend, random samples must be generated before histogram computation. The histogram itself is computed inside the `gpu_histogram!` kernel. - - See the [`StandardGPUCore`](@ref) implementation in `src/core/gpu_core.jl` for details. - -!!! danger - **RecurrenceMicrostatesAnalysis.jl** provides multiple backends that are only partially compatible. - - - CPU backends do not necessarily support spatial data. - - Spatial analyses require dedicated implementations. - - The GPU backend is fully incompatible with spatial data. - - Please consider these limitations carefully when extending or using backend functionality. - -## Adding a New Recurrence Function -### Steps -1. Define the mathematical expression of your recurrence function. It must return a binary value: `0` for non-recurrence and `1` for recurrence. -2. Define a new type `YourType <: `[`RecurrenceExpression`](@ref). Constant parameters (e.g., thresholds and metric) should be fields of this type. -3. Implement the appropriate [`recurrence`](@ref) dispatch: - - Time series: - `recurrence(expr::YourType, x::StateSpaceSet, y::StateSpaceSet, i::Int, j::Int)` - - Spatial data: - `recurrence(expr::YourType, x::AbstractArray{<:Real}, y::AbstractArray{<:Real}, i::NTuple{N,Int}, j::NTuple{M,Int})` -4. Add a docstring describing the mathematical definition and relevant references. -5. Add the recurrence expression to `docs/src/tutorial/recurrences.md`. -6. Add the expression to the [`RecurrenceExpression`](@ref) docstring. -7. Add tests to `test/distributions.jl` under the test set `recurrence expressions (with CPUCore)`. - -!!! warning - A recurrence function must always return `UInt(0)` or `UInt(1)`. - -!!! todo - To support GPU execution, also implement `gpu_recurrence(expr::YourType, x, y, i, j, n)`. See [`Standard`](@ref) for reference. - -## Adding a New Sampling Mode - -### Steps -1. Define how the sampling mode operates: which microstates are sampled, from which regions, and in what quantity. 
The [`SamplingSpace`](@ref) must be taken into account when designing the sampling logic. -2. Define a new struct that is a subtype of [`SamplingMode`](@ref). The struct may be empty (e.g. [`Full`](@ref)) or contain parameters such as a sampling rate (e.g. [`SRandom`](@ref)). -3. Implement the dispatch `get_num_samples(mode::YourType, space::SamplingSpace)` which determines the number of samples to be drawn given the sampling mode and the sampling space. Two sampling space types exist: `SSRect2` (time series) and `SSRectN` (spatial data). -4. Implement the dispatch `get_sample(core::RMACore, mode::YourType, space::SamplingSpace)` which returns the positions to be sampled. Separate implementations may be required for each [`RMACore`](@ref) and each [`SamplingSpace`](@ref). Full coverage is encouraged but not mandatory, provided that the supported cases are clearly documented in the docstring. -5. Add a docstring to your sampling mode describing its behavior and initialization. Follow the style of the existing sampling modes listed in [Implemented sampling modes](@ref). -6. Add your sampling mode to the list in `docs/src/tutorial/shapes_and_sampling.md`. -7. Add your type to the list in the [`SamplingMode`](@ref) docstring. -8. Add tests in `test/distributions.jl` under the test set `sampling mode (CPU backend)`. - -!!! warning - The `get_sample` logic differs between CPU and GPU backends. On the CPU, random samples are generated during histogram computation. On the GPU, samples must be generated beforehand, outside the kernel, and the kernel operates only on precomputed values. - -## Adding a new Microstate Shape -Defining a Microstate Shape is one of the most challenging tasks in this package (except for backend development, which is described by an [`RMACore`](@ref)). - -A microstate shape acts as an intermediate structure between the sampling process and the recurrence function. 
Given an initial RP position $(i, j)$, it determines which additional recurrences must be evaluated and computes them using the recurrence expression. The resulting microstate is then converted into a decimal representation, which is used as an index in the histogram. - -### Design considerations - -Before implementing a microstate shape, it is essential to define its structure and reading order. For example, square microstates are typically read row-wise, while triangular microstates may be read column-wise. Each position in the microstate structure must be associated with a power of two in order to convert the binary microstate into a decimal index. -```math -\begin{pmatrix} -2^0 & 2^1 & 2^2 \\ -2^3 & 2^4 & 2^5 \\ -2^6 & 2^7 & 2^8 -\end{pmatrix} -``` - -### Implementation steps -Define a new struct that is a subtype of [`MicrostateShape`](@ref). The struct must include a field `expr`, which stores the [`RecurrenceExpression`](@ref) used to compute recurrences at runtime. - -Unlike [`RecurrenceExpression`](@ref) and [`SamplingMode`](@ref), a [`MicrostateShape`](@ref) does not require the implementation of `recurrence` or `get_sample` methods. Microstate computation is handled by the unified `compute_motif` function for the [`CPUCore`](@ref), and by `gpu_compute_motif` for the [`GPUCore`](@ref). - -The only exception is spatial data, for which a custom `compute_motif` implementation is required. 
For example: -```julia -@inline function compute_motif( - shape::RectN, - x::AbstractArray{<: Real}, - y::AbstractArray{<: Real}, - idx::Vector{Int}, - itr::Vector{Int}, - power_vector::SVector{N, Int} -) where {N} - - index = 0 - dim = ndims(x) - 1 - copy!(itr, idx) - - @inbounds @fastmath for p in power_vector - - i = ntuple(k -> itr[k], dim) - j = ntuple(k -> itr[dim + k], length(shape.structure) - dim) - - index += recurrence(shape.expr, x, y, i, j) * p - - itr[1] += 1 - for k in 1:length(shape.structure) - 1 - if (itr[k] > idx[k] + (shape.structure[k] - 1)) - itr[k] = idx[k] - itr[k + 1] += 1 - else - break - end - end - end - - return index + 1 -end -``` - -Although time-series microstate shapes do not require a custom `compute_motif` implementation, three utility functions must be defined to describe the properties of the shape: - -1. `get_histogram_size(shape::YourType)` - Returns the length of the histogram, given by $2^\sigma$, where $\sigma$ is the number of recurrences in the microstate structure. - -2. `get_power_vector(core::RMACore, shape::YourType)` - Returns the vector of powers of two used to convert the microstate into its decimal representation. This function differs between CPU and GPU backends due to integer size (`Int` on CPU, `Int32` on GPU). - -3. `get_offsets(core::RMACore, shape::YourType)` - Returns the offsets relative to the initial position $(i, j)$ that define the remaining recurrence positions of the microstate. This function must be consistent with `get_power_vector` and also differs between CPU and GPU backends. - -!!! tip - For improved performance, we strongly recommend using `@generated` functions when implementing these utilities (except for spatial data). - -Additionally, a [`SamplingSpace`](@ref) must be defined for the new microstate shape. For time-series data, this is typically `SSRect2`, while spatial data require `SSRectN`. 
- -The sampling space must be initialized using the following constructor: -```julia -SamplingSpace( - ::MicrostateShape, - x::Union{StateSpaceSet, AbstractGPUVector{SVector{N, Float32}}}, - y::Union{StateSpaceSet, AbstractGPUVector{SVector{N, Float32}}} -) -``` - -After implementation, the following steps are required: - -1. Add a docstring to your [`MicrostateShape`](@ref), explaining its behavior and initialization. -2. Add the definition to the section [Implemented microstates shapes](@ref) in `docs/src/tutorial/shapes_and_sampling.md`. -3. Add the type to the list in the [`MicrostateShape`](@ref) docstring. -4. Add tests to `test/distributions.jl` under the test set `motif shapes (with CPUCore)`. - -### Example -First, we define the shape struct. -```@example line -using RecurrenceMicrostatesAnalysis -struct Line{N, B, E <: RecurrenceExpression} <: MicrostateShape - expr::E -end - -Line(expr::E, N; B = 2) where {E} = Line{N, B, E}(expr) -``` - -Next, we implements the three utils functions. -```@example line -@generated function get_histogram_size(::Line{N, B, E}) where {N, B, E} - size = B^(N) - return :( $size ) -end -``` -```@example line -@generated function get_power_vector(::CPUCore, ::Line{N, B, E}) where {N, B, E} - expr = :(SVector{$N}( $([:(B^$i) for i in 0:(N-1)]... ) )) - return expr -end -``` -```@example line -@generated function get_offsets(::CPUCore, ::Line{N, B, E}) where {N, B, E} - elems = [ :(SVector{2, Int}(0, $h)) for h in 0:(N - 1)] - return :( SVector{$N, $(SVector{2, Int})}( $(elems...) 
) ) -end -``` - -Finally, we define our sampling space: (can be only for time series) -```@example line -using GPUArraysCore, StaticArrays -SamplingSpace( - ::Line{N, B, E}, - x::Union{StateSpaceSet, AbstractGPUVector{SVector{D, Float32}}}, - y::Union{StateSpaceSet, AbstractGPUVector{SVector{D, Float32}}} -) where {N, B, E<:RecurrenceExpression, D} = RecurrenceMicrostatesAnalysis.SSRect2(length(x), length(y) - N + 1) -``` - -And done, the shape can be used 😃. Remember to document it! - -!!! warning - The backend needs to have access to the util functions, them it is important to do the implementation inside the package module. - -## Adding a New Quantifier -### Steps -1. Define a new quantifier type that is a subtype of [`QuantificationMeasure`](@ref). -2. Implement the corresponding [`measure`](@ref) dispatch used to compute the quantifier. -3. Add a docstring to the quantifier type, following the style of existing quantifiers. -4. Document the quantifier in `docs/src/tutorial/quantifiers.md`, including its definition, mathematical formulation, references, and examples when possible. -5. Add the quantifier to the list in the [`QuantificationMeasure`](@ref) docstring. -6. Add tests for the quantifier in `test/rqa.jl`. - - -## Adding a New GPU Metric -Since the [Distances.jl](https://github.com/JuliaStats/Distances.jl) package is not compatible with GPU execution, metric evaluations must be implemented manually to enable GPU support. - -### Steps -1. Define a new type that is a subtype of [`GPUMetric`](@ref). -2. Implement the dispatch - `gpu_evaluate(::YourMetric, x, y, i, j, n)` - where `x` and `y` are `AbstractGPUVector{SVector{N, Float32}}`, `i` and `j` are indices, and `n` is the dimensionality of the vectors. -3. Add a docstring describing the metric, including its mathematical definition and parameters. Include references when applicable. -4. Document the metric in the section [Implemented GPU metrics](@ref) in `docs/src/tutorial/gpu.md`. -5. 
Add a reference to the metric in the [`GPUMetric`](@ref) docstring. - -!!! danger - GPU backends in **RecurrenceMicrostatesAnalysis.jl** operate exclusively with `Float32`. The use of `Float64` is not supported. +# RecurrenceMicrostatesAnalysis.jl for devs \ No newline at end of file diff --git a/docs/src/examples.md b/docs/src/examples.md index c08ddca..f3aeb33 100644 --- a/docs/src/examples.md +++ b/docs/src/examples.md @@ -1,3 +1 @@ -# Examples for RecurrenceMicrostateAnalysis.jl - -TODO: Move here all code examples that do not need to be in the main tutorial. \ No newline at end of file +# Examples \ No newline at end of file diff --git a/docs/src/examples/ml.md b/docs/src/examples/ml.md deleted file mode 100644 index ad90a39..0000000 --- a/docs/src/examples/ml.md +++ /dev/null @@ -1,180 +0,0 @@ -# RMA with Machine Learning - -## Classification using `Flux.jl` -[Flux.jl](https://fluxml.ai/Flux.jl/stable/) is a user-friendly machine learning library in Julia that provides a wide range of tools for building and training neural networks. - -In this example, we demonstrate how to use **Flux.jl** together with -**RecurrenceMicrostatesAnalysis.jl** to train a multilayer perceptron (MLP) for classifying -a dynamical system, following the approach presented in [Spezzatto2024ML](@cite). 
- -### Importing required packages -```@example flux -using Flux -using DynamicalSystems -using RecurrenceMicrostatesAnalysis -``` - -### Generating data -We use the Lorenz system as the data source, which can be simulated using **DynamicalSystems.jl**: -```@example flux -function lorenz!(u, p, t) - σ, ρ, β = p - x, y, z = u - - return SVector( - σ * (y - x), - x * (ρ - z) - y, - x * y - β * z - ) -end -``` -```@example flux -function lorenz_trajectory(σ, ρ, β; u0 = rand(3), t = 250.0, Ttr = 1200.0, Δt_sample = 0.2) - p = (σ, ρ, β) - cds = ContinuousDynamicalSystem(lorenz!, u0, p) - x, _ = trajectory(cds, t; Ttr = Ttr, Δt = Δt_sample) - return x -end -``` - -We fix the parameters $\sigma = 10$ and $\beta = 8/3$, and vary $\rho \in {26.0, 27.0, 28.0, 29.0, 30.0}$. -The goal is to generate time series for each value of $\rho$ and train a classifier to identify which parameter value generated a given trajectory. - -### Creating training and test datasets -First, we define the classes and the size of the training and test datasets: -```@example flux -ρ_cls = [26.0, 27.0, 28.0, 29.0, 30.0]; -num_samples_to_test = 50; -num_samples_to_train = 200; -``` - -First, we define the classes and the size of the training and test datasets: -```@example flux -train_timeseries = Vector{StateSpaceSet}(undef, length(ρ_cls) * num_samples_to_train) -test_timeseries = Vector{StateSpaceSet}(undef, length(ρ_cls) * num_samples_to_test) - -train_labels = Vector{Float64}(undef, length(ρ_cls) * num_samples_to_train) -test_labels = Vector{Float64}(undef, length(ρ_cls) * num_samples_to_test) -``` - -The following function generates the data: -```@example flux -function generate_data!(labels, data, classes, num_elements_per_class) - c = 1 - for i ∈ eachindex(labels) - labels[i] = classes[c] - data[i] = lorenz_trajectory(10.0, classes[c], 8/3) - - if (i % num_elements_per_class == 0) - c += 1 - end - end -end -``` - -```@example flux -generate_data!(train_labels, train_timeseries, ρ_cls, 
num_samples_to_train) -train_timeseries -``` - -```@example flux -generate_data!(test_labels, test_timeseries, ρ_cls, num_samples_to_test) -test_timeseries -``` - -### Preparing the input features -For each time series, we compute the RMA distribution and store it as a feature vector. -```@example flux -microstate_n = 3 -train_dataset = Matrix{Float64}(undef, 2^(microstate_n * microstate_n) + 2, length(train_labels)) -test_dataset = Matrix{Float64}(undef, 2^(microstate_n * microstate_n) + 2, length(test_labels)) -``` - -The following function computes the RMA features: -```@example flux -function get_probs!(dataset, timeseries, n) - for i ∈ eachindex(timeseries) - th, s = optimize(Threshold(), RecurrenceEntropy(), timeseries[i], n) - dist = distribution(timeseries[i], th, n) - dataset[1, i] = th - dataset[2, i] = s - dataset[3:end, i] = dist[1:end] - end -end -``` - -```@example flux -get_probs!(train_dataset, train_timeseries, microstate_n) -train_dataset -``` -```@example flux -get_probs!(test_dataset, test_timeseries, microstate_n) -test_dataset -``` - -### Defining the neural network model -```@example flux -model = Chain( - Dense(2^(microstate_n * microstate_n) + 2 => 512, identity), - Dense(512 => 256, selu), - Dense(256 => 64, selu), - Dense(64 => length(ρ_cls)), - softmax -) - -model = f64(model) -``` - -### Training the MLP -First, we encode the labels using one-hot vectors: -```@example flux -train_labels = Flux.onehotbatch(train_labels, ρ_cls) -``` -```@example flux -test_labels = Flux.onehotbatch(test_labels, ρ_cls) -``` - -We then define the data loader and optimizer: -```@example flux -loader = Flux.DataLoader((train_dataset, train_labels), batchsize = 32, shuffle = true) -``` -```@example flux -opt = Flux.setup(Flux.Adam(0.005), model) -``` - -The training loop is: -```@example flux -for epc ∈ 1:50 - for (x, y) ∈ loader - _, grads = Flux.withgradient(model) do m - y_hat = m(x) - Flux.crossentropy(y_hat, y) - end - - Flux.update!(opt, model, grads[1]) 
- end -end -``` - -### Model evaluation -We compute the classification accuracy as follows: -```@example flux -using LinearAlgebra -function get_quatifiers(predict, trusty, classes) - conf = zeros(Int, length(classes), length(classes)) - sz = size(predict, 2) - - for i in 1:sz - mx_prd = findmax(predict[:, i]) - mx_trt = findmax(trusty[:, i]) - - conf[mx_prd[2], mx_trt[2]] += 1 - end - - return tr(conf) / (sum(conf) + eps()) -end -``` -```@example flux -accuracy = get_quatifiers(model(test_dataset), test_labels, ρ_cls) -accuracy * 100 -``` \ No newline at end of file diff --git a/docs/src/gpu.md b/docs/src/gpu.md new file mode 100644 index 0000000..1421af0 --- /dev/null +++ b/docs/src/gpu.md @@ -0,0 +1 @@ +# GPU \ No newline at end of file diff --git a/docs/src/index.md b/docs/src/index.md index 4eec86e..38a2f00 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,53 +1,39 @@ # RecurrenceMicrostatesAnalysis.jl -```@docs -RecurrenceMicrostatesAnalysis -``` - !!! info "Citation and credit" If you find this package useful, please consider giving it a star on GitHub and don't forget to cite [our work](https://doi.org/10.1063/5.0293708). 😉 +## Latest news +- Integration with **DynamicalSystems.jl** and **ComplexityMeasures.jl**. +- See the **CHANGELOG.md** (at the GitHub repo) for more! -To learn how to use the package, go first through the [Tutorial](@ref). -The full functionality is listed in [API](@ref). - -## About the documentation -The documentation of **RecurrenceMicrostatesAnalysis.jl** is designed to explain how to use the package while also introducing the theoretical background of the RMA framework. The bibliography used throughout the documentation is listed in the [References](@ref) section; **please remember to cite the appropriate works if you use them**. - -This welcome section begins with an introduction to the [Input data for RecurrenceMicrostatesAnalysis.jl](@ref). 
Understanding the data types used by the package and their intended purposes is essential before proceeding with the rest of the documentation. We also describe the [Output data from RecurrenceMicrostatesAnalysis.jl](@ref), explaining the type of data returned when computing recurrence microstate distributions. - -The **Tutorial** section explains how to use the package in practice. It starts with a brief introduction to RMA and demonstrates how to compute [Distributions](@ref) using **RecurrenceMicrostatesAnalysis.jl**. Next, we show how to estimate RQA [Quantifiers](@ref) using RMA and discuss quantifiers defined specifically for RMA. This material constitutes the *basic level* of the documentation and is sufficient to learn how to effectively use this package. +## Getting started +Start by reviewing [Input data for RecurrenceMicrostatesAnalysis.jl](@ref) and +[Output data from RecurrenceMicrostatesAnalysis.jl](@ref). Then, you can explore the +[Tutorial for RecurrenceMicrostatesAnalysis.jl](@ref) for a basic introduction to using the package. +You can also consult individual functions in the [API](@ref), and find applied examples in the +dedicated [Examples](@ref) section. -For users interested in more advanced topics, the [Recurrence Functions](@ref) section discusses different ways of computing recurrence between two states, while the [Shapes and Sampling](@ref) section explains motif shapes used to extract specific information from a Recurrence Plot. +We also provide a section [RecurrenceMicrostatesAnalysis.jl for devs](@ref) for those interested +in developing new methods for **RecurrenceMicrostatesAnalysis.jl**, such as new microstate shapes, +sampling modes, recurrence functions, or complexity estimators. -We also provide a pipeline for [GPU](@ref) computations, which we recommend reading if you plan to use the GPU backend. 
- -The documentation includes applied examples, such as: -- [RMA with Machine Learning](@ref) - -Finally, developers interested in contributing to RecurrenceMicrostatesAnalysis.jl are encouraged to read the [RecurrenceMicrostatesAnalysis.jl for Devs](@ref) section. - -## Input data for RecurrenceMicrostatesAnalysis.jl +### Input data for RecurrenceMicrostatesAnalysis.jl **RecurrenceMicrostatesAnalysis.jl** accepts three types of input, each associated with a different backend: +- [`StateSpaceSet`] or `Vector{<:Real}`: used for multivariate time series, datasets, or state-space representations. +- `AbstractArray{<:Real}`: used for spatial data, enabling RMA to be applied within the generalized framework of Spatial Recurrence Plots (SRP) [Marwan2007Spatial](@cite). We give some examples about its use in [Spatial data](@ref). +- `AbstractGPUVector`: used for time series analysis with the GPU backend. We provide some explanations about it in [GPU](@ref). -- [`StateSpaceSet`](@ref) — used for multivariate time series, datasets, or state-space representations. This type is employed when working with Recurrence Plots (RP) or Cross-Recurrence Plots (CRP). For RP and CRP analyses, we strongly recommend using this data type, as the backend is optimized for this context. - -- `AbstractArray{<: Real}` — used for spatial data, enabling RMA to be applied within the generalized framework of Spatial Recurrence Plots (SRP) [Marwan2007Spatial](@cite). Although a `Matrix` can be used as a substitute for a [`StateSpaceSet`](@ref), this is **not recommended**, since the `AbstractArray` backend is heavier and incompatible with some features. - -- `AbstractGPUVector` — used for time series analysis with the GPU backend. A better explanation is provided in the [GPU](@ref) and [Computing RMA distributions](@ref) sections. - -!!! warning - RMA with SRP is an open research field. 
We include this functionality in the package for exploratory purposes, but the method is not - yet mature enough for production use. Nevertheless, feel free to experiment with it in your research. 😃 +!!! todo "Spatial Recurrence Microstates Analysis" + RMA with SRP is an open research field. We include this functionality in the package for exploratory purposes, but the method is not yet mature enough for production use. Nevertheless, feel free to experiment with it in your research. 😃 ```@docs StateSpaceSet ``` -## Output data from RecurrenceMicrostatesAnalysis.jl -When computing the RMA distribution, RecurrenceMicrostatesAnalysis.jl returns a [`Probabilities`](@ref) structure. This type is -provided by [ComplexityMeasures.jl](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/complexitymeasures/stable/), allowing this package -to interoperate naturally with its tools and workflows. +### Output data from RecurrenceMicrostatesAnalysis.jl +When computing the RMA distribution, **RecurrenceMicrostatesAnalysis.jl** returns a [`Probabilities`](@ref). +This type is provided by **ComplexityMeasures.jl**, allowing this package to interoperate naturally with its tools and workflows. ```@docs Probabilities diff --git a/docs/src/tutorial.jl b/docs/src/tutorial.jl index bb0afbe..946d8ed 100644 --- a/docs/src/tutorial.jl +++ b/docs/src/tutorial.jl @@ -7,8 +7,8 @@ # !!! info "ComplexityMeasures.jl" # RecurrenceMicrostatesAnalysis.jl interfaces with, and extends, ComplexityMeasures.jl. -# It can enhance your understanding if you have first view the tutorial of -# ComplexityMeasures.jl. Regardless the current tutorial is written to be self-contained. +# It can enhance your understanding if you have first view the [tutorial of ComplexityMeasures.jl](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/complexitymeasures/stable/tutorial/). +# Regardless the current tutorial is written to be self-contained. 
# ## Crash-course into RMA @@ -20,7 +20,7 @@ # where $K$ is the length of the time series and $d$ is the dimension of the phase space. # The recurrence plot is defined by the recurrence matrix # ```math -# R_{i,j} = \Theta(\varepsilon - \|\vec x_i - \vec x_j\|), +# r_{(i,j)} = \Theta(\varepsilon - \|\vec x_i - \vec x_j\|), # ``` # where $\Theta(\cdot)$ denotes the Heaviside step function and $\varepsilon$ is the recurrence # threshold. @@ -31,13 +31,13 @@ # (c) a logistic map with a linear trend; # (d) Brownian motion. -# ![Image of four RPs with their timeseries](../assets/rps.png) +# ![Image of four RPs with their timeseries](assets/rps.png) # A recurrence microstate is a local structure extracted from an RP. For a given microstate # shape and size, the set of possible microstates is finite. For example, square microstates # with size $N = 2$ yield $16$ distinct configurations. -# ![Image of the 16 squared microstates to N = 2](../assets/microstates.png) +# ![Image of the 16 squared microstates to N = 2](assets/microstates.png) # Recurrence Microstates Analysis (RMA) uses the probability distribution of these microstates # as a source of information for characterizing dynamical systems. @@ -71,12 +71,13 @@ X # Notice that `X` is already a [`StateSpaceSet`](@ref). Because **RecurrenceMicrostatesAnalysis.jl** # is part of **DynamicalSystems.jl**, this data type is the preferred input type. -# Other types are also possible however, see the documentation of the -# [`RecurrenceMicrostates`](@ref) central type for more. +# Other types are also possible as we described in [Input data for RecurrenceMicrostatesAnalysis.jl](@ref). 
# Now, we specify the recurrence microstate configuration -ε = 0.27 +using RecurrenceMicrostatesAnalysis, Distances + +ε = 0.25 N = 2 rmspace = RecurrenceMicrostates(ε, N) @@ -84,7 +85,7 @@ rmspace = RecurrenceMicrostates(ε, N) probs = probabilities(rmspace, X) -# The [`probability`](@ref) function is the same function as in [`ComplexityMeasures`](@ref). +# The `probabilities` function is the same function as in **ComplexityMeasures.jl**. # Given an outcome space, that is a way to _symbolize_ input data into discrete outcomes, # `probabilities` return the probability (relative occurrence frequency) for each outcome. # And indeed, the recurrence microstates is an outcome space. @@ -94,19 +95,17 @@ probs = probabilities(rmspace, X) counts(rmspace, X) - # ## Recurrence microstates analysis (RMA) - # To actually analyze your data, there are two ways forwards. # One way, is to utilize these probabilities within the interface provided -# by [`ComplexityMeasures`](@ref) to calculate entropies. +# by **ComplexityMeasures.jl** to calculate entropies. # For example, the corresponding Shannon entropy is entropy(Shannon(), probs) # (note that the API of `ComplexityMeasures` is re-exported by `RecurrenceMicrostateAnalysis`). # This number corresponds to the **recurrence microstate entropy** as defined in our -# publication [`WhichPaperIscorrectToCite`](@cite). +# publication [Corso2018Entropy](@cite). # `ComplexityMeasures` allows the convenience syntax of @@ -119,65 +118,146 @@ entropy(Tsallis(), rmspace, X) # although we haven't explored alternative entropies in research yet. -# The secon way forwards is the more traditional recurrence quantification analysis -# route, where you estimate (approximate really) various quantities -# such as laminarity that fundamentally relate with the context of recurrences. 
-# For example, +# The second way forward is the more traditional recurrence quantification analysis +# route, where you estimate (approximately, really) various quantities +# such as laminarity that fundamentally relate to the context of recurrences. -# XXX TODO. +# These quantities are estimated using a `ComplexityEstimator`, similar to +# **ComplexityMeasures.jl**. We begin by defining our estimators: -# All of these quantities like laminarity are in fact _complexity measures_ -# which is why RecurrenceMicrostateAnalysis.jl fits so well within the -# interface of ComplexityMeasures.jl. +# - For recurrence rate: -# ## Optimizing recurrence specification +rr_estimator = RecurrenceRate(ε) -# In the above example we blindly selected the recurrence threshold `ε`. -# A better approach is to optimize it, so it (for example) maximizes -# the recurrence microstate entropy. -# This can be done with the [`optimize`](@ref) function +# - For laminarity: -ε, S = optimize(Threshold(), RecurrenceEntropy(), X, N) -rmspace = RecurrenceMicrostates(ε, N) -h = entropy(Shannon(), rmspace, X) -(h, S) +lam_estimator = RecurrenceLaminarity(ε) + +# - For determinism: + +det_estimator = RecurrenceDeterminism(ε) + +# Then, we use the `complexity` function to estimate the quantities: + +rr = complexity(rr_estimator, X) +lam = complexity(lam_estimator, X) +det = complexity(det_estimator, X) + +rr, lam, det -# TODO: The two numbers reported above are not the same. -# Perhaps the logarithm base is off? +# We can compare these values with the exact values computed using **RecurrenceAnalysis.jl**. -# ## Custom specification of recurrence microstates +using RecurrenceAnalysis -# When we write `rmspace = RecurrenceMicrostates(ε, N)`, -# we are in fact accepting a default definition for both what counts as a recurrence -# as well as what recurrence microstates to examine. -# We can alter either, by choosing the recurrence expression, or the specific -# microstate(s) we wish to analyze. 
For example +rp = RecurrenceMatrix(X, ε) +qt = rqa(rp) -expr = CorridorRecurrence(0.05, 0.27) -shape = MicrostateTriangle(lalala) -rmspace = RecurrenceMicrostates(; expression = expr, shape) -probabilities(rmspace, X) +qt[:RR], qt[:LAM], qt[:DET] -# More details are given in [`RecurrenceMicrostates`](@ref) -# and the [API](@ref) section of the docs. +# All of these quantities, like laminarity, are in fact _complexity measures_, +# which is why **RecurrenceMicrostateAnalysis.jl** fits so well within the +# interface of **ComplexityMeasures.jl**. +# We have also implemented a unified function to compute all RMA estimations, +# similar to the `rqa` function from **RecurrenceAnalysis.jl**. This is the +# `rma` function: -# ## Cross recurrence plots +rma(ε, X) -# For cross-recurrences, nearly nothing changes for you, nor for the source code -# of the code base! Simply call `function(..., rmspace, X, Y)`, adding an additional -# final argument `Y` corresponding to the second trajectory from which cross recurrences are estimated. +# ## Disorder -# For example, here are the cross recurrence microstate distribution for -# the original Henon map trajectory and one at slightly different parameters +# Recurrence Microstates Analysis also introduces a novel quantifier: +# "Disorder index via symmetry in recurrence microstates" (DISREM). This quantifier +# uses the equiprobability property of recurrence microstates, due to the disorder +# condition, to quantify the disorder of a sequence of data elements. We also estimate +# disorder using a complexity estimator: -set_parameter!(henon, 1, 1.35) -Y, t = trajectory(henon, total_time) -probabilities(rmspace, X, Y) +disorder = Disorder() -# This augmentation from one to two input data -# works for all functions discussed in this tutorial. -# Coincidentally, the same extension of `probabilities` to multivariate data -# is done in [Associations.jl](https://juliadynamics.github.io/Associations.jl/stable/). 
+
+# Then, we can estimate disorder for our time series:
+
+complexity(disorder, X)
+
+# Note that disorder is free of parameters (except for the microstate length and
+# the number of thresholds used, or its range). This is because the quantifier is
+# defined as the maximum total entropy of recurrence microstate classes, considering
+# a large range of thresholds. Moreover, this quantifier can only be estimated
+# through recurrence microstates.
+
+# It is also possible to estimate disorder while splitting the data into windows.
+# We prepared a special complexity estimator for this, aiming to facilitate
+# its usage. In this situation, you must define a [`WindowedDisorder`](@ref).
+
+window_len = 1000
+win_disorder = WindowedDisorder(window_len; step = 100)
+
+# Here, we are using windows of length 1000 points, moved in steps of 100 points.
+# Finally, we compute the quantifier:
+
+wd = complexity(win_disorder, X)
+
+# Plotting it:
+using CairoMakie
+lines(wd)
+
+# ## Optimization of free parameters
 
 # ## Spatial data
+
+# Finally, let's discuss spatial data. This is an exploratory
+# method implemented in the package based on Spatial Recurrence Plots [Marwan2007Spatial](@cite).
+# It means that the microstates can be a tensor structure, e.g., a hypercube.
+# However, it is also important to note that the number of bits (or recurrences)
+# inside the microstate increases, resulting in an exponential increase in
+# the probability distribution length, which can result in a lack of memory.
+
+# To exemplify its use, we will define here 2D square microstates $3\times 3$,
+# but that is a projection of the tensorial hypercubic microstate with side length $3$
+# into the first and third dimensions; that is:
+
+shape = (3, 1, 3, 1)
+srmspace = RecurrenceMicrostates(ε, shape)
+
+# As an example, let's use an RGB image:
+
+import Images
+img = Images.load("assets/example.jpg")
+
+# We need to reorganize it as an `Array{Float64, 3}`.
The first dimension +# must be our RGB values, while the other two need to be the horizontal and +# vertical positions of the pixels. + +W, H = size(img) +arr = Array{Float64}(undef, 3, H, W) +for i in 1:H, j in 1:W + c = img[i, j] + arr[1, i, j] = float(c.r) + arr[2, i, j] = float(c.g) + arr[3, i, j] = float(c.b) +end + +size(arr) == (3, H, W) + +# Finally, we can use the `probabilities` function to estimate our recurrence +# microstate distribution. + +probs = probabilities(srmspace, arr) + +# Although Marwan adapted some RQA quantities for spatial recurrence plots, +# they cannot be estimated using RMA. The only exception here is the recurrence +# rate, which can be estimated as: + +RecurrenceMicrostatesAnalysis.measure(rr_estimator, probs) + +# Another quantity that can be computed is the recurrence microstate entropy: + +entropy(Shannon(), probs) + +# RMA with spatial data is a very interesting and complex topic, and we +# have implemented it to motivate possible research using this feature. +# So feel free to try it and notify us if you have some success 😁 + +# Note that if you are using a grayscale image, you need to use an +# `Array` with size `(1, H, W)`. The first dimension stores the +# features of the data, which are used to compute the recurrences, +# i.e., $\vec{x}_{\vec{i}}$. \ No newline at end of file diff --git a/docs/src/tutorial/distributions.md b/docs/src/tutorial/distributions.md deleted file mode 100644 index 3242216..0000000 --- a/docs/src/tutorial/distributions.md +++ /dev/null @@ -1,60 +0,0 @@ -# Distributions - -This section introduces the computation of Recurrence Microstates Analysis (RMA) distributions -using **RecurrenceMicrostatesAnalysis.jl**. - -We begin with a [Quick start with RecurrenceMicrostatesAnalysis.jl](@ref), which demonstrates a -simple application example. Next, we present [A brief review](@ref) of Recurrence Plots (RP) -and RMA. 
Finally, we explain the [`distribution`](@ref) function in -[Computing RMA distributions](@ref), including the role of [Histograms](@ref). - -## Computing RMA distributions - -The computation of RMA distributions is the core functionality of -RecurrenceMicrostatesAnalysis.jl. All other tools in the package rely on these -distributions as their primary source of information. - -RMA distributions are computed using the [`distribution`](@ref) function: -```@docs -distribution -``` - -A commonly used convenience interface is: -```julia -distribution([x], ε::Float, n::Int; kwargs...) -``` - -## Spatial data - -The package also provides experimental support for spatial data, following *"Generalised Recurrence Plot Analysis for Spatial Data"* [Marwan2007Spatial](@cite). -In this context, input data are provided as `AbstractArray`s: -```math - \vec{x}_{\vec i} \in \mathbb{R}^m,\quad \vec{i} \in \mathbb{Z}^d -``` - -For example: -```@example quick_example -spatialdata = rand(Uniform(0, 1), (2, 50, 50)) -``` -Due to the high dimensionality of spatial recurrence plots, direct visualization is often -infeasible. RMA distributions provide a compact alternative by sampling microstates directly -from the data. 
- -**Examples:** -- Full $2 \times d$ microstates: -```@example quick_example -distribution(spatialdata, Rect(Standard(0.27), (2, 2, 2, 2))) -``` -```@example quick_example -spatialdata_1 = rand(Uniform(0, 1), (2, 50, 50)) -spatialdata_2 = rand(Uniform(0, 1), (2, 25, 25)) -distribution(spatialdata_1, spatialdata_2, Rect(Standard(0.27), (2, 2, 2, 2))) -``` - -- Projected microstates: -```@example quick_example -distribution(spatialdata, Rect(Standard(0.27), (2, 1, 2, 1))) -``` -```@example quick_example -distribution(spatialdata_1, spatialdata_2, Rect(Standard(0.27), (2, 1, 2, 1))) -``` diff --git a/docs/src/tutorial/gpu.md b/docs/src/tutorial/gpu.md deleted file mode 100644 index abb4f49..0000000 --- a/docs/src/tutorial/gpu.md +++ /dev/null @@ -1,72 +0,0 @@ -# GPU - -**RecurrenceMicrostatesAnalysis.jl** supports GPU acceleration for computing RMA distributions. -The GPU backend is implemented using -[KernelAbstractions.jl](https://juliagpu.github.io/KernelAbstractions.jl/stable/), which enables -portable GPU execution across different hardware backends. - -The GPU pipeline is implemented via a [`GPUCore`](@ref), which defines a single internal kernel -used to compute microstate histograms across supported devices. - -!!! compat - The GPU kernel is **not compatible with spatial data**. - -## Data requirements -The GPU backend supports **only `Float32` data**. Therefore, input datasets must be explicitly -converted before being used: -```@example gpu -using RecurrenceMicrostatesAnalysis, Distributions -data = StateSpaceSet(Float32.(rand(Uniform(0, 1), 1000))) -``` - -!!! danger - When using the GPU backend, inputs must be of type `Float32`. **RecurrenceMicrostatesAnalysis.jl** - is not compatible with `Float64` on GPU. - -## Recurrence expressions and metrics -When defining a [`RecurrenceExpression`](@ref) for GPU execution, the distance metric must be a -subtype of [`GPUMetric`](@ref). 
Metrics from [Distances.jl](https://github.com/JuliaStats/Distances.jl) are -**not supported** on GPU. - -For example: -```@example gpu -expr = Standard(0.27f0; metric = GPUEuclidean()) -``` - -!!! compat - The GPU backend is not compatible with metrics from [Distances.jl](https://github.com/JuliaStats/Distances.jl). - -## Moving data to GPU memory -To enable GPU computation, the data must be transferred to GPU memory. For example: - -- Using CUDA: -```julia -using CUDA -gpu_data = data |> CuVector -``` - -- Using Metal: -```julia -using Metal -gpu_data = data |> MtlVector -``` - -### Output handling -Results computed on the GPU are automatically transferred back to the CPU: -- [`histogram`](@ref) returns a [`Counts`](@ref) object. -- [`distribution`](@ref) returns a [`Probabilities`](@ref) object. - -No manual data transfer is required for the output. - -## Metrics for GPU -Since the GPU backend does not support [Distances.jl](https://github.com/JuliaStats/Distances.jl), -distance metrics must be implemented explicitly as subtypes of [`GPUMetric`](@ref). - -```@docs -GPUMetric -``` - -### Implemented GPU metrics -```@docs -GPUEuclidean -``` diff --git a/docs/src/tutorial/quantifiers.md b/docs/src/tutorial/quantifiers.md deleted file mode 100644 index 4022017..0000000 --- a/docs/src/tutorial/quantifiers.md +++ /dev/null @@ -1,343 +0,0 @@ -# Quantifiers - -Quantifiers are measures used to characterize specific properties of a dynamical system. -Currently, Recurrence Microstates Analysis (RMA) provides five quantifiers that can be -computed or estimated from a microstate distribution. - -Three of these correspond to classical Recurrence Quantification Analysis (RQA) measures: -**recurrence rate**, **determinism**, and **laminarity**. One is an information-theoretic -entropy measure. The final quantifier is **disorder**, which is defined directly in terms of -the microstate distribution and exploits symmetry properties of recurrence structures. 
- -All quantifiers implemented in the package inherit from [`QuantificationMeasure`](@ref), and -their computation is performed using the [`measure`](@ref) function. - -```@docs -QuantificationMeasure -measure -``` - -## Recurrence microstates entropy - -The Recurrence Microstates Entropy (RME) was introduced in 2018 and marks the starting -point of the RMA framework [Corso2018Entropy](@cite). It is defined as the Shannon entropy of -the RMA distribution: - -```math -RME = -\sum_{i = 1}^{2^\sigma} p_i^{(N)} \ln p_i^{(N)}, -``` - -where $n$ is the microstate size, $\sigma$ is the number of recurrence elements constrained -within the microstate (e.g. $\sigma = n^2$ for square microstates), and $p_i^{(N)}$ denotes -the probability of the microstate with decimal representation $i$. - -In **RecurrenceMicrostatesAnalysis.jl**, the RME is implemented by the [`RecurrenceEntropy`](@ref) struct. - -```@docs -RecurrenceEntropy -``` - -Since the output of the [`distribution`](@ref) function is a [`Probabilities`](@ref) object, the package also supports other information or complexity measures provided by [ComplexityMeasures.jl](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/complexitymeasures/stable/). - -### Quick example -As an example, consider a uniform random process. The RME as a function of the threshold -can be computed and visualized as follows: -```@example quick_rme_example -using RecurrenceMicrostatesAnalysis, Distributions, CairoMakie - -data_len = 10000 -resolution = 50 - -data = StateSpaceSet(rand(Uniform(0, 1), data_len)) -thres_range = range(0, 1, resolution) -results = zeros(Float64, resolution) - -for i ∈ eachindex(results) - dist = distribution(data, thres_range[i], 4) - results[i] = measure(RecurrenceEntropy(), dist) -end - -results ./= maximum(results) -scatterlines(thres_range, results) -``` - -## Recurrence rate -The Recurrence Rate (RR) quantifies the density of recurrence points in a recurrence plot [Webber2015Recurrence](@cite). 
In standard RQA, it is defined as -```math -RR = \frac{1}{K^2} \sum_{i,j=1}^K R_{i,j}. -``` -where $K$ is the length of the time series. - -When estimated using RMA, RR is defined as the expected recurrence rate over the microstate -distribution: -```math -RR = \sum_{i = 1}^{2^\sigma} p_i^{(N)} RR_i^{(N)}, -``` -where $RR_i^{(N)}$ denotes the recurrence rate of the $i$-th microstate. For square -microstates, this quantity is given by -```math -RR_i^{(N)} = \frac{1}{\sigma} \sum_{j,k=1}^N M_{j,k}^{i, (N)}, -``` -with $\mathbf{M}^{i, (N)}$ denoting the microstate structure corresponding to index $i$. - -In **RecurrenceMicrostatesAnalysis.jl**, RR is implemented by [`RecurrenceRate`](@ref) struct. -```@docs -RecurrenceRate -``` - -## Determinism -In standard RQA, Determinism (DET) measures the fraction of recurrence points forming -diagonal line structures [Webber2015Recurrence](@cite): -```math -DET = \frac{\sum_{l=d_{min}}^K l~H_D(l)}{\sum_{i,j=1}^K R_{i,j}}, -``` -where $H_D(l)$ is the histogram of diagonal line lengths, -```math -H_D(l)=\sum_{i,j]1}^K(1-R_{i-1,j-1})(1-R_{i+l,j+l})\prod_{k=0}^{l-1}R_{i+k,j+k}. -``` - -The estimation of DET using RMA is based on the work *"Density-Based Recurrence Measures from Microstates"* [daCruz2025RQAMeasures](@cite). In that work, the DET expression is rewritten as -```math -DET = 1 - \frac{1}{K^2~RR}\sum_{l=1}^{l_{min}-1} l~H_D(l), -``` -and the diagonal histogram $H_D(l)$ is related to the RMA distribution through correlations between microstate structures: -```math -\frac{H_D(l)}{(K-l-1)^2}=\vec d^{(l)}\cdot\mathcal{R}^{(l+2)}\vec p^{(l+2)}. -``` - -For the commonly used case $l_{min} = 2$ (currently the only case implemented in the package), this leads to the approximation -```math -DET\approx 1 - \frac{\vec d^{(1)}\cdot\mathcal{R}^{(3)}\vec p^{3}}{RR}. -``` - -The correlation term $\vec d^{(1)}\cdot\mathcal{R}^{(3)}\vec p^{3}$ can be simplified by explicitly identifying the microstates selected by $\vec d^{(1)}$. 
-These correspond to microstates of the form -```math -\begin{pmatrix} -\xi & \xi & 0 \\ -\xi & 1 & \xi \\ -0 & \xi & \xi \\ -\end{pmatrix}, -``` -where $\xi$ denotes an unconstrained entry. There are $64$ such microstates among the $512$ possible square microstates of size $N = 3$. -Defining the class $C_D$ as the set of microstates with this structure, DET can be estimated as: -```math -DET\approx 1 - \frac{\sum_{i\in C_D} p_i^{(3)}}{RR}, -``` -where $p_i^{(3)}$ is the probability of the $i$-th microstate in an RMA distribution of square microstates with size $N = 3$. - -A futher simplification can be obtained by defining [`Diagonal`](@ref)-shaped microstates [Ferreira2025RMALib](@cite). -In the structure above, the unconstrained entries $\xi$ may represent either recurrences or non-recurrences, leading to the need for all $64$ combinations. -Diagonal microstates focus directly on the relevant information, namely the diagonal pattern $0~1~0$. -In this case, DET can be approximated as -```math -DET\approx 1 - \frac{p_3^{(3)}}{RR}, -``` -where $p_3^{(3)}$ is the probability of observing the diagonal motif $0~1~0$. - -In **RecurrenceMicrostatesAnalysis.jl**, the computation of DET is implemented by the [`Determinism`](@ref) struct. -```@docs -Determinism -``` - -## Laminarity - -Laminarity (LAM) is another classical RQA quantifier that measures the proportion of recurrence points forming vertical (line) structures in a recurrence plot. -It is defined as -```math -LAM = \frac{\sum_{l=v_{min}}^K l~H_V(l)}{\sum_{i,j=1}^K R_{i,j}}, -``` -where -```math -H_V(l)=\sum_{i,j]1}^K(1-R_{i,j-1})(1-R_{i,j+l})\prod_{k=0}^{l-1}R_{i,j+k}. -``` - -The estimation of LAM using RMA is also based on the work *"Density-Based Recurrence Measures from Microstates"* [daCruz2025RQAMeasures](@cite) and follows the same logical used for determinsm. 
-In this case, the estimation requires microstates of the form -```math -\begin{pmatrix} -0 & 1 & 0 \\ -\xi & \xi & \xi \\ -\xi & \xi & \xi \\ -\end{pmatrix}, -``` -which defines the class $C_L$ of microstates used to estimate LAM as -```math -LAM\approx 1 - \frac{\sum_{i\in C_L} p_i^{(3)}}{RR}. -``` - -As with determinism, this process can be further simplified by defining a `line` motif [Ferreira2025RMALib](@cite), which captures only the relevant information, namely vertical line patterns of the form $0~1~0$ in the recurrence plot. -In this case, LAM can be approximated as -```math -LAM\approx 1 - \frac{p_3^{(3)}}{RR}, -``` -where $p_3^{(3)}$ denotes the probability of observing the line motif $0~1~0$. - -In **RecurrenceMicrostatesAnalysis.jl**, the computation of LAM is implemented by the [`Laminarity`](@ref) struct. - -```@docs -Laminarity -``` - -## Disorder -The disorder quantifier is implemented based on the work *“Quantifying Disorder in Data”* [Flauzino2025Disorder](@cite). -It is a novel and powerful tool for data analysis, allowing the distinction between stochastic and deterministic time series, as well as between different types of stochastic dynamics, such as white, pink, and red Gaussian noise. - -Disorder is implemented using square recurrence microstates, which can be permuted by rows and columns and transposed (see [Permutations and Transposition](@ref)). -This procedure generates a set of equivalent microstates given by -```math -\mathcal{M}_a(\mathbf{M}) = \bigcup_{\sigma_i,\sigma_j\in S_N}\{\mathcal{L}_{\sigma_j}\mathcal{T}\mathcal{L}_{\sigma_i}\mathbf{M},\quad\mathcal{T}\mathcal{L}_{\sigma_j}\mathcal{T}\mathcal{L}_{\sigma_i}\mathbf{M}\}. -``` -This defines an equivalence class of microstates denoted by $\mathcal{M}_a$. - -The probability of observing a given microstate $\mathbf M^{i,(N)}$ in the recurrence plot, denoted by $p_i^{(N)}$, is computed using **RecurrenceMicrostatesAnalysis.jl**. 
-To compute disorder, the probabilities of microstates belonging to the same class must be normalized. -Thus, for $\mathbf M^{i, (N)} \in \mathcal{M}_a$, the normalized probability within the class is defined as -```math -p_i^{(a, N)} = \frac{p_i^{(N)}}{\sum_{\mathbf{M}_j^{(N)} \in \mathcal{M}_a}~p_j^{(N)}}. -``` - -The information entropy associated with the probability distribution of microstates in the class $\mathcal{M}_a$ is then defined as -```math -\xi_a(\varepsilon) \stackrel{\mathrm{def}}{=} -\sum_{\mathbf{M}_i^{(N)} \in \mathcal{M}_a} p_i^{(a, N)} \ln p_i^{(a, N)}. -``` -This entropy is normalized by $\ln m_a$, where $m_a$ is the number of microstates in the class $\mathcal{M}_a$. -Using **RecurrenceMicrostatesAnalysis.jl**, the normalized quantity $\xi_a(\varepsilon) / \ln m_a$ can be computed as -```@example disorder -using Distributions, RecurrenceMicrostatesAnalysis -data = StateSpaceSet(rand(Uniform(0, 1), 1000)) -dist = distribution(data, 0.27, 4; sampling = Full()) - -class = 102 -measure(Disorder(4), class, dist) -``` - -The total entropy over all classes for a given threshold $\varepsilon$ is defined as -```math -\xi(\varepsilon) \stackrel{\mathrm{def}}{=} \frac{1}{A} \sum_{a = 1}^A \frac{\xi_a(\varepsilon)}{\ln m_a}, -``` -where $A$ is the number of contributing classes and defines the maximum possible amplitude. -This normalization factor can also be computed using **RecurrenceMicrostatesAnalysis.jl**: -```@example disorder -A = RecurrenceMicrostatesAnalysis.get_disorder_norm_factor(Disorder(4), data) -``` - -And the total entropy: -```@example disorder -measure(Disorder(4), dist, A) -``` - -Finally, the the *disorder index via symmetry in recurrence microstates* (DISREM), or simply **disorder**, is defined as -```math -\Xi = \max_{\varepsilon} \xi(\varepsilon). -``` - -In **RecurrenceMicrostatesAnalysis.jl**, this quantifier is implemented by the [`Disorder`](@ref) struct. 
- -```@docs -Disorder -``` - -### Computing disorder for compatible time series - -Consider a scenario in which a long time series is split into multiple windows. **RecurrenceMicrostatesAnalysis.jl** provides a compact interface to compute the disorder for each window. - -As an example, consider a time series with 10,000 points consisting of a sine wave with added white noise, alternating every five windows: -```@example disorder -data_len = 10_000 -window_len = 500 -``` -```@example disorder -function data_gen(t) - x = sin.(6*π .* t) - - count = 0 - for i in 1:window_len:data_len - if count < 5 - x[i:(i-1)+window_len] .+= rand(Normal(0, 0.25), window_len) - elseif count ≥ 9 - count = -1 - end - - count += 1 - end - - return x -end -``` -```@example disorder -using CairoMakie - -t = range(0, 20, data_len) -data = data_gen(t) - -lines(t, data) -``` - -The disorder can be computed using the following method: -```julia -measure(settings::Disorder{N}, dataset::Vector{StateSpaceSet}, th_min::Float64, th_max::Float64) -``` - -To apply it, the time series must first be split into a vector of [`StateSpaceSet`](@ref) objects: -```@example disorder -windows = [ data[(i + 1):(i + window_len)] for i in 0:window_len:(length(data) - window_len)] -dataset = Vector{StateSpaceSet}(undef, length(windows)) -for i ∈ eachindex(windows) - dataset[i] = StateSpaceSet(windows[i]) -end - -dataset -``` - -Next, the threshold range `th_min` and `th_max` must be defined. There are two possible approaches: -1. Use the full range of admissible threshold values by setting `th_min = 0` and `th_max = maximum(pairwise(Euclidean(), data, data))`, and choosing a small step size via the `num_tests` keywork argument (e.g., `num_tests = 1000`). This approach yields the global maximum disorder values but can be computationally expensive. - -2. Use a small interval centered around a known threshold value. This is the recommended approach and is adopted here. 
- -To obtain a suitable reference threshold, we select a subset of windows and compute the optimal disorder threshold using the [`optimize`](@ref) function: -```@example disorder -using Statistics - -function find_threshold(disorder, data) - ths = zeros(Float64, 10) - for i ∈ eachindex(ths) - idx = rand(1:length(windows)) - ths[i] = optimize(Threshold(), disorder, data[idx])[1] - end - - μ = mean(ths) - σ = std(ths) - - return (max(0.0, μ - 1.5 * σ), μ + 1.5 * σ) -end -``` -```@example disorder -dis = Disorder(4) -th_min, th_max = find_threshold(dis, dataset) -``` - -Finally, the disorder can be computed for all windows using the [`measure`](@ref) function: -```@example disorder -results = measure(dis, dataset, th_min, th_max) -``` - -```@example disorder -scatterlines(results) -``` - -!!! info - When computing Disorder for compatible time series, the same threshold range is used for all windows. - However, the disorder value of each window corresponds to the maximum over that range, and therefore the optimal threshold may differ between windows. - -!!! tip - Disorder can also be computed using the [`GPU`](@ref) backend: - ```julia - measure(settings::Disorder{N}, dataset::Vector{<:AbstractGPUVector{SVector{D, Float32}}}, th_min::Float32, th_max::Float32) - ``` - The procedure is identical, but each window must first be transferred to the GPU: - ```julia - for i ∈ eachindex(windows) - dataset[i] = StateSpaceSet(Float32.(windows[i])) |> CuVector - end - ``` \ No newline at end of file diff --git a/docs/src/tutorial/recurrences.md b/docs/src/tutorial/recurrences.md deleted file mode 100644 index b276b91..0000000 --- a/docs/src/tutorial/recurrences.md +++ /dev/null @@ -1,31 +0,0 @@ -# Recurrence Functions - -A recurrence function determines whether two states of a dynamical system, -$\vec{x}$ and $\vec{y}$, are recurrent. 
- -It is defined by an expression of the form -```math -R(\vec{x}, \vec{y}) = \Theta(\varepsilon - |\vec{x} - \vec{y}|), -``` -where $\Theta(\cdot)$ denotes the Heaviside step function and $\varepsilon$ is the -threshold parameter defining the maximum distance between two states for them to be -considered $\varepsilon$-recurrent. - -This definition differs from the expression used to construct the full recurrence -matrix, introduced in the section [A brief review](@ref). While the recurrence matrix -evaluates all pairwise recurrences in a time series, a recurrence function computes -a single recurrence value between two states. - -In **RecurrenceMicrostatesAnalysis.jl**, recurrence functions are implemented via -the [`RecurrenceExpression`](@ref) abstraction. The actual recurrence evaluation is performed -by implementing the [`recurrence`](@ref) function for the corresponding expression type. - -```@docs -RecurrenceExpression -recurrence -``` -### Implemented recurrence functions -```@docs -Standard -Corridor -``` \ No newline at end of file diff --git a/docs/src/tutorial/shapes_and_sampling.md b/docs/src/tutorial/shapes_and_sampling.md deleted file mode 100644 index 91a2588..0000000 --- a/docs/src/tutorial/shapes_and_sampling.md +++ /dev/null @@ -1,45 +0,0 @@ -# Shapes and Sampling - -This section describes the microstate shapes used in Recurrence Microstates Analysis (RMA) -and the sampling strategies employed to construct histograms and distributions. - -## Variations of microstates shapes -A microstate shape defines the local recurrence pattern extracted from a recurrence plot. -In **RecurrenceMicrostatesAnalysis.jl**, microstate shapes are represented by subtypes of -[`MicrostateShape`](@ref). - -A `MicrostateShape` specifies: -- which relative positions are sampled to evaluate recurrences, and -- how the resulting binary recurrence pattern is mapped to a decimal representation. 
- -The internal conversion from a microstate pattern to its decimal index is performed by the -`compute_motif` function. - -```@docs -MicrostateShape -``` - -### Implemented microstates shapes -```@docs -Rect -Triangle -Diagonal -``` - -## Sampling strategies -The sampling strategy determines which microstates are selected during histogram or -distribution construction. - -Sampling behavior is defined by subtypes of [`SamplingMode`](@ref), while the set of valid -sampling positions is determined by a [`SamplingSpace`](@ref). - -```@docs -SamplingMode -SamplingSpace -``` - -### Implemented sampling modes -```@docs -SRandom -Full -``` \ No newline at end of file diff --git a/docs/src/tutorial/utils.md b/docs/src/tutorial/utils.md deleted file mode 100644 index dc7ed18..0000000 --- a/docs/src/tutorial/utils.md +++ /dev/null @@ -1,174 +0,0 @@ -# Utils -This section describes utility functionalities provided by -**RecurrenceMicrostatesAnalysis.jl**, including parameter optimization and operations on -recurrence microstates. - -## Optimizing a parameter -When working with recurrence plots (RPs), a well-known challenge is selecting an appropriate -recurrence threshold. Within the RMA framework, this issue is addressed by optimizing an -information-theoretic or complexity-based measure with respect to the threshold parameter. - -In practice, it is common to determine the threshold that maximizes either the -[`RecurrenceEntropy`](@ref) or the [`Disorder`](@ref) quantifier -[Prado2023Sampling](@cite). - -This optimization procedure is implemented via the [`optimize`](@ref) function, which -computes the optimal value of a given [`Parameter`](@ref). - -```@docs -Parameter -optimize -``` - -### Parameters -```@docs -Threshold -``` - -## Operations on microstates - -The package also provides a set of operations that can be applied to recurrence microstates -or to their decimal representations. 
-```@docs -Operation -operate -``` - -### Permutations and Transposition -When working with square microstates, it is natural to consider symmetry operations such as -row and column permutations, as well as transposition. These operations are particularly -important for defining equivalence classes used in the computation of the [`Disorder`](@ref) quantifier. - -To illustrate these operations, we consider a square microstate of size $3 \times 3$: -```math -\mathbf{M} = \begin{pmatrix} -a & b & c \\ -d & e & f \\ -g & h & i -\end{pmatrix}. -``` - -!!! info - The primary purpose of these operations is to define equivalence classes of microstates - used in the computation of [`Disorder`](@ref). - -#### Permutations of Rows -Let $\sigma \in S_N$ be a permutation, where $S_N$ is the symmetric group, and let $\mathcal{L}_\sigma$ denote the operator that -permutes the rows of a microstate $\mathbf{M}$ according to $\sigma$. - -For example, for $\sigma = 132$, the third and second rows are exchanged, while the first row -remains unchanged: -```math -\mathcal{L}_{132}\mathbf{M} = \begin{pmatrix} -a & b & c \\ -g & h & i \\ -d & e & f -\end{pmatrix}. -``` - -For a given microstate size $n$, all possible row (or column) permutations can be generated using the [Combinatorics.jl](https://juliamath.github.io/Combinatorics.jl/stable/) package: -```@example permutation -using Combinatorics - -n = 3 -Sn = collect(permutations(1:n)) -``` -```@example permutation -σ = Sn[2] -``` - -The permutation is applied using [`PermuteRows`](@ref) operation. For example, consider -the square $3\times 3$ microstate with decimal index $I = 237$: -```math -\mathbf{M} = \begin{pmatrix} -0 & 0 & 1 \\ -1 & 0 & 1 \\ -1 & 1 & 0 -\end{pmatrix}. 
-``` -```@example permutation -using RecurrenceMicrostatesAnalysis - -shape = Rect(n, n) -row_permutation = PermuteRows(shape) - -operate(row_permutation, 237, σ) -``` - -The result is the microstate with decimal identifier $I = 239$, corresponding to: -```math -\mathcal{L}_{132}\mathbf{M} = \begin{pmatrix} -0 & 0 & 1 \\ -1 & 1 & 0 \\ -1 & 0 & 1 -\end{pmatrix}. -``` - -```@docs -PermuteRows -``` - -#### Permutations of Columns -Column permutations follow the same logic as row permutations, but are applied to the -columns of the microstate. - -Let $\mathcal{C}_\sigma$ denote the operator that permutes the columns of $\mathbf{M}$ -according to $\sigma \in S_N$. For $\sigma = 132$, the transformation is given by: -```math -\mathcal{C}_{132}\mathbf{M} = \begin{pmatrix} -a & c & b \\ -d & f & e \\ -g & i & h -\end{pmatrix}. -``` - -Using the same example of $I = 237$, column permutation is performed using the [`PermuteColumns`](@ref) operation: -```@example permutation -col_permutation = PermuteColumns(shape; S = Sn) -``` -```@example permutation -operate(col_permutation, 237, 2) -``` - -The resulting microstate has decimal identifier $I = 347$, corresponding to: -```math -\mathcal{C}_{132}\mathbf{M} = \begin{pmatrix} -0 & 1 & 0 \\ -1 & 1 & 0 \\ -1 & 0 & 1 -\end{pmatrix}. -``` - -```@docs -PermuteColumns -``` - -#### Transposition -Transposition exchanges rows and columns of a microstate. Let $\mathcal{T}$ denote the -transposition operator: -```math -\mathcal{T}\mathbf{M} = \begin{pmatrix} -a & d & g \\ -b & e & f \\ -c & f & i -\end{pmatrix}. -``` - -Using the same example microstate with identifier $I = 237$, transposition is performed via the [`Transpose`](@ref) operator: -```@example permutation -transposition = Transpose(shape) -operate(transposition, 237) -``` - -The resulting microstate has decimal identifier $I = 231$ corresponding to: -```math -\mathcal{T}\mathbf{M} = \begin{pmatrix} -0 & 1 & 1 \\ -0 & 0 & 1 \\ -1 & 1 & 0 -\end{pmatrix}. 
-``` - -```@docs -Transpose -``` \ No newline at end of file diff --git a/src/core/recurrence.jl b/src/core/recurrence.jl index 9139b23..edad921 100644 --- a/src/core/recurrence.jl +++ b/src/core/recurrence.jl @@ -6,14 +6,14 @@ export RecurrenceExpression, recurrence """ RecurrenceExpression -Abstract supertype for recurrence expressions implemented in the package. +Abstract supertype for recurrence expressions, \$r_{i,j}\$. Concrete subtypes of `RecurrenceExpression` must implement the [`recurrence`](@ref) function, which defines how recurrence between two states is evaluated. ### Implementations -- [`Standard`](@ref) -- [`Corridor`](@ref) +- [`ThresholdRecurrence`](@ref) +- [`CorridorRecurrence`](@ref) """ abstract type RecurrenceExpression{T, M} end @@ -23,36 +23,36 @@ abstract type RecurrenceExpression{T, M} end # Based on time series: (CPU) #......................................................................................... """ - recurrence(expr::RecurrenceExpression, [x], [y], [...]) + recurrence(expr::RecurrenceExpression, [x], [y], [...]) → UInt8 -Define how the recurrence state between `[x]` and `[y]` is computed for a given +Defines how the recurrence state between `[x]` and `[y]` is computed for a given [`RecurrenceExpression`](@ref). -The additional arguments (`[...]`) depend on whether the recurrence is computed for -time-series or spatial data. +The additional arguments (`[...]`) depend on whether recurrence is computed for +time series or spatial data. -### Time-series recurrence +## Time series recurrence ```julia function recurrence(expr::RecurrenceExpression, x::StateSpaceSet, y::StateSpaceSet, ::Int, ::Int) ``` -The two `Int` arguments correspond to the positions \$(i, j)\$ in the time series used to -evaluate recurrence. +The two `Int` arguments correspond to the positions \$(i, j)\$ in the time series +used to evaluate recurrence. 
-### Spatial recurrence +## Spatial recurrence ```julia function recurrence(expr::RecurrenceExpression, x::AbstractArray{<:Real}, y::AbstractArray{<:Real}, ::NTuple{N, Int}, ::NTuple{M, Int}) ``` -The two `NTuple{N, Int}` arguments correspond to the positions \$(\\vec i, \\vec j)\$ in the spatial data -used to evaluate recurrence. +The two `NTuple{N, Int}` arguments correspond to the positions \$(\\vec{i}, \\vec{j})\$ +in the spatial data used to evaluate recurrence. !!! info - To support GPU execution, recurrence expressions must implement `gpu_recurrence` instead of `recurrence`. - The arguments are equivalent, with the addition of the phase-space dimension - as an input parameter. See [`Standard`](@ref) for a reference implementation. + To support GPU execution, recurrence expressions must implement `gpu_recurrence` + instead of `recurrence`. The arguments are equivalent, with the addition of the + phase-space dimension as an input parameter. See [`ThresholdRecurrence`](@ref) for a reference implementation. -### Output +## Output The `recurrence` function must always return a `UInt8`: `0` for non-recurrence and `1` for recurrence. """ function recurrence( diff --git a/src/core/recurrence_microstates.jl b/src/core/recurrence_microstates.jl index 041dfef..3aefbf6 100644 --- a/src/core/recurrence_microstates.jl +++ b/src/core/recurrence_microstates.jl @@ -3,9 +3,117 @@ export RecurrenceMicrostates ########################################################################################## # Recurrence Microstate ########################################################################################## -# TODO: Write RecurrenceMicrostates documentation. """ - RecurrenceMicrostates + RecurrenceMicrostates <: CountBasedOutcomeSpace + +It defines an `OutcomeSpace` from **ComplexityMeasures.jl**, representing a recurrence microstate +outcome space. + +## Description + +Let \$\\mathscr{X} = (\\vec{x}_i)_{i = 1}^{K}\$ be a sequence of data elements. 
From \$\\mathscr{X}\$, +we can extract subsequences \$\\mathbf{X}_{(p)}\$ of length \$N\$ such that +\$\\mathbf{X}_{(p)} = (\\vec{x}_i)_{i=p+1}^{p+N}\$ for \$0 \\leq p \\leq K - N\$. + +Let \$r_{(i,j)}\$ be a recurrence function that takes two elements, \$\\vec{x}_i\$ and \$\\vec{x}_j\$, and returns whether they +are recurrent. For example, the threshold-based recurrence is given by +\$r_{(i,j)} = \\Theta(\\varepsilon - \\|\\vec{x}_i - \\vec{x}_j\\|)\$, +where \$\\Theta\$ is the Heaviside function, \$\\|\\cdot\\|\$ is an appropriate `metric` for the data, and \$\\varepsilon\$ is +the recurrence threshold. In this representation, a recurrence is denoted by **1** and a non-recurrence by **0**. + +For two subsequences belonging to \$\\mathscr{X}\$, we can generate, using the recurrence function, a +matrix \$\\mathbf{M} \\equiv \\mathbf{R}(\\mathbf{X}_{(p)}, \\mathbf{X}_{(q)})\$: +```math +\\mathbf{R}(\\mathbf{X}_{(p)}, \\mathbf{X}_{(q)}) \\coloneqq \\begin{bmatrix} +r_{p+1,q+1} & \\dots & r_{p+1,q+N} \\\\ +\\vdots & \\ddots & \\vdots \\\\ +r_{p+N,q+1} & \\dots & r_{p+N,q+N} +\\end{bmatrix}. +``` +This matrix is known as a recurrence microstate [Corso2018Entropy](@cite) of length \$N\$ when \$N \\ll K\$, and represents +a local portion of the recurrence plot \$\\mathbf{R}(\\mathscr{X}, \\mathscr{X})\$. + +## Implementation +To define a recurrence microstate, three components are required: +1. The recurrence function, \$r_{(i,j)}\$, used to compute recurrences (see [`RecurrenceExpression`](@ref)). +2. The microstate shape and its size (see [`MicrostateShape`](@ref)). +3. The method used to extract microstates from \$\\mathscr{X}\$ (see [`SamplingMode`](@ref)). + +## Constructors +```julia +RecurrenceMicrostates(expr::RecurrenceExpression, shape::MicrostateShape; kwargs...) +RecurrenceMicrostates(expr::RecurrenceExpression, N::Int; kwargs...) # It uses square microstates. 
+``` + +- Using [`ThresholdRecurrence`](@ref): +```julia +RecurrenceMicrostates(ε::Real, N::Int; kwargs...) +RecurrenceMicrostates(ε::Real, shape::MicrostateShape; kwargs...) +``` + +- Using [`CorridorRecurrence`](@ref): +```julia +RecurrenceMicrostates(ε_min::Real, ε_max::Real, N::Int; kwargs...) +RecurrenceMicrostates(ε_min::Real, ε_max::Real, shape::MicrostateShape; kwargs...) +``` + +## Spatial generalization +We also implement a spatial generalization of recurrence microstate analysis based on [Marwan2007Spatial](@cite). +It operates similarly to standard RMA, but retrieves microstates from a recurrence tensor constructed +from the data (without explicitly constructing the full tensor). **This is an experimental feature included +as an exploratory addition to the package; there is currently no established literature describing how to use +this generalization.** If you are curious, feel free to experiment with it 🙂 + +In this setting, the input data is no longer a sequence of elements, but a spatial structure with \$d\$ dimensions. +Each element of this structure can be either a `Real` or a `Vector` containing features associated with the corresponding +position. Following [Marwan2007Spatial](@cite), the resulting recurrence space has \$2 \\times d\$ dimensions. For example, +for an image we have \$d = 2\$, so the recurrence space has 4 dimensions. + +The recurrence expression is reformulated to operate on vector indices: \$r_{(\\vec{i}, \\vec{j})}\$. For example, the threshold-based +recurrence is given by +\$r_{(\\vec{i}, \\vec{j})} = \\Theta(\\varepsilon - \\|\\mathbf{x}_{\\vec{i}} - \\mathbf{x}_{\\vec{j}}\\|)\$, +where the coordinate vectors are defined as: +```math +\\vec{i} = \\sum_{q = 1}^{d} i_q \\hat{e}_q, +``` +where \$\\hat{e}_q\$ is the unit vector representing the corresponding dimension \$q\$. + +Using this formulation, recurrence microstates are defined as before. 
However, since the recurrence plot is now a tensor,
+microstates may have hypergeometric shapes, such as hypercubes or hyperrectangles, or correspond to projections onto
+lower-dimensional subspaces. This provides greater flexibility in capturing structure, but also increases the complexity
+of using the method.
+
+Moreover, since RQA is defined differently for spatial recurrence plots, the implementations in
+**RecurrenceMicrostatesAnalysis.jl** are generally not compatible with this generalization, except for
+[`RecurrenceRate`](@ref) and **Entropy**, which can be estimated from any recurrence microstate distribution.
+
+!!! compat "Shape and sampling compatibility"
+    Some shapes and sampling modes are not compatible with the spatial generalization, e.g. [`TriangleMicrostate`](@ref) and [`Full`](@ref).
+
+### Spatial constructors
+These constructors use a hypergeometric version of [`RectMicrostate`](@ref), defined by an `NTuple` called `structure`.
+
+```julia
+RecurrenceMicrostates(expr::RecurrenceExpression, structure::NTuple; kwargs...)
+RecurrenceMicrostates(ε::Real, structure::NTuple; kwargs...)
+RecurrenceMicrostates(ε_min::Real, ε_max::Real, structure::NTuple; kwargs...)
+```
+
+## Keyword arguments
+- `metric::Metric`: The metric used to compute recurrence.
+- `ratio`: The sampling ratio. The default is `0.05`.
+- `sampling`: The sampling mode. The default is [`SRandom`](@ref).
+
+!!! warning "Memory"
+    Note that the number of possible microstates is \$2^\\sigma\$, where
+    \$\\sigma\$ is the number of recurrence entries in the microstate structure.
+    This means that as the number of entries increases, the memory required to
+    store the full distribution quickly becomes impractical. For example, a square
+    \$6 \\times 6\$ microstate has 36 entries, resulting in \$2^{36} = 68,719,476,736\$
+    possible microstates.
As another example, a 4-dimensional hypercubic microstate + with side length 3 has \$3^4 = 81\$ entries, leading to \$2^{81} \\approx 2.42 \\times 10^{24}\$ + possible microstates. Clearly, allocating memory for such distributions is not feasible. + """ struct RecurrenceMicrostates{MS <: MicrostateShape, RE <: RecurrenceExpression, SM <: SamplingMode} <: ComplexityMeasures.CountBasedOutcomeSpace shape::MS @@ -16,50 +124,55 @@ end ########################################################################################## # Recurrence Microstate: Convenience constructors ########################################################################################## -function RecurrenceMicrostates(expr::RecurrenceExpression, N::Int; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio)) - shape = RectMicrostate(N) +function RecurrenceMicrostates(expr::RecurrenceExpression, shape::MicrostateShape; ratio::Real = 0.05, sampling::SamplingMode = SRandom(ratio)) return RecurrenceMicrostates(shape, expr, sampling) end -function RecurrenceMicrostates(ε::Real, N::Int; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), metric::Metric = DEFAULT_METRIC) +function RecurrenceMicrostates(expr::RecurrenceExpression, N::Int; ratio::Real = 0.05, sampling::SamplingMode = SRandom(ratio)) shape = RectMicrostate(N) - expr = Standard(ε; metric = metric) return RecurrenceMicrostates(shape, expr, sampling) end - -function RecurrenceMicrostates(expr::RecurrenceExpression, structure::NTuple; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio)) - shape = RectMicrostate(structure) +########################################################################################## +function RecurrenceMicrostates(ε::Real, N::Int; ratio::Real = 0.05, sampling::SamplingMode = SRandom(ratio), metric::Metric = DEFAULT_METRIC) + shape = RectMicrostate(N) + expr = ThresholdRecurrence(ε; metric = metric) return RecurrenceMicrostates(shape, 
expr, sampling) end -function RecurrenceMicrostates(ε::Real, structure::NTuple; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), metric::Metric = DEFAULT_METRIC) - shape = RectMicrostate(structure) - expr = Standard(ε; metric = metric) +function RecurrenceMicrostates(ε::Real, shape::MicrostateShape; ratio::Real = 0.05, sampling::SamplingMode = SRandom(ratio), metric::Metric = DEFAULT_METRIC) + expr = ThresholdRecurrence(ε; metric = metric) return RecurrenceMicrostates(shape, expr, sampling) end -function RecurrenceMicrostates(expr::RecurrenceExpression, shape::MicrostateShape; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio)) +########################################################################################## +function RecurrenceMicrostates(ε_min::Real, ε_max::Real, N::Int; ratio::Real = 0.05, sampling::SamplingMode = SRandom(ratio), metric::Metric = DEFAULT_METRIC) + shape = RectMicrostate(N) + expr = CorridorRecurrence(ε_min, ε_max; metric = metric) return RecurrenceMicrostates(shape, expr, sampling) end -function RecurrenceMicrostates(ε::Real, shape::MicrostateShape; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), metric::Metric = DEFAULT_METRIC) - expr = Standard(ε; metric = metric) +function RecurrenceMicrostates(ε_min::Real, ε_max::Real, shape::MicrostateShape; ratio::Real = 0.05, sampling::SamplingMode = SRandom(ratio), metric::Metric = DEFAULT_METRIC) + expr = CorridorRecurrence(ε_min, ε_max; metric = metric) return RecurrenceMicrostates(shape, expr, sampling) end - -function RecurrenceMicrostates(ε_min::Real, ε_max::Real, N::Int; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), metric::Metric = DEFAULT_METRIC) - shape = RectMicrostate(N) - expr = Corridor(ε_min, ε_max; metric = metric) +########################################################################################## +function 
RecurrenceMicrostates(expr::RecurrenceExpression, structure::NTuple; ratio::Real = 0.05, sampling::SamplingMode = SRandom(ratio)) + shape = RectMicrostate(structure) return RecurrenceMicrostates(shape, expr, sampling) end -function RecurrenceMicrostates(ε_min::Real, ε_max::Real, structure::NTuple; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), metric::Metric = DEFAULT_METRIC) +########################################################################################## +function RecurrenceMicrostates(ε::Real, structure::NTuple; ratio::Real = 0.05, sampling::SamplingMode = SRandom(ratio), metric::Metric = DEFAULT_METRIC) shape = RectMicrostate(structure) - expr = Corridor(ε_min, ε_max; metric = metric) + expr = ThresholdRecurrence(ε; metric = metric) return RecurrenceMicrostates(shape, expr, sampling) end -function RecurrenceMicrostates(ε_min::Real, ε_max::Real, shape::MicrostateShape; sampling_ratio::Real = 0.05, sampling::SamplingMode = SRandom(sampling_ratio), metric::Metric = DEFAULT_METRIC) - expr = Corridor(ε_min, ε_max; metric = metric) +########################################################################################## +function RecurrenceMicrostates(ε_min::Real, ε_max::Real, structure::NTuple; ratio::Real = 0.05, sampling::SamplingMode = SRandom(ratio), metric::Metric = DEFAULT_METRIC) + shape = RectMicrostate(structure) + expr = CorridorRecurrence(ε_min, ε_max; metric = metric) return RecurrenceMicrostates(shape, expr, sampling) end + + diff --git a/src/core/shape.jl b/src/core/shape.jl index 374958a..7cf020b 100644 --- a/src/core/shape.jl +++ b/src/core/shape.jl @@ -16,9 +16,9 @@ All subtypes of `MicrostateShape` must include a field `expr`, which defines the [`RecurrenceExpression`](@ref) used to compute recurrences. 
# Implementations -- [`Diagonal`](@ref) -- [`Rect`](@ref) -- [`Triangle`](@ref) +- [`DiagonalMicrostate`](@ref) +- [`RectMicrostate`](@ref) +- [`TriangleMicrostate`](@ref) """ abstract type MicrostateShape end diff --git a/src/recurrences/corridor.jl b/src/recurrences/corridor.jl index d6b7b27..fbf8383 100644 --- a/src/recurrences/corridor.jl +++ b/src/recurrences/corridor.jl @@ -1,47 +1,51 @@ -export Corridor +export CorridorRecurrence ########################################################################################## # RecurrenceExpression + Constructors ########################################################################################## """ - Corridor <: RecurrenceExpression + CorridorRecurrence <: RecurrenceExpression Recurrence expression defined by the corridor criterion introduced in [Iwanski1998Corridor](@cite): ```math -R(\\vec{x}, \\vec{y}) = \\Theta(|\\vec{x} - \\vec{y}| - \\varepsilon_{min}) \\cdot \\Theta(\\varepsilon_{max} - |\\vec{x} - \\vec{y}|), +r_{(i, j)} = \\Theta(|\\vec{x}_i - \\vec{y}_j| - \\varepsilon_{min}) \\cdot \\Theta(\\varepsilon_{max} - |\\vec{x}_i - \\vec{y}_j|), ``` -where \$\\Theta(\\cdot)\$ denotes the Heaviside function and \$(\\varepsilon_{min}, \\varepsilon_{max})\$ define the minimum and maximum distance -thresholds for two states to be considered recurrent. +where \$\\Theta(\\cdot)\$ denotes the Heaviside function and \$(\\varepsilon_{\\min}, \\varepsilon_{\\max})\$ +define the minimum and maximum distance thresholds for two states to be considered recurrent. -The `Corridor` struct stores the corridor thresholds `ε_min` and `ε_max`, as well as the -distance `metric` used to evaluate \$|\\vec{x} - \\vec{y}|\$. The metric must be defined using -the [Distances.jl](https://github.com/JuliaStats/Distances.jl) package. +The `CorridorRecurrence` struct stores the corridor thresholds `ε_min` and `ε_max`, as well as the +distance `metric` used to evaluate \$\\|\\vec{x}_i - \\vec{y}_j\\|\$. 
The metric must be defined using +the **Distances.jl** package. + +If the data for \$\\vec{x}\$ and \$\\vec{y}\$ are the same, the result is a recurrence plot; +otherwise, it is a cross-recurrence plot. # Constructor ```julia -Corridor(ε_min::Real, ε_max::Real; metric::Metric = Euclidean()) +CorridorRecurrence(ε_min::Real, ε_max::Real; metric::Metric = Euclidean()) ``` # Examples ```julia -Corridor(0.05, 0.27) -Corridor(0.05, 0.27; metric = Cityblock()) +CorridorRecurrence(0.05, 0.27) +CorridorRecurrence(0.05, 0.27; metric = Cityblock()) +CorridorRecurrence(0.05, 0.27; metric = GPUEuclidean()) ``` -The recurrence evaluation is performed via the [`recurrence`](@ref) function. +The recurrence evaluation is performed via the [`recurrence`](@ref) function. For GPU execution, the corresponding implementation is provided by `gpu_recurrence`. """ -struct Corridor{T <: Real, M <: Metric} <: RecurrenceExpression{T, M} +struct CorridorRecurrence{T <: Real, M <: Metric} <: RecurrenceExpression{T, M} ε_min::T ε_max::T metric::M end #......................................................................................... -function Corridor(ε_min::Real, ε_max::Real; metric::Metric = DEFAULT_METRIC) +function CorridorRecurrence(ε_min::Real, ε_max::Real; metric::Metric = DEFAULT_METRIC) @assert ε_min >= 0 throw(ArgumentError("threshold must be greater than zero.")) @assert ε_min < ε_max throw(ArgumentError("ε_min must be less than ε_max.")) - return Corridor(ε_min, ε_max, metric) + return CorridorRecurrence(ε_min, ε_max, metric) end ########################################################################################## @@ -50,7 +54,7 @@ end # Based on time series: (CPU) #......................................................................................... 
@inline function recurrence( - expr::Corridor, + expr::CorridorRecurrence, x::StateSpaceSet, y::StateSpaceSet, i::Int, @@ -62,7 +66,7 @@ end #......................................................................................... # Based on time series: (GPU) #......................................................................................... -@inline function gpu_recurrence(expr::Corridor, x, y, i, j, n) +@inline function gpu_recurrence(expr::CorridorRecurrence, x, y, i, j, n) distance = gpu_evaluate(expr.metric, x, y, i, j, n) return UInt8(distance ≥ expr.ε_min && distance ≤ expr.ε_max) end @@ -70,7 +74,7 @@ end # Based on spatial data: (CPU only) #......................................................................................... @inline function recurrence( - expr::Corridor, + expr::CorridorRecurrence, x::AbstractArray{<:Real}, y::AbstractArray{<:Real}, i::NTuple{N, Int}, diff --git a/src/recurrences/recurrences.jl b/src/recurrences/recurrences.jl index a4285ab..bfb1846 100644 --- a/src/recurrences/recurrences.jl +++ b/src/recurrences/recurrences.jl @@ -2,6 +2,6 @@ # Include recurrence expressions ########################################################################################## include("corridor.jl") -include("standard.jl") +include("threshold.jl") ########################################################################################## \ No newline at end of file diff --git a/src/recurrences/standard.jl b/src/recurrences/threshold.jl similarity index 67% rename from src/recurrences/standard.jl rename to src/recurrences/threshold.jl index 13d67ba..5d27ce0 100644 --- a/src/recurrences/standard.jl +++ b/src/recurrences/threshold.jl @@ -1,44 +1,48 @@ -export Standard +export ThresholdRecurrence ########################################################################################## # RecurrenceExpression + Constructors ########################################################################################## """ - Standard <: 
RecurrenceExpression + ThresholdRecurrence <: RecurrenceExpression Recurrence expression defined by the standard threshold criterion: ```math -R(\\vec{x}, \\vec{y}) = \\Theta(\\varepsilon - |\\vec{x} - \\vec{y}|), +r_{(i,j)} = \\Theta(\\varepsilon - |\\vec{x}_i - \\vec{y}_j|), ``` where \$\\Theta(\\cdot)\$ denotes the Heaviside function and \$\\varepsilon\$ is the distance threshold defining the maximum separation for two states to be considered recurrent. -The `Standard` struct stores the threshold parameter `ε`, as well as the distance `metric` -used to evaluate \$|\\vec{x} - \\vec{y}|\$. The metric must be defined using the [Distances.jl](https://github.com/JuliaStats/Distances.jl) -package. +The `ThresholdRecurrence` struct stores the threshold parameter `ε`, as well as the distance +`metric` used to evaluate \$\\|\\vec{x} - \\vec{y}\\|\$. The metric must be defined using the +**Distances.jl** package. -# Constructor +If the data for \$\\vec{x}\$ and \$\\vec{y}\$ are the same, the result is a recurrence plot; +otherwise, it is a cross-recurrence plot. + +## Constructor ```julia -Standard(ε::Real; metric::Metric = Euclidean()) +ThresholdRecurrence(ε::Real; metric::Metric = Euclidean()) ``` -# Examples +## Examples ```julia -Standard(0.27) -Standard(0.27; metric = Cityblock()) +ThresholdRecurrence(0.27) +ThresholdRecurrence(0.27; metric = Cityblock()) +ThresholdRecurrence(0.27; metric = GPUEuclidean()) ``` The recurrence evaluation is performed via the [`recurrence`](@ref) function. For GPU execution, the corresponding implementation is provided by `gpu_recurrence`. """ -struct Standard{T <: Real, M <: Metric} <: RecurrenceExpression{T, M} +struct ThresholdRecurrence{T <: Real, M <: Metric} <: RecurrenceExpression{T, M} ε::T metric::M end #......................................................................................... 
-function Standard(ε::Real; metric::Metric = DEFAULT_METRIC) - @assert ε >= 0 throw(ArgumentError("threshold must be greater than zero.")) - return Standard(ε, metric) +function ThresholdRecurrence(ε::Real; metric::Metric = DEFAULT_METRIC) + @assert ε >= 0 throw(ArgumentError("Threshold must be greater than zero.")) + return ThresholdRecurrence(ε, metric) end ########################################################################################## @@ -47,7 +51,7 @@ end # Based on time series: (CPU) #......................................................................................... @inline function recurrence( - expr::Standard, + expr::ThresholdRecurrence, x::StateSpaceSet, y::StateSpaceSet, i::Int, @@ -59,7 +63,7 @@ end #......................................................................................... # Based on time series: (GPU) #......................................................................................... -@inline function gpu_recurrence(expr::Standard, x, y, i, j, n) +@inline function gpu_recurrence(expr::ThresholdRecurrence, x, y, i, j, n) distance = gpu_evaluate(expr.metric, x, y, i, j, n) return UInt8(distance ≤ expr.ε) end @@ -67,7 +71,7 @@ end # Based on spatial data: (CPU only) #......................................................................................... @inline function recurrence( - expr::Standard, + expr::ThresholdRecurrence, x::AbstractArray{<:Real}, y::AbstractArray{<:Real}, i::NTuple{N, Int}, diff --git a/src/rqa/disorder.jl b/src/rqa/disorder.jl index 284700e..b3cccd2 100644 --- a/src/rqa/disorder.jl +++ b/src/rqa/disorder.jl @@ -374,4 +374,19 @@ function compute_labels(N::Int; S = collect(permutations(1:N))) return labels end +########################################################################################## + +# TODO: I'm not sure about how to fix this problem following the same model +# used to ComplexityMeasures... 
It is printing all labels from the vector, +# probably must it print only the vector length? Something like: +# "labels = 26 elements of type Vector{Int}" ?? + +function Base.show(io::IO, ::MIME"text/plain", x::Disorder) + print(io, "TODO: Disorder") +end + +function Base.show(io::IO, ::MIME"text/plain", x::WindowedDisorder) + print(io, "TODO: WindowedDisorder") +end + ########################################################################################## \ No newline at end of file diff --git a/src/rqa/lam.jl b/src/rqa/lam.jl index 6d1873c..07dd3a8 100644 --- a/src/rqa/lam.jl +++ b/src/rqa/lam.jl @@ -88,7 +88,7 @@ function complexity( y::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{SVector}} = x; ) - rmspace = RecurrenceMicrostates(c.ε, RectMicrostate(3, 1); metric = c.metric, sampling = c.sampling) + rmspace = RecurrenceMicrostates(c.ε, RectMicrostate(1, 3); metric = c.metric, sampling = c.sampling) probs = probabilities(rmspace, x, y) return measure(c, rmspace, probs) end @@ -127,7 +127,7 @@ function measure(c::RecurrenceLaminarity, rmspace::RecurrenceMicrostates, probs: return 1 - ((1/rr) * pl) - elseif (rmspace.shape isa Rect2Microstate{3, 1} && length(probs) == 8) + elseif (rmspace.shape isa Rect2Microstate{1, 3} && length(probs) == 8) rr = measure(rrc, probs) return 1 - ((1/rr) * probs[3]) else diff --git a/src/utils/operations/permute_cols.jl b/src/utils/operations/permute_cols.jl index 9107722..5a9dc4d 100644 --- a/src/utils/operations/permute_cols.jl +++ b/src/utils/operations/permute_cols.jl @@ -9,7 +9,7 @@ export PermuteColumns Operation that permutes the columns of a microstate \$\\mathbf{M}\$. 
To initialize a `PermuteColumns` operation, a rectangular microstate shape must be
-provided via a [`Rect`](@ref) structure:
+provided via a [`RectMicrostate`](@ref) structure:
 ```julia
 PermuteColumns(::Rect2Microstate{R, C, B}; S::Vector{Vector{Int}} = collect(permutations(1:C))
 ```
diff --git a/src/utils/operations/permute_rows.jl b/src/utils/operations/permute_rows.jl
index 04f5f95..bbe0df1 100644
--- a/src/utils/operations/permute_rows.jl
+++ b/src/utils/operations/permute_rows.jl
@@ -9,7 +9,7 @@ export PermuteRows
 
 Operation that permutes the rows of a microstate \$\\mathbf{M}\$.
 
 To initialize a `PermuteRows` operation, a rectangular microstate shape must be
-provided via a [`Rect`](@ref) structure:
+provided via a [`RectMicrostate`](@ref) structure:
 ```julia
 PermuteRows(::Rect2Microstate{R, C, B})
 ```
diff --git a/src/utils/operations/transpose.jl b/src/utils/operations/transpose.jl
index a438d0e..9389791 100644
--- a/src/utils/operations/transpose.jl
+++ b/src/utils/operations/transpose.jl
@@ -9,7 +9,7 @@ export Transpose
 
 Operation that transposes a microstate \$\\mathbf{M}\$.
 
 To initialize a `Transpose` operation, a rectangular microstate shape must be
-provided via a [`Rect`](@ref) structure:
+provided via a [`RectMicrostate`](@ref) structure:
 ```julia
 Transpose(::Rect2Microstate{R, C, B})
 ```
@@ -25,7 +25,7 @@ operate(::Transpose, I::Int)
 ```
 # Arguments
 - `op`: A `Transpose` operation.
-- `I`: DEcima identifier of the microstate (1-based).
+- `I`: Decimal identifier of the microstate (1-based).
 # Returns
 The resulting microstate decimal identifier (1-based).
From ce1fd1bfa6803e16b9e3e4fd8ddba09db355c0b8 Mon Sep 17 00:00:00 2001 From: Gabriel Ferreira Date: Tue, 14 Apr 2026 19:28:37 -0300 Subject: [PATCH 17/19] update docs --- .gitignore | 3 +- docs/make.jl | 1 + docs/refs.bib | 10 ++ docs/src/dev.md | 233 ++++++++++++++++++++++++++++++++++++- docs/src/examples.jl | 158 +++++++++++++++++++++++++ docs/src/examples.md | 1 - docs/src/gpu.md | 95 ++++++++++++++- docs/src/index.md | 2 +- docs/src/tutorial.jl | 114 ++++++++++++++++-- src/core/gpu/gpu_metric.jl | 6 +- src/rqa/det.jl | 18 +-- src/rqa/disorder.jl | 2 +- src/rqa/lam.jl | 18 +-- src/rqa/rr.jl | 10 +- src/shapes/triangle.jl | 12 +- 15 files changed, 633 insertions(+), 50 deletions(-) create mode 100644 docs/src/examples.jl delete mode 100644 docs/src/examples.md diff --git a/.gitignore b/.gitignore index 1d3ba3a..adb238c 100644 --- a/.gitignore +++ b/.gitignore @@ -10,4 +10,5 @@ Manifest.toml test/*.png test/*.bson *style.jl -tutorial.md \ No newline at end of file +tutorial.md +examples.md \ No newline at end of file diff --git a/docs/make.jl b/docs/make.jl index c6bf61a..e5ab2a4 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -8,6 +8,7 @@ using StateSpaceSets # Convert tutorial file to markdown import Literate Literate.markdown("src/tutorial.jl", "src"; credit = false) +Literate.markdown("src/examples.jl", "src"; credit = false) pages = [ "Welcome" => "index.md", diff --git a/docs/refs.bib b/docs/refs.bib index 7b69dd8..16d391f 100644 --- a/docs/refs.bib +++ b/docs/refs.bib @@ -139,3 +139,13 @@ @article{Ferreira2025RMALib volume = {35}, year = {2025} } + +@article{Silveira2025ML, + author = {Silveira, J. V. M and Costa, H. C and Spezzatto, G. S and Prado, T. L and Lopes, S. R}, + journal = {Braz. J. 
Phys.}, + number = {56}, + title = {Classifying Complex Dynamical and Stochastic Systems via Physics-Based Recurrence Features}, + volume = {37}, + year = {2025}, + doi = {10.1007/s13538-025-01969-6} +} \ No newline at end of file diff --git a/docs/src/dev.md b/docs/src/dev.md index 5fa737b..32d3b47 100644 --- a/docs/src/dev.md +++ b/docs/src/dev.md @@ -1 +1,232 @@ -# RecurrenceMicrostatesAnalysis.jl for devs \ No newline at end of file +# RecurrenceMicrostatesAnalysis.jl for devs + +!!! tip + All pull requests that introduce new functionality must be thoroughly tested and documented. + Tests are required only for the methods you extend. Always remember to add docstrings to + your implementations, as well as tests to validate them. + + We recommend reading the + [Good Scientific Code Workshop](https://github.com/JuliaDynamics/GoodScientificCodeWorkshop) + +## Backend of RecurrenceMicrostatesAnalysis.jl + +**RecurrenceMicrostatesAnalysis.jl** has multiple backends, depending on the usage context. +Each backend is implemented based on an internal struct `RMACore` or on the input data type. + +There are three backend implementations: + +- For sequential data using the CPU: it internally uses a `CPUCore <: RMACore`, which defines that + the process must run on the CPU, and the input data is a [`StateSpaceSet`](@ref) (or a `Vector{<:Real}`). + +- For spatial data using the CPU: it internally uses a `CPUCore <: RMACore`, which defines that + the process must run on the CPU, and the input data is an `Array`. + +- For sequential data using the GPU: it internally uses a `GPUCore <: RMACore`, which defines that + the process must run on the GPU, and the input data is an `AbstractGPUVector{<:SVector}`. + +Note that there is a significant difference between the input data types, in such a way that +`RMACore` is just an auxiliary struct used to differentiate the hardware backend internally. + +!!! 
info "Backend" + The package backends are located at `src/core/cpu_core.jl` (CPU) and `src/core/gpu/gpu_core.jl` (GPU). + +You do not need to fully understand how the backend operates to define something new in the package. +However, it is important to understand how the backend requires internal functions based on +`RMACore` or the input data type. For example, when implementing a [`RecurrenceExpression`](@ref), +the CPU structure uses a [`recurrence`](@ref) function, while the GPU structure uses a +`gpu_recurrence` function. + +## Adding a new Recurrence Expression + +### Steps + +1. Define the mathematical expression of your recurrence expression. It must return a binary value: + `UInt(0)` for non-recurrence and `UInt(1)` for recurrence. +2. Define a new type `YourType <: RecurrenceExpression`. Constant parameters (e.g., recurrence threshold and metric) + should be fields of this type. +3. Implement the appropriate `recurrence` dispatch: + - Sequential data: `recurrence(expr::YourType, x::StateSpaceSet, y::StateSpaceSet, i::Int, j::Int)` + - Spatial data: `recurrence(expr::YourType, x::AbstractArray{<:Real}, y::AbstractArray{<:Real}, i::NTuple{N,Int}, j::NTuple{M,Int})` + - GPU: `gpu_recurrence(expr::YourType, x, y, i, j, n)` +4. Add a docstring describing the mathematical definition and relevant references. +5. Add the recurrence expression to `docs/src/api.md`. +6. Add the expression to the [`RecurrenceExpression`](@ref) docstring. +7. Add tests to `test/distributions.jl` under the test set `recurrence expressions`. + +### Example + +Let's define a "recurrence" expression as: +```math +r_{(i,j)} = \Theta(\|\vec{x}_i - \vec{x}_j\| - \varepsilon). 
+``` + +First, we define our struct: +```@example dev +using RecurrenceMicrostatesAnalysis, Distances + +struct MyRecurrenceExpr{T <: Real, M <: Metric} <: RecurrenceExpression{T, M} + ε::T + metric::M +end + +MyRecurrenceExpr(ε) = MyRecurrenceExpr(ε, Euclidean()) +``` + +Next, we define the recurrence function: +```@example dev +@inline function RecurrenceMicrostatesAnalysis.recurrence( + expr::MyRecurrenceExpr, + x::StateSpaceSet, + y::StateSpaceSet, + i::Int, + j::Int, +) + distance = @inbounds evaluate(expr.metric, x.data[i], y.data[j]) + return UInt8(distance ≥ expr.ε) +end +``` + +And that's it: +```@example dev +rmspace = RecurrenceMicrostates(MyRecurrenceExpr(0.27), 3) +``` + +```@example dev +X = randn(1000) |> StateSpaceSet +probabilities(rmspace, X) +``` + +## Adding a new Sampling Mode + +### Steps + +1. Define how the sampling mode operates: which microstates are sampled, from which regions, and in what quantity. + The [`SamplingSpace`](@ref) must be taken into account when designing the sampling logic. +2. Define a new struct that is a subtype of [`SamplingMode`](@ref). The struct may be empty (e.g., [`Full`](@ref)) or + contain parameters such as a sampling ratio (e.g., [`SRandom`](@ref)). +3. Implement the dispatch `get_num_samples(mode::YourType, ::SamplingSpace)`, which determines the number of samples + to be drawn given the sampling mode and the sampling space. +4. Implement the dispatch `get_sample(::RMACore, ::YourType, space::SamplingSpace, rng, m)`, which returns the starting + pair $(i, j)$ to construct the microstate. Here, `RMACore` defines whether it is running on the CPU or GPU, `rng` is + a random number generator, and `m` is a counter of microstates. +5. Add a docstring to your sampling mode describing its behavior and initialization. +6. Add your sampling mode to `docs/src/api.md`. +7. Add the expression to the [`SamplingMode`](@ref) docstring. +8. Add tests to `test/distributions.jl` under the test set `sampling modes`. 
+ +## Adding a new Microstate Shape + +### Steps + +1. Define your microstate design. It essentially determines the microstate structure and reading order. For example, + square microstates are read row-wise, while triangular microstates may be read column-wise. Each position in + the microstate structure must be associated with a power of two in order to convert the binary microstate into + a decimal index. +2. Define a new struct that is a subtype of [`MicrostateShape`](@ref). +3. Implement the dispatch `get_histogram_size(::MyShape)`, which returns the histogram length. +4. Implement the dispatch `get_power_vector(::RMACore, ::MyShape)`, which returns the power vector used to + read the microstate as an integer. +5. Implement the dispatch `get_offsets(::RMACore, ::MyShape)`, which returns which positions are accessed from $(i, j)$ + to construct the microstate. +6. Define how your shape reacts to a [`SamplingSpace`](@ref) by implementing `SamplingSpace(::MyType, x, y)`. +7. Add a docstring to your microstate shape describing its behavior and initialization. +8. Add your microstate shape to `docs/src/api.md`. +9. Add the expression to the [`MicrostateShape`](@ref) docstring. +10. Add tests to `test/distributions.jl` under the test set `microstate shapes`. + +### Example + +As an example, let's try to construct the struct to obtain the microstate: +```math +\begin{matrix} +r_{(i,j)} & & r_{(i, j+2)} \\ + & r_{(i+1, j+1)} & \\ + r_{(i+2,j)} & & r_{(i+2, j+2)} +\end{matrix} +``` + +For simplicity, we are not going to generalize it for arbitrary sizes 😅. First, let's define our struct: +```@example dev +struct MyMicrostateShape <: MicrostateShape end +``` + +Next, we need to define the histogram size, which will be returned by `get_histogram_size`. It is $2^\sigma$, +where $\sigma$ is the number of recurrences contained within the microstate shape. For a square microstate we +have $\sigma = N^2$, and for a triangular microstate $\sigma = N(N - 1)\div 2$. 
In our example, it is simple since +our shape is fixed: $\sigma = 5$, so: +```@example dev +RecurrenceMicrostatesAnalysis.get_histogram_size(::MyMicrostateShape) = 2^5 +``` + +The next step is to define our power vector. It determines how we read our microstate as an integer. Each position +should be associated with a power of 2: +```math +\begin{matrix} +2^0r_{(i,j)} & & 2^1r_{(i, j+2)} \\ + & 2^2r_{(i+1, j+1)} & \\ + 2^3r_{(i+2,j)} & & 2^4r_{(i+2, j+2)} +\end{matrix} +``` + +Then, we write: +```@example dev +RecurrenceMicrostatesAnalysis.get_power_vector(::RecurrenceMicrostatesAnalysis.CPUCore, ::MyMicrostateShape) = SVector{5, Int}([1, 2, 4, 8, 16]) +``` + +Finally, we need to define the set of offsets used to construct the microstate from the trajectory. +Note that each offset must have the same index as the corresponding element in the power vector. +```@example dev +function RecurrenceMicrostatesAnalysis.get_offsets(::RecurrenceMicrostatesAnalysis.CPUCore, ::MyMicrostateShape) + elems = [ + SVector{2, Int}([0, 0]), + SVector{2, Int}([0, 2]), + SVector{2, Int}([1, 1]), + SVector{2, Int}([2, 0]), + SVector{2, Int}([2, 2]) + ] + + return SVector{5, SVector{2, Int}}(elems) +end +``` + +Now, we just need to define how our microstate will behave with respect to a sampling space. It is not necessary to define it +for all available sampling spaces, but you need to do so for at least one of them. +```@example dev +RecurrenceMicrostatesAnalysis.SamplingSpace( + ::MyMicrostateShape, + x::StateSpaceSet, + y::StateSpaceSet +) = RecurrenceMicrostatesAnalysis.SSRect2(length(x) - 2, length(y) - 2) +``` + +And that's it, we can now use our new microstate shape 🙂 (and why not combine it with the previous recurrence expression?!) 
+```@example dev +rmspace = RecurrenceMicrostates(MyRecurrenceExpr(0.27), MyMicrostateShape()) +``` + +```@example dev +probabilities(rmspace, X) +``` + +## Adding a new quantity estimator + +Since **RecurrenceMicrostatesAnalysis.jl** uses the same structure as **ComplexityMeasures.jl** +to estimate or measure complexity values (e.g., determinism, disorder, etc.), the method to implement new +features is very similar. + +To add new quantity estimators, refer to the [ComplexityMeasures.jl Dev Docs](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/complexitymeasures/stable/devdocs/). + +## Adding a new GPU metric + +Due to the incompatibility of **Distances.jl** with GPUs, it may be necessary to redefine some metrics +to use them with the **RecurrenceMicrostatesAnalysis.jl** GPU backend. + +### Steps + +1. Define a new type that is a subtype of [`GPUMetric`](@ref). +2. Implement the dispatch `gpu_evaluate(::YourMetric, x, y, i, j, n)`. Here, `i` and `j` indicate which + positions of the `AbstractGPUVector` are accessed (`i` for `x`, and `j` for `y`), and `n` is the number + of dimensions of the system. +3. Add a docstring to your metric describing it. +4. Add your metric to `docs/src/api.md`. +5. Add the expression to the [`GPUMetric`](@ref) docstring. \ No newline at end of file diff --git a/docs/src/examples.jl b/docs/src/examples.jl new file mode 100644 index 0000000..73b48b4 --- /dev/null +++ b/docs/src/examples.jl @@ -0,0 +1,158 @@ +# # Examples + +# In this section, we provide some examples where it is possible +# to apply RMA to analyze data. + +# ## Classifying data with a multi-layer perceptron and RMA + +# In this example, we demonstrate how to use a multi-layer perceptron with +# RMA to classify time series based on a parameter used to generate them. +# It is based on [Spezzatto2024ML](@cite), and we will use the +# package **Flux.jl** to perform the machine learning tasks. + +# ### Generating data + +# Let's begin by generating a dataset. 
Here, we will use the Lorenz system. +# First, we can prepare some utilities to help generate our data: + +using DynamicalSystemsBase, PredefinedDynamicalSystems + +ρ_cls = [26.0, 27.0, 28.0, 29.0, 30.0] +num_samples_to_test = 50 +num_samples_to_train = 200 + +train_timeseries = Vector{StateSpaceSet}(undef, length(ρ_cls) * num_samples_to_train) +test_timeseries = Vector{StateSpaceSet}(undef, length(ρ_cls) * num_samples_to_test) + +train_labels = Vector{Float64}(undef, length(ρ_cls) * num_samples_to_train) +test_labels = Vector{Float64}(undef, length(ρ_cls) * num_samples_to_test) + +function data_traj(ρ; u0 = rand(3), t = 250.0, Ttr = 1200.0, Δt = 0.2) + lorenz = PredefinedDynamicalSystems.lorenz(u0; ρ) + x, _ = trajectory(lorenz, t; Ttr, Δt) + return x +end + +function generate_data!(labels, data, classes, num_elements_per_class) + c = 1 + for i ∈ eachindex(labels) + labels[i] = classes[c] + data[i] = data_traj(classes[c]) + + if (i % num_elements_per_class == 0) + c += 1 + end + end +end + +# Then, we can generate a training dataset: + +generate_data!(train_labels, train_timeseries, ρ_cls, num_samples_to_train) +train_timeseries + +# And a test dataset: + +generate_data!(test_labels, test_timeseries, ρ_cls, num_samples_to_test) +test_timeseries + +# ### Preparing the input features + +# Our feature is the recurrence microstate distribution. 
Therefore, for each time series +# we must compute the probabilities and store them as a feature vector: + +using RecurrenceMicrostatesAnalysis + +N = 3 +train_dataset = Matrix{Float64}(undef, 2^(N * N) + 2, length(train_labels)) +test_dataset = Matrix{Float64}(undef, 2^(N * N) + 2, length(test_labels)) + +function get_probs!(dataset, timeseries, N) + for i ∈ eachindex(timeseries) + th, s = optimize(Threshold(), Shannon(), N, timeseries[i]) + rmspace = RecurrenceMicrostates(th, N) + dist = probabilities(rmspace, timeseries[i]) + dataset[1, i] = th + dataset[2, i] = s + dataset[3:end, i] = dist[1:end] + end +end + +# Note that we are also using the recurrence threshold and the recurrence entropy as features. +# This follows [Spezzatto2024ML](@cite), but it is not strictly necessary. If your input +# has a stable threshold range that maximizes the recurrence entropy, it is possible to use +# a mean threshold to compute all probability distributions and use only these distributions +# as features for the machine learning model. + +# Computing our feature vectors: + +# - Train + +get_probs!(train_dataset, train_timeseries, N) +train_dataset + +# - Test + +get_probs!(test_dataset, test_timeseries, N) +test_dataset + +# ### Defining the neural network model + +# Here, we use **Flux.jl** to define our model. As mentioned before, we are using a +# multi-layer perceptron. However, it is also possible to use other approaches, e.g., +# random forests [Silveira2025ML](@cite). + +using Flux + +model = Chain( + Dense(2^(N * N) + 2 => 512, identity), + Dense(512 => 256, selu), + Dense(256 => 64, selu), + Dense(64 => length(ρ_cls)), + softmax +) + +model = f64(model) + +# ### Training the MLP + +# Now it is just standard machine learning procedure... 
+ +train_labels = Flux.onehotbatch(train_labels, ρ_cls) +test_labels = Flux.onehotbatch(test_labels, ρ_cls) + +loader = Flux.DataLoader((train_dataset, train_labels), batchsize = 32, shuffle = true) +opt = Flux.setup(Flux.Adam(0.005), model) + +for epc ∈ 1:50 + for (x, y) ∈ loader + _, grads = Flux.withgradient(model) do m + y_hat = m(x) + Flux.crossentropy(y_hat, y) + end + + Flux.update!(opt, model, grads[1]) + end +end + +# ### Model evaluation + +# Finally, let's check our accuracy 🙂 + +using LinearAlgebra + +function get_quantifiers(predict, trusty, classes) + conf = zeros(Int, length(classes), length(classes)) + sz = size(predict, 2) + + for i in 1:sz + mx_prd = findmax(predict[:, i]) + mx_trt = findmax(trusty[:, i]) + + conf[mx_prd[2], mx_trt[2]] += 1 + end + + return tr(conf) / (sum(conf) + eps()) +end + +accuracy = get_quantifiers(model(test_dataset), test_labels, ρ_cls) +accuracy * 100 \ No newline at end of file diff --git a/docs/src/examples.md b/docs/src/examples.md deleted file mode 100644 index f3aeb33..0000000 --- a/docs/src/examples.md +++ /dev/null @@ -1 +0,0 @@ -# Examples \ No newline at end of file diff --git a/docs/src/gpu.md b/docs/src/gpu.md index 1421af0..2ab630e 100644 --- a/docs/src/gpu.md +++ b/docs/src/gpu.md @@ -1 +1,94 @@ -# GPU \ No newline at end of file +# GPU + +Computation of recurrence microstate distributions via **RecurrenceMicrostatesAnalysis.jl** +is compatible with different GPU devices due to an abstract kernel written using +**KernelAbstractions.jl**. + +The use of the GPU backend is very similar to the CPU backend; however, the input type +must be an `AbstractGPUVector{<:SVector}`, instead of a [`StateSpaceSet`](@ref) or a `Vector{<:Real}`. + +!!! compat "Spatial data" + The current GPU backend is not compatible with spatial data. It only works with + time series. 
+ +## Computing recurrence microstate distributions + +The first step to compute a recurrence microstate distribution using a GPU is to +import the package associated with your device, such as **CUDA.jl** or **Metal.jl**. +Next, you need to move your [`StateSpaceSet`](@ref) to the GPU. For example: + +```julia +using CUDA, DynamicalSystemsBase, PredefinedDynamicalSystems + +logistic = PredefinedDynamicalSystems.logistic(0.4; r = 4.0) +X, t = trajectory(logistic, 1000; Ttr = 2000) + +X_gpu = Float32.(X[:, 1]) |> StateSpaceSet |> CuVector +``` + +!!! compat "Float type" + GPUs usually only accept `Float32`. + +This GPU vector `X_gpu` can be used as input for the `probabilities` function: + +```julia +using RecurrenceMicrostatesAnalysis + +ε = 0.27f0 +N = 3 +rmspace = RecurrenceMicrostates(ε, N; metric = GPUEuclidean()) +probs = probabilities(rmspace, X_gpu) +``` + +Note that the recurrence microstate outcome space has two specifications: + +1. The `threshold` must have the same type as the input. If you have an `AbstractGPUVector{<:SVector{D, T}}` as input, + where `T` is the data type (e.g., `Float32` or `Float64`), your `threshold` must also be of type `T`. + +2. The `metric` must be a [`GPUMetric`](@ref). This is required because **Distances.jl** is not fully compatible with GPUs. + +!!! compat "Sampling ratio" + When using a random sampling mode (e.g., [`SRandom`](@ref)), the samples are extracted + on the CPU, and only the microstates are computed on the GPU. Therefore, the sampling ratio + can be `Float64`, even if the GPU is not compatible with this data type. + +## Estimating RQA and disorder + +Just as the distribution computation keeps the same structure when using a GPU instead of a CPU, +the estimation of RQA or the computation of disorder or recurrence entropy is similar. 
+ +- Entropy +```julia +entropy(Shannon(), rmspace, X_gpu) +``` + +- Recurrence rate +```julia +complexity(RecurrenceRate(ε; metric = GPUEuclidean()), X_gpu) +``` + +- Determinism +```julia +complexity(RecurrenceDeterminism(ε; metric = GPUEuclidean()), X_gpu) +``` + +- Laminarity +```julia +complexity(RecurrenceLaminarity(ε; metric = GPUEuclidean()), X_gpu) +``` + +- Disorder +```julia +complexity(Disorder(N; metric = GPUEuclidean()), X_gpu) +``` + +- Windowed disorder +```julia +W = 100 +complexity(WindowedDisorder(W, N; metric = GPUEuclidean()), X_gpu) +``` + +!!! info "Performance" + Working with RMA on a GPU is faster than using a CPU, as expected. However, it is + important to note that GPUs require initialization time; therefore, they perform + better for long time series 🙂 \ No newline at end of file diff --git a/docs/src/index.md b/docs/src/index.md index 38a2f00..a559bb4 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -20,7 +20,7 @@ sampling modes, recurrence functions, or complexity estimators. ### Input data for RecurrenceMicrostatesAnalysis.jl **RecurrenceMicrostatesAnalysis.jl** accepts three types of input, each associated with a different backend: -- [`StateSpaceSet`] or `Vector{<:Real}`: used for multivariate time series, datasets, or state-space representations. +- [`StateSpaceSet`](@ref) or `Vector{<:Real}`: used for multivariate time series, datasets, or state-space representations. - `AbstractArray{<:Real}`: used for spatial data, enabling RMA to be applied within the generalized framework of Spatial Recurrence Plots (SRP) [Marwan2007Spatial](@cite). We give some examples about its use in [Spatial data](@ref). - `AbstractGPUVector`: used for time series analysis with the GPU backend. We provide some explanations about it in [GPU](@ref). 
diff --git a/docs/src/tutorial.jl b/docs/src/tutorial.jl index 946d8ed..95ce1ed 100644 --- a/docs/src/tutorial.jl +++ b/docs/src/tutorial.jl @@ -20,7 +20,7 @@ # where $K$ is the length of the time series and $d$ is the dimension of the phase space. # The recurrence plot is defined by the recurrence matrix # ```math -# r_{(i,j)} = \Theta(\varepsilon - \|\vec x_i - \vec x_j\|), +# R_{i,j} = \Theta(\varepsilon - \|\vec x_i - \vec x_j\|), # ``` # where $\Theta(\cdot)$ denotes the Heaviside step function and $\varepsilon$ is the recurrence # threshold. @@ -179,10 +179,9 @@ disorder = Disorder() complexity(disorder, X) # Note that disorder is free of parameters (except for the microstate length and -# the number of thresholds used, or its range). This is because the quantifier is -# defined as the maximum total entropy of recurrence microstate classes, considering -# a large range of thresholds. Moreover, this quantifier can only be estimated -# through recurrence microstates. +# the number of thresholds used). This is because the quantifier is +# defined using the maximum total entropy of recurrence microstate classes, considering +# a large range of thresholds. # It is also possible to estimate disorder while splitting the data into windows. # We prepared a special complexity estimator for this, aiming to facilitate @@ -192,7 +191,7 @@ window_len = 1000 win_disorder = WindowedDisorder(window_len; step = 100) # Here, we are using windows of length 1000 points, moved in steps of 100 points. -# Finally, we compute the quantifier: +# Finally, we compute the quantifier for each window: wd = complexity(win_disorder, X) @@ -200,8 +199,108 @@ wd = complexity(win_disorder, X) using CairoMakie lines(wd) +# !!! compat "Disorder specifications" +# Disorder is only available for the standard outcome space extracted +# from an RP using square microstates. 
Therefore, it is not compatible
+# with different microstate shapes or input variations such as CRP
+# and SRP, which will be introduced later.
+
+# !!! info "Performance"
+# Disorder uses several recurrence microstate distributions computed
+# using a full sampling mode. It is a heavy quantifier that can have
+# a high computational cost. For large time series, we recommend
+# computing this quantifier using [GPU](@ref).
+
# ## Optimization of free parameters

+# It is known that recurrence analysis has some free parameters. The most
+# important of them is the recurrence threshold, $\varepsilon$, used to
+# define which elements from a sequence are recurrent and which are not.
+# In practice, these free parameters are not a major issue and allow
+# different analyses of the same system. Of course, **RecurrenceMicrostatesAnalysis.jl**
+# considers this and allows you to use any value as your threshold.
+
+# Nevertheless, RMA has two situations where it is necessary to set
+# a specific recurrence threshold: when computing disorder and when using
+# RMA distributions as input for machine learning.
+
+# In both of these situations, it is important to use a threshold that
+# maximizes a certain quantity. Disorder is defined as the maximum total
+# entropy per class; therefore, the recurrence threshold is not truly a free
+# parameter in this case, since there is an optimal threshold that results
+# in the maximum observable disorder. Moreover, when working with RMA and
+# machine learning (see [Classifying data with a multi-layer perceptron and RMA](@ref)),
+# in most cases there is a correlation between the accuracy and the distribution
+# that maximizes the recurrence entropy [Spezzatto2024ML](@cite). Thus, it is a good
+# idea to use this as a basis for defining an optimal value for the recurrence threshold.
+
+# With this in mind, we provide a function to optimize some [`Parameter`](@ref)
Currently, the only available parameter is +# [`Threshold`](@ref), which can be optimized using the function [`optimize`](@ref) +# by maximizing recurrence entropy or disorder. + +# Using recurrence entropy as an example: +ε, S = optimize(Threshold(), Shannon(), N, X) +rmspace = RecurrenceMicrostates(ε, N) +h = entropy(Shannon(), rmspace, X) +(h, S) + +# Or for disorder: +ε, ξ = optimize(Threshold(), disorder, X) +Ξ = complexity(disorder, X) + +(Ξ, ξ) + +# Note that there is a difference between `Ξ` and `ξ`. The `optimize` function uses +# a sampling ratio of $10\%$, which can result in a considerably different value. +# Internally, this optimization structure is used to define an optimal threshold range, +# from which we extract the disorder using the sampling mode [`Full`](@ref). +# If you want to compute the "disorder" for a specific threshold, it is possible using +# the internal struct `PartialDisorder`: + +partial = RecurrenceMicrostatesAnalysis.PartialDisorder(ThresholdRecurrence(ε), N) +complexity(partial, X) + +# ## Custom specification of recurrence microstates + +# When we write `rmspace = RecurrenceMicrostates(ε, N)`, +# we are in fact accepting a default definition for both what counts as a recurrence +# and which recurrence microstates to examine. +# We can alter either by choosing the recurrence expression, the specific +# microstate(s) we wish to analyze, or the sampling method used to extract these +# microstates from the input data. For example: + +expr = CorridorRecurrence(0.05, 0.27) +shape = TriangleMicrostate(3) +sampling = Full() +rmspace = RecurrenceMicrostates(expr, shape; sampling) +probabilities(rmspace, X) + +# **RecurrenceMicrostateAnalysis.jl** supports several configurations for the recurrence outcome space +# while leveraging the same backend (see [`RecurrenceMicrostates`](@ref)). 
+
# If you want to contribute with new recurrence expressions, microstate shapes, or sampling modes,
+# read the section [RecurrenceMicrostatesAnalysis.jl for devs](@ref) and open an
+# [issue](https://github.com/JuliaDynamics/RecurrenceMicrostatesAnalysis.jl/issues)
+# if you encounter any difficulties 🙂
+
+# ## Cross recurrence plots
+
+# For cross-recurrences, nearly nothing changes for you, nor for the source code
+# of the code base! Simply call `function(..., rmspace, X, Y)`, adding an additional
+# final argument `Y` corresponding to the second trajectory from which cross recurrences are estimated.
+
+# For example, here is the cross recurrence microstate distribution for
+# the original Henon map trajectory and one at slightly different parameters
+
+set_parameter!(henon, 1, 1.35)
+Y, t = trajectory(henon, 10_000)
+probabilities(rmspace, X, Y)
+
+# This augmentation from one to two input data
+# works for most functions discussed in this tutorial.
+# Coincidentally, the same extension of `probabilities` to multivariate data
+# is done in [Associations.jl](https://juliadynamics.github.io/Associations.jl/stable/).
+
# ## Spatial data

# Finally, let's discuss spatial data. This is an exploratory
@@ -260,4 +359,5 @@ entropy(Shannon(), probs)
# Note that if you are using a grayscale image, you need to use an
# `Array` with size `(1, H, W)`. The first dimension stores the
# features of the data, which are used to compute the recurrences,
-# i.e., $\vec{x}_{\vec{i}}$.
\ No newline at end of file
+# i.e., $\vec{x}_{\vec{i}}$. The same principle must be applied
+# to other types of spatial data.
\ No newline at end of file diff --git a/src/core/gpu/gpu_metric.jl b/src/core/gpu/gpu_metric.jl index 523cf80..d1924af 100644 --- a/src/core/gpu/gpu_metric.jl +++ b/src/core/gpu/gpu_metric.jl @@ -20,9 +20,9 @@ abstract type GPUMetric <: Metric end ########################################################################################## # Implementation: evaluate ########################################################################################## -function gpu_evaluate(metric::GPUMetric, ::SVector{N, Float32}, ::SVector{N, Float32}) where {N} - T = typeof(metric) - msg = "`gpu_evaluate` not implemented for $T." +function gpu_evaluate(metric::GPUMetric, ::SVector{N, T}, ::SVector{N, T}) where {N, T} + Tm = typeof(metric) + msg = "`gpu_evaluate` not implemented for $Tm." throw(ArgumentError(msg)) end diff --git a/src/rqa/det.jl b/src/rqa/det.jl index 4251c9e..775d11a 100644 --- a/src/rqa/det.jl +++ b/src/rqa/det.jl @@ -20,7 +20,7 @@ Determinism is estimated for a threshold `ε`. Recurrence determinism (DET) is defined as [Webber2015Recurrence](@cite) ```math -DET = \\frac{\\sum_{l=l_{min}}^{K} l H_D(l)}{\\sum_{i,j=1}^{K} r_{i,j}}, +DET = \\frac{\\sum_{l=l_{min}}^{K} l H_D(l)}{\\sum_{i,j=1}^{K} r_{(i,j)}}, ``` where \$H_D(l)\$ is the histogram of diagonal line lengths: ```math @@ -28,7 +28,7 @@ H_D(l) = \\sum_{i,j=1}^{K} (1 - r_{i-1, j-1})(1 - r_{i+l,j+l})\\prod_{k=0}^{l-1} ``` By inverting the determinism expression, we can rewrite it as [daCruz2025RQAMeasures](@cite) ```math -DET = 1 - \\frac{1}{K^2 \\sum_{i,j=1}^{K} r_{i,j}} \\sum_{l=1}^{l_{min} - 1} l H_D(l). +DET = 1 - \\frac{1}{K^2 \\sum_{i,j=1}^{K} r_{(i,j)}} \\sum_{l=1}^{l_{min} - 1} l H_D(l). ``` An approximate value for DET can be estimated using recurrence microstates, as introduced by @@ -41,7 +41,7 @@ square microstates of size \$3 \\times 3\$. 
Here, we use the relation: For the commonly used case \$l_{min} = 2\$, this leads to the approximation ```math -DET \\approx 1 - \\frac{\\vec{d}^{(1)}\\cdot\\mathcal{R}^{(3)}\\vec{p}^{(3)}}{\\sum_{i,j=1}^{K} r_{i,j}}. +DET \\approx 1 - \\frac{\\vec{d}^{(1)}\\cdot\\mathcal{R}^{(3)}\\vec{p}^{(3)}}{\\sum_{i,j=1}^{K} r_{(i,j)}}. ``` The correlation term \$\\vec{d}^{(1)} \\cdot \\mathcal{R}^{(3)} \\vec{p}^{(3)}\$ can be @@ -58,7 +58,7 @@ where \$\\xi\$ denotes an unconstrained entry. There are 64 microstates with thi the 512 possible \$3 \\times 3\$ microstates. Defining the class \$C_D\$ as the set of microstates with this structure, DET can be estimated as: ```math -DET \\approx 1 - \\frac{\\sum_{i\\in C_D} p_i^{(3)}}{\\sum_{i,j=1}^{K} r_{i,j}}. +DET \\approx 1 - \\frac{\\sum_{i\\in C_D} p_i^{(3)}}{\\sum_{i,j=1}^{K} r_{(i,j)}}. ``` The implementation used by [complexity](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/complexitymeasures/stable/complexity/#ComplexityMeasures.complexity) @@ -66,7 +66,7 @@ is an optimized version of this process using [`DiagonalMicrostate`](@ref) [Ferr Since this microstate shape is symmetric with respect to the desired information, it is not necessary to account for \$\\xi\$ values as in the square microstate case. Thus, determinism can be estimated as ```math -DET \\approx 1 - \\frac{p_3^{(3)}}{\\sum_{i,j=1}^{K} r_{i,j}}, +DET \\approx 1 - \\frac{p_3^{(3)}}{\\sum_{i,j=1}^{K} r_{(i,j)}}, ``` where \$p_3^{(3)}\$ is the probability of observing the microstate \$0~1~0\$. @@ -77,15 +77,15 @@ where \$p_3^{(3)}\$ is the probability of observing the microstate \$0~1~0\$. [RecurrenceAnalysis.jl](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/recurrenceanalysis/stable/quantification/#RecurrenceAnalysis.determinism). 
""" struct RecurrenceDeterminism <: ComplexityEstimator - ε::Float64 + ε::Real metric::Metric sampling::SamplingMode end function complexity( c::RecurrenceDeterminism, - x::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{SVector}}, - y::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{SVector}} = x; + x::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{<:SVector}}, + y::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{<:SVector}} = x; ) rmspace = RecurrenceMicrostates(c.ε, DiagonalMicrostate(3); metric = c.metric, sampling = c.sampling) @@ -94,7 +94,7 @@ function complexity( end # -- Constructors -RecurrenceDeterminism(ε; metric::Metric = DEFAULT_METRIC, ratio::Float64 = 0.1, sampling::SamplingMode = SRandom(ratio)) = RecurrenceDeterminism(ε, metric, sampling) +RecurrenceDeterminism(ε::Real; metric::Metric = DEFAULT_METRIC, ratio::Float64 = 0.1, sampling::SamplingMode = SRandom(ratio)) = RecurrenceDeterminism(ε, metric, sampling) ########################################################################################## # Internal: measure from probabilities diff --git a/src/rqa/disorder.jl b/src/rqa/disorder.jl index b3cccd2..ee7331b 100644 --- a/src/rqa/disorder.jl +++ b/src/rqa/disorder.jl @@ -206,7 +206,7 @@ function complexity( end # We need to define the threshold range here. - s = ceil(Int, length(windowed_data) * 0.1) + s = ceil(Int, length(windowed_data) * 0.1) + 2 opt_ths = zeros(Float64, s) for i ∈ eachindex(s) diff --git a/src/rqa/lam.jl b/src/rqa/lam.jl index 07dd3a8..4054612 100644 --- a/src/rqa/lam.jl +++ b/src/rqa/lam.jl @@ -20,7 +20,7 @@ Laminarity is estimated for a threshold `ε`. 
Recurrence laminarity (LAM) is defined as [Webber2015Recurrence](@cite) ```math -LAM = \\frac{\\sum_{l=l_{min}}^{K} l H_V(l)}{\\sum_{i,j=1}^{K} r_{i,j}}, +LAM = \\frac{\\sum_{l=l_{min}}^{K} l H_V(l)}{\\sum_{i,j=1}^{K} r_{(i,j)}}, ``` where \$H_V(l)\$ is the histogram of vertical line lengths: ```math @@ -28,7 +28,7 @@ H_V(l) = \\sum_{i,j=1}^{K} (1 - r_{i, j-1})(1 - r_{i,j+l})\\prod_{k=0}^{l-1} r_{ ``` By inverting the laminarity expression, we can rewrite it as [daCruz2025RQAMeasures](@cite) ```math -LAM = 1 - \\frac{1}{K^2 \\sum_{i,j=1}^{K} r_{i,j}} \\sum_{l=1}^{l_{min} - 1} l H_V(l). +LAM = 1 - \\frac{1}{K^2 \\sum_{i,j=1}^{K} r_{(i,j)}} \\sum_{l=1}^{l_{min} - 1} l H_V(l). ``` An approximate value for LAM can be estimated using recurrence microstates, as introduced by @@ -41,7 +41,7 @@ square microstates of size \$3 \\times 3\$. Here, we use the relation: For the commonly used case \$l_{min} = 2\$, this leads to the approximation ```math -LAM \\approx 1 - \\frac{\\vec{v}^{(1)}\\cdot\\mathcal{R}^{(3)}\\vec{p}^{(3)}}{\\sum_{i,j=1}^{K} r_{i,j}}. +LAM \\approx 1 - \\frac{\\vec{v}^{(1)}\\cdot\\mathcal{R}^{(3)}\\vec{p}^{(3)}}{\\sum_{i,j=1}^{K} r_{(i,j)}}. ``` The correlation term \$\\vec{v}^{(1)} \\cdot \\mathcal{R}^{(3)} \\vec{p}^{(3)}\$ can be @@ -58,7 +58,7 @@ where \$\\xi\$ denotes an unconstrained entry. There are 64 microstates with thi the 512 possible \$3 \\times 3\$ microstates. Defining the class \$C_V\$ as the set of microstates with this structure, LAM can be estimated as: ```math -LAM \\approx 1 - \\frac{\\sum_{i\\in C_V} p_i^{(3)}}{\\sum_{i,j=1}^{K} r_{i,j}}. +LAM \\approx 1 - \\frac{\\sum_{i\\in C_V} p_i^{(3)}}{\\sum_{i,j=1}^{K} r_{(i,j)}}. 
``` The implementation used by [complexity](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/complexitymeasures/stable/complexity/#ComplexityMeasures.complexity) @@ -66,7 +66,7 @@ is an optimized version of this process using \$1 \\times 3\$ [`RectMicrostate`] Since this microstate shape is symmetric with respect to the desired information, it is not necessary to account for \$\\xi\$ values as in the square microstate case. Thus, laminarity can be estimated as ```math -LAM \\approx 1 - \\frac{p_3^{(3)}}{\\sum_{i,j=1}^{K} r_{i,j}}, +LAM \\approx 1 - \\frac{p_3^{(3)}}{\\sum_{i,j=1}^{K} r_{(i,j)}}, ``` where \$p_3^{(3)}\$ is the probability of observing the microstate \$0~1~0\$. @@ -77,15 +77,15 @@ where \$p_3^{(3)}\$ is the probability of observing the microstate \$0~1~0\$. [RecurrenceAnalysis.jl](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/recurrenceanalysis/stable/quantification/#RecurrenceAnalysis.laminarity). """ struct RecurrenceLaminarity <: ComplexityEstimator - ε::Float64 + ε::Real metric::Metric sampling::SamplingMode end function complexity( c::RecurrenceLaminarity, - x::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{SVector}}, - y::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{SVector}} = x; + x::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{<:SVector}}, + y::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{<:SVector}} = x; ) rmspace = RecurrenceMicrostates(c.ε, RectMicrostate(1, 3); metric = c.metric, sampling = c.sampling) @@ -94,7 +94,7 @@ function complexity( end # -- Constructors -RecurrenceLaminarity(ε; metric::Metric = DEFAULT_METRIC, ratio::Float64 = 0.1, sampling::SamplingMode = SRandom(ratio)) = RecurrenceLaminarity(ε, metric, sampling) +RecurrenceLaminarity(ε::Real; metric::Metric = DEFAULT_METRIC, ratio::Float64 = 0.1, sampling::SamplingMode = SRandom(ratio)) = RecurrenceLaminarity(ε, metric, sampling) 
########################################################################################## # Internal: measure from probabilities diff --git a/src/rqa/rr.jl b/src/rqa/rr.jl index 8759182..daae21f 100644 --- a/src/rqa/rr.jl +++ b/src/rqa/rr.jl @@ -21,7 +21,7 @@ via the `N` parameter. The recurrence rate is estimated for a threshold `ε`. Recurrence rate (RR) is defined as [Webber2015Recurrence](@cite) ```math -RR = \\frac{1}{K^2}\\sum_{i,j=1}^{K} R_{i,j}. +RR = \\frac{1}{K^2}\\sum_{i,j=1}^{K} r_{(i,j)}. ``` When estimating it using RMA, the recurrence rate is defined as the expected value @@ -38,15 +38,15 @@ where \$RR_i^{(N)}\$ denotes the recurrence rate of the \$i\$-th microstate. [RecurrenceAnalysis.jl](https://juliadynamics.github.io/DynamicalSystemsDocs.jl/recurrenceanalysis/stable/quantification/#RecurrenceAnalysis.recurrencerate). """ struct RecurrenceRate{N} <: ComplexityEstimator - ε::Float64 + ε::Real metric::Metric sampling::SamplingMode end function complexity( c::RecurrenceRate{N}, - x::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{SVector}}, - y::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{SVector}} = x; + x::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{<:SVector}}, + y::Union{StateSpaceSet, Vector{<:Real}, <:AbstractGPUVector{<:SVector}} = x; ) where {N} probs = probabilities(RecurrenceMicrostates(c.ε, N; metric = c.metric, sampling = c.sampling), x, y) @@ -54,7 +54,7 @@ function complexity( end # -- Constructors -RecurrenceRate(ε::Float64, N::Int = 1; metric::Metric = DEFAULT_METRIC, ratio::Float64 = 0.1, sampling::SamplingMode = SRandom(ratio)) = RecurrenceRate{N}(ε, metric, sampling) +RecurrenceRate(ε::Real, N::Int = 1; metric::Metric = DEFAULT_METRIC, ratio::Float64 = 0.1, sampling::SamplingMode = SRandom(ratio)) = RecurrenceRate{N}(ε, metric, sampling) ########################################################################################## # Internal: measure from probabilities diff --git 
a/src/shapes/triangle.jl b/src/shapes/triangle.jl index e88509c..8d7a297 100644 --- a/src/shapes/triangle.jl +++ b/src/shapes/triangle.jl @@ -4,9 +4,7 @@ export TriangleMicrostate # MicrostateShape: Triangle + Constructors and sub-types ########################################################################################## """ - TriangleMicrostate{N, B} <: MicrostateShape - -TriangleMicrostate{N, B} <: MicrostateShape + TriangleMicrostate <: MicrostateShape Define a triangular microstate shape, originally introduced by Hirata in 2021 [Hirata2021Triangle](@cite). @@ -22,14 +20,6 @@ where `N` defines the size of the triangular microstate. N = 3 triangle = TriangleMicrostate(N) ``` -The corresponding microstate structure is given by: -```math -\\begin{pmatrix} -R_{i,j} & R_{i,j + 1} & R_{i,j + 2} \\\\ - & R_{i + 1,j + 1} & R_{i + 1,j + 2} \\\\ - & & R_{i + 2,j + 2} \\\\ -\\end{pmatrix} -``` !!! compat Triangular microstate shape is not compatible with spatial data. From 5230b431f3eb07c654648dc74e2439cfafd3be10 Mon Sep 17 00:00:00 2001 From: Gabriel Ferreira Date: Tue, 14 Apr 2026 22:18:18 -0300 Subject: [PATCH 18/19] add PredefinedDynamicalSystems to Project --- docs/Project.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/Project.toml b/docs/Project.toml index a415af2..b3f5426 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -15,6 +15,7 @@ GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" Images = "916415d5-f1e6-5110-898d-aaa5f9f070e0" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306" +PredefinedDynamicalSystems = "31e2f376-db9e-427a-b76e-a14f56347a14" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" RecurrenceAnalysis = "639c3291-70d9-5ea2-8c5b-839eba1ee399" RecurrenceMicrostatesAnalysis = "cb83a08b-85c6-4e94-91aa-4e946c7d4f0c" From c25e81abafed286403712f2a5d508438e434d38e Mon Sep 17 00:00:00 2001 From: Gabriel Ferreira Date: Wed, 15 Apr 2026 14:27:27 -0300 Subject: [PATCH 19/19] 
Remove Adapt.jl --- Project.toml | 2 -- src/RecurrenceMicrostatesAnalysis.jl | 1 - 2 files changed, 3 deletions(-) diff --git a/Project.toml b/Project.toml index 4a52c7b..3888508 100644 --- a/Project.toml +++ b/Project.toml @@ -4,7 +4,6 @@ version = "0.4.0" repo = "https://github.com/DynamicsUFPR/RecurrenceMicrostatesAnalysis.jl" [deps] -Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" Atomix = "a9b6321e-bd34-4604-b9c9-b65b8de01458" Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" ComplexityMeasures = "ab4b797d-85ee-42ba-b621-05d793b346a2" @@ -18,7 +17,6 @@ StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" [compat] -Adapt = "4.4.0" Atomix = "1.1.2" Combinatorics = "1.1.0" ComplexityMeasures = "3.8.5" diff --git a/src/RecurrenceMicrostatesAnalysis.jl b/src/RecurrenceMicrostatesAnalysis.jl index 45ed34b..b9d2ca9 100644 --- a/src/RecurrenceMicrostatesAnalysis.jl +++ b/src/RecurrenceMicrostatesAnalysis.jl @@ -19,7 +19,6 @@ using Reexport using StaticArrays using Statistics -@reexport using Adapt @reexport using ComplexityMeasures @reexport using StateSpaceSets