Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "ReservoirComputing"
uuid = "7c2d2b1e-3dd4-11ea-355a-8f6a8116e294"
authors = ["Francesco Martinuzzi"]
version = "0.12.7"
version = "0.12.8"

[deps]
ArrayInterface = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
Expand Down
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ reservoir computing models. More specifically the software offers:
+ Deep echo state networks `DeepESN`
+ Echo state networks with delayed states `DelayESN`
+ Edge of stability echo state networks `ES2N`
+ Euler state networks `EuSN`
+ Hybrid echo state networks `HybridESN`
+ Next generation reservoir computing `NGRC`
- 15+ reservoir initializers and 5+ input layer initializers
Expand Down
3 changes: 2 additions & 1 deletion docs/src/api/layers.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,9 @@
## Echo State Networks

```@docs
ESNCell
ES2NCell
ESNCell
EuSNCell
```

## Reservoir computing with cellular automata
Expand Down
1 change: 1 addition & 0 deletions docs/src/api/models.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
```@docs
ES2N
ESN
EuSN
DeepESN
DelayESN
HybridESN
Expand Down
1 change: 1 addition & 0 deletions docs/src/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,7 @@ or `dev` the package.
+ Deep echo state networks [`DeepESN`](@ref)
+ Echo state networks with delayed states [`DelayESN`](@ref)
+ Edge of stability echo state networks [`ES2N`](@ref)
+ Euler state networks [`EuSN`](@ref)
+ Hybrid echo state networks [`HybridESN`](@ref)
+ Next generation reservoir computing [`NGRC`](@ref)
- 15+ reservoir initializers and 5+ input layer initializers
Expand Down
14 changes: 14 additions & 0 deletions docs/src/refs.bib
Original file line number Diff line number Diff line change
Expand Up @@ -384,3 +384,17 @@ @article{Ceni2025
month = apr,
pages = {7555–7564}
}

@article{Gallicchio2024,
title = {Euler State Networks: Non-dissipative Reservoir Computing},
volume = {579},
ISSN = {0925-2312},
url = {http://dx.doi.org/10.1016/j.neucom.2024.127411},
DOI = {10.1016/j.neucom.2024.127411},
journal = {Neurocomputing},
publisher = {Elsevier BV},
author = {Gallicchio, Claudio},
year = {2024},
month = apr,
pages = {127411}
}
6 changes: 4 additions & 2 deletions src/ReservoirComputing.jl
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ include("layers/basic.jl")
include("layers/lux_layers.jl")
include("layers/esn_cell.jl")
include("layers/es2n_cell.jl")
include("layers/eusn_cell.jl")
include("layers/svmreadout.jl")
#general
include("states.jl")
Expand All @@ -42,12 +43,13 @@ include("models/esn.jl")
include("models/esn_deep.jl")
include("models/esn_delay.jl")
include("models/esn_hybrid.jl")
include("models/eusn.jl")
include("models/ngrc.jl")
#extensions
include("extensions/reca.jl")

export ReservoirComputer
export ESNCell, ES2NCell
export ESNCell, ES2NCell, EuSNCell
export StatefulLayer, LinearReadout, ReservoirChain, Collect, collectstates,
DelayLayer, NonlinearFeaturesLayer
export SVMReadout
Expand All @@ -62,7 +64,7 @@ export block_diagonal, chaotic_init, cycle_jumps, delay_line, delayline_backward
export add_jumps!, backward_connection!, delay_line!, reverse_simple_cycle!,
scale_radius!, self_loop!, simple_cycle!
export train, train!, predict, resetcarry!, polynomial_monomials
export ES2N, ESN, DeepESN, DelayESN, HybridESN
export ES2N, ESN, EuSN, DeepESN, DelayESN, HybridESN
export NGRC
#ext
export RECACell, RECA
Expand Down
118 changes: 118 additions & 0 deletions src/layers/eusn_cell.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
@doc raw"""
    EuSNCell(in_dims => out_dims, [activation];
        use_bias=false, init_bias=zeros32,
        init_reservoir=rand_sparse, init_input=scaled_rand,
        init_state=randn32, leak_coefficient=1.0, diffusion=1.0)

Euler State Network (EuSN) cell [Gallicchio2024](@cite).

## Equations

```math
\begin{aligned}
    \mathbf{h}(t) = \mathbf{h}(t-1) + \varepsilon \tanh\!\left((\mathbf{W}_h
        - \gamma \mathbf{I})\mathbf{h}(t-1) + \mathbf{W}_x \mathbf{x}(t)
        + \mathbf{b}\right)
\end{aligned}
```
## Arguments

  - `in_dims`: Input dimension.
  - `out_dims`: Reservoir (hidden state) dimension.
  - `activation`: Activation function. Default: `tanh`.

## Keyword arguments

  - `use_bias`: Whether to include a bias term. Default: `false`.
  - `init_bias`: Initializer for the bias. Used only if `use_bias=true`.
    Default is `zeros32`.
  - `init_reservoir`: Initializer for the reservoir matrix `W_res`.
    Default is [`rand_sparse`](@ref).
  - `init_input`: Initializer for the input matrix `W_in`.
    Default is [`scaled_rand`](@ref).
  - `init_state`: Initializer for the hidden state when an external
    state is not provided. Default is `randn32`.
  - `leak_coefficient`: Leak rate `α ∈ (0,1]`. Default: `1.0`.
  - `diffusion`: Diffusion parameter `γ ∈ (0,1]`, subtracted from the
    recurrent matrix diagonal to enforce a non-dissipative dynamics.
    Default: `1.0`.

## Inputs

  - **Case 1:** `x :: AbstractArray (in_dims, batch)`
    A fresh state is created via `init_state`; the call is forwarded to Case 2.
  - **Case 2:** `(x, (h,))` where `h :: AbstractArray (out_dims, batch)`
    Computes the update and returns the new state.

In both cases, the forward returns `((h_new, (h_new,)), st_out)` where `st_out`
contains any updated internal state.

## Returns

  - Output/hidden state `h_new :: out_dims` and state tuple `(h_new,)`.
  - Updated layer state (NamedTuple).

## Parameters

Created by `initialparameters(rng, esn)`:

  - `input_matrix :: (out_dims × in_dims)` — `W_in`
  - `reservoir_matrix :: (out_dims × out_dims)` — `W_res`
  - `bias :: (out_dims,)` — present only if `use_bias=true`

## States

Created by `initialstates(rng, esn)`:

  - `rng`: a replicated RNG used to sample initial hidden states when needed.
"""
@concrete struct EuSNCell <: AbstractEchoStateNetworkCell
    activation
    in_dims <: IntegerType
    out_dims <: IntegerType
    init_bias
    init_reservoir
    init_input
    init_state
    leak_coefficient
    diffusion
    use_bias <: StaticBool
end

# Keyword constructor for `EuSNCell`: collects the hyper-parameters and
# initializers, converting `use_bias` to a compile-time `StaticBool` so the
# bias branch is resolved at dispatch time rather than at runtime.
function EuSNCell((in_dims, out_dims)::Pair{<:IntegerType, <:IntegerType},
        activation = tanh; use_bias::BoolType = False(), init_bias = zeros32,
        init_reservoir = rand_sparse, init_input = scaled_rand,
        init_state = randn32, leak_coefficient::AbstractFloat = 1.0,
        diffusion::AbstractFloat = 1.0)
    static_bias = static(use_bias)
    cell = EuSNCell(activation, in_dims, out_dims, init_bias, init_reservoir,
        init_input, init_state, leak_coefficient, diffusion, static_bias)
    return cell
end

# Forward pass: one EuSN update step on `(input, (hidden_state,))`.
function (esn::EuSNCell)((inp, (hidden_state,))::InputType, ps, st::NamedTuple)
    # Convert hyper-parameters to the input element type so Float32 states
    # are not silently promoted to Float64.
    T = eltype(inp)
    leak = T(esn.leak_coefficient)
    gamma = T(esn.diffusion)
    bias = safe_getproperty(ps, Val(:bias))
    # Antisymmetric, diffusion-damped recurrent matrix: W - Wᵀ - γI.
    asym_matrix = compute_asym_recurrent(ps.reservoir_matrix, gamma)
    input_part = dense_bias(ps.input_matrix, inp, nothing)
    state_part = dense_bias(asym_matrix, hidden_state, bias)
    candidate = esn.activation.(input_part .+ state_part)
    # NOTE(review): this computes a leaky blend (1-α)·h + α·tanh(...), while the
    # docstring equation shows the Euler residual step h + ε·tanh(...) from
    # Gallicchio (2024) — confirm which formulation is intended.
    h_new = (one(T) - leak) .* hidden_state .+ leak .* candidate
    return (h_new, (h_new,)), st
end

# Compact one-line display; hyper-parameters are shown only when they differ
# from their defaults (leak_coefficient=1.0, diffusion=1.0, use_bias=false).
function Base.show(io::IO, esn::EuSNCell)
    print(io, "EuSNCell($(esn.in_dims) => $(esn.out_dims)")
    isone(esn.leak_coefficient) ||
        print(io, ", leak_coefficient=$(esn.leak_coefficient)")
    isone(esn.diffusion) || print(io, ", diffusion=$(esn.diffusion)")
    has_bias(esn) || print(io, ", use_bias=false")
    print(io, ")")
    return
end

"""
    compute_asym_recurrent(weight_hh::AbstractMatrix, gamma::AbstractFloat)

Build the antisymmetric, diffusion-damped recurrent matrix
`W - Wᵀ - γI` used by the EuSN update.

Uses the lazy `LinearAlgebra.UniformScaling` `I` so that `γ` is subtracted
from the diagonal directly, instead of materializing a dense
`Matrix{T}(I, n, n)` identity (an O(n²) temporary in the hot path).
"""
function compute_asym_recurrent(weight_hh::AbstractMatrix, gamma::AbstractFloat)
    # `A - γ*I` only touches the diagonal; numerically identical to the
    # dense-identity formulation, including eltype promotion with `gamma`.
    return weight_hh - transpose(weight_hh) - gamma * I
end
117 changes: 117 additions & 0 deletions src/models/eusn.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,117 @@
@doc raw"""
    EuSN(in_dims, res_dims, out_dims, activation=tanh;
        leak_coefficient=1.0, diffusion=1.0,
        init_reservoir=rand_sparse, init_input=scaled_rand,
        init_bias=zeros32, init_state=randn32, use_bias=false,
        state_modifiers=(), readout_activation=identity)

Euler State Network (EuSN) [Gallicchio2024](@cite).

## Equations

```math
\begin{aligned}
    \mathbf{h}(t) = \mathbf{h}(t-1) + \varepsilon \tanh\!\left((\mathbf{W}_h
        - \gamma \mathbf{I})\mathbf{h}(t-1) + \mathbf{W}_x \mathbf{x}(t)
        + \mathbf{b}\right)
\end{aligned}
```

## Arguments

  - `in_dims`: Input dimension.
  - `res_dims`: Reservoir (hidden state) dimension.
  - `out_dims`: Output dimension.
  - `activation`: Reservoir activation (for [`EuSNCell`](@ref)). Default: `tanh`.

## Keyword arguments

  - `leak_coefficient`: Leak rate `α ∈ (0,1]`. Default: `1.0`.
  - `diffusion`: Diffusion coefficient `γ ∈ (0,1]`. Default: `1.0`.
  - `init_reservoir`: Initializer for `W_res`. Default: [`rand_sparse`](@ref).
  - `init_input`: Initializer for `W_in`. Default: [`scaled_rand`](@ref).
  - `init_bias`: Initializer for reservoir bias (used if `use_bias=true`).
    Default: `zeros32`.
  - `init_state`: Initializer used when an external state is not provided.
    Default: `randn32`.
  - `use_bias`: Whether the reservoir uses a bias term. Default: `false`.
  - `state_modifiers`: A layer or collection of layers applied to the reservoir
    state before the readout. Accepts a single layer, an `AbstractVector`, or a
    `Tuple`. Default: empty `()`.
  - `readout_activation`: Activation for the linear readout. Default: `identity`.

## Inputs

  - `x :: AbstractArray (in_dims, batch)`

## Returns

  - Output `y :: (out_dims, batch)`.
  - Updated layer state (NamedTuple).

## Parameters

  - `reservoir` — parameters of the internal [`EuSNCell`](@ref), including:

      + `input_matrix :: (res_dims × in_dims)` — `W_in`
      + `reservoir_matrix :: (res_dims × res_dims)` — `W_res`
      + `bias :: (res_dims,)` — present only if `use_bias=true`
  - `states_modifiers` — a `Tuple` with parameters for each modifier layer (may be empty).
  - `readout` — parameters of [`LinearReadout`](@ref), typically:

      + `weight :: (out_dims × res_dims)` — `W_out`
      + `bias :: (out_dims,)` — `b_out` (if the readout uses bias)

> Exact field names for modifiers/readout follow their respective layer
> definitions.

## States

  - `reservoir` — states for the internal [`EuSNCell`](@ref) (e.g. `rng` used to sample initial hidden states).
  - `states_modifiers` — a `Tuple` with states for each modifier layer.
  - `readout` — states for [`LinearReadout`](@ref).

"""
@concrete struct EuSN <:
                 AbstractEchoStateNetwork{(:reservoir, :states_modifiers, :readout)}
    reservoir
    states_modifiers
    readout
end

# Convenience constructor: wires an `EuSNCell` reservoir, optional state
# modifiers, and a linear readout into a complete EuSN model. All remaining
# keyword arguments are forwarded to the cell.
function EuSN(in_dims::IntegerType, res_dims::IntegerType,
        out_dims::IntegerType, activation = tanh;
        readout_activation = identity,
        state_modifiers = (),
        kwargs...)
    # Stateful wrapper lets the cell carry its hidden state across calls.
    reservoir = StatefulLayer(EuSNCell(in_dims => res_dims, activation; kwargs...))
    # Normalize `state_modifiers` to a Tuple whether given as a single layer,
    # a Tuple, or an AbstractVector.
    modifiers = if state_modifiers isa Union{Tuple, AbstractVector}
        Tuple(state_modifiers)
    else
        (state_modifiers,)
    end
    readout = LinearReadout(res_dims => out_dims, readout_activation)
    return EuSN(reservoir, _wrap_layers(modifiers), readout)
end

# Multi-line display of an `EuSN`: one entry per component, with the modifier
# tuple rendered inline.
function Base.show(io::IO, esn::EuSN)
    print(io, "EuSN(\n")
    print(io, " reservoir = ")
    show(io, esn.reservoir)
    print(io, ",\n")
    print(io, " state_modifiers = ")
    modifiers = esn.states_modifiers
    if isempty(modifiers)
        print(io, "()")
    else
        print(io, "(")
        for (idx, layer) in enumerate(modifiers)
            idx == 1 || print(io, ", ")
            show(io, layer)
        end
        print(io, ")")
    end
    print(io, ",\n")
    print(io, " readout = ")
    show(io, esn.readout)
    print(io, "\n)")
    return
end
7 changes: 5 additions & 2 deletions test/layers/test_esncell.jl
Original file line number Diff line number Diff line change
Expand Up @@ -18,16 +18,19 @@ cell_name(::Type{C}) where {C} = string(nameof(C))

mix_kw(::Type{ESNCell}) = :leak_coefficient
mix_kw(::Type{ES2NCell}) = :proximity
mix_kw(::Type{EuSNCell}) = :leak_coefficient

# Whatever show() actually prints:
mix_label(::Type{ESNCell}) = "leak_coefficient"
mix_label(::Type{ES2NCell}) = "proximity"
mix_label(::Type{EuSNCell}) = "leak_coefficient"

default_extra_ctor_kwargs(::Type{ESNCell}) = NamedTuple()
default_extra_ctor_kwargs(::Type{ES2NCell}) = (init_orthogonal = _W_I,)
default_extra_ctor_kwargs(::Type{EuSNCell}) = NamedTuple() # diffusion exists but we leave default unless overridden

extra_param_keys(::Type{ESNCell}) = ()
extra_param_keys(::Type{ES2NCell}) = (:orthogonal_matrix,)
extra_param_keys(::Type{EuSNCell}) = ()

function build_cell(::Type{C}, in_dims::Integer, out_dims::Integer;
activation = tanh,
Expand Down Expand Up @@ -192,7 +195,7 @@ function test_echo_state_cell_contract(::Type{C}) where {C}
end

@testset "AbstractEchoStateNetworkCell contract" begin
for C in (ESNCell, ES2NCell)
for C in (ESNCell, ES2NCell, EuSNCell)
test_echo_state_cell_contract(C)
end
end
Loading
Loading