From e9401ba7268ea32e2f2aae8c95a2b4ef3e977c5f Mon Sep 17 00:00:00 2001 From: Andrei Cheboksarov <37665782+cheb0@users.noreply.github.com> Date: Sun, 1 Feb 2026 11:14:16 +0400 Subject: [PATCH 1/7] rebase onto fast cmp --- frac/processor/aggregator.go | 2 +- frac/processor/aggregator_test.go | 16 ++ frac/sealed/lids/iterator_asc.go | 31 ++++ frac/sealed/lids/iterator_desc.go | 32 ++++ node/bench_test.go | 81 ++++++++-- node/cmp_lid.go | 20 +++ node/node.go | 5 + node/node_and.go | 31 +++- node/node_and_test.go | 65 ++++++++ node/node_nand.go | 4 + node/node_or.go | 78 ++++++++- node/node_or_test.go | 257 ++++++++++++++++++++++++++++++ node/node_range.go | 4 + node/node_static.go | 42 ++++- node/node_static_test.go | 69 ++++++++ node/node_test.go | 9 ++ node/sourced_node_wrapper.go | 5 + util/algorithms.go | 49 ++++++ util/algorithms_test.go | 254 +++++++++++++++++++++++++++++ 19 files changed, 1033 insertions(+), 21 deletions(-) create mode 100644 node/node_and_test.go create mode 100644 node/node_or_test.go create mode 100644 node/node_static_test.go create mode 100644 util/algorithms.go create mode 100644 util/algorithms_test.go diff --git a/frac/processor/aggregator.go b/frac/processor/aggregator.go index ec5b0e94..317e91b9 100644 --- a/frac/processor/aggregator.go +++ b/frac/processor/aggregator.go @@ -404,7 +404,7 @@ func NewSourcedNodeIterator(sourced node.Sourced, ti tokenIndex, tids []uint32, func (s *SourcedNodeIterator) ConsumeTokenSource(lid node.CmpLID) (uint32, bool, error) { for !s.lastID.IsNull() && s.lastID.Less(lid) { - s.lastID, s.lastSource = s.sourcedNode.NextSourced() + s.lastID, s.lastSource = s.sourcedNode.NextSourcedGeq(lid) } exists := !s.lastID.IsNull() && s.lastID == lid diff --git a/frac/processor/aggregator_test.go b/frac/processor/aggregator_test.go index 6d7f375a..bd5e453e 100644 --- a/frac/processor/aggregator_test.go +++ b/frac/processor/aggregator_test.go @@ -166,6 +166,22 @@ func (m *MockNode) String() string { return 
reflect.TypeOf(m).String() } +/*func (m *MockNode) NextSourced() (uint32, uint32, bool) { + return m.NextSourcedGeq(0) +} + +func (m *MockNode) NextSourcedGeq(minLID uint32) (uint32, uint32, bool) { + for len(m.Pairs) > 0 && m.Pairs[0].LID < minLID { + m.Pairs = m.Pairs[1:] + } + if len(m.Pairs) == 0 { + return 0, 0, false + } + first := m.Pairs[0] + m.Pairs = m.Pairs[1:] + return first.LID, first.Source, true +}*/ + func (m *MockNode) NextSourced() (node.CmpLID, uint32) { if len(m.Pairs) == 0 { return node.NullCmpLID(), 0 diff --git a/frac/sealed/lids/iterator_asc.go b/frac/sealed/lids/iterator_asc.go index 368adc65..44bfbab2 100644 --- a/frac/sealed/lids/iterator_asc.go +++ b/frac/sealed/lids/iterator_asc.go @@ -7,6 +7,7 @@ import ( "github.com/ozontech/seq-db/logger" "github.com/ozontech/seq-db/node" + "github.com/ozontech/seq-db/util" ) type IteratorAsc Cursor @@ -72,3 +73,33 @@ func (it *IteratorAsc) Next() node.CmpLID { it.lids = it.lids[:i] return node.NewCmpLIDOrderAsc(lid) } + +// NextGeq returns the next (in reverse iteration order) LID that is <= maxLID. 
+func (it *IteratorAsc) NextGeq(nextID node.CmpLID) node.CmpLID { + for { + for len(it.lids) == 0 { + if !it.tryNextBlock { + return node.NewCmpLIDOrderAsc(0) + } + + it.loadNextLIDsBlock() + it.lids, it.tryNextBlock = it.narrowLIDsRange(it.lids, it.tryNextBlock) + it.counter.AddLIDsCount(len(it.lids)) + } + + // fast path: smallest remaining > nextID => skip entire block + if it.lids[0] > nextID.Unpack() { + it.lids = it.lids[:0] + continue + } + + idx, found := util.GallopSearchLeq(it.lids, nextID.Unpack()) + if found { + lid := it.lids[idx] + it.lids = it.lids[:idx] + return node.NewCmpLIDOrderAsc(lid) + } + + it.lids = it.lids[:0] + } +} diff --git a/frac/sealed/lids/iterator_desc.go b/frac/sealed/lids/iterator_desc.go index 20ea1552..8bef3913 100644 --- a/frac/sealed/lids/iterator_desc.go +++ b/frac/sealed/lids/iterator_desc.go @@ -8,6 +8,7 @@ import ( "github.com/ozontech/seq-db/logger" "github.com/ozontech/seq-db/node" + "github.com/ozontech/seq-db/util" ) type IteratorDesc Cursor @@ -72,3 +73,34 @@ func (it *IteratorDesc) Next() node.CmpLID { it.lids = it.lids[1:] return node.NewCmpLIDOrderDesc(lid) } + +// NextGeq finds next greater or equal +func (it *IteratorDesc) NextGeq(nextID node.CmpLID) node.CmpLID { + for { + for len(it.lids) == 0 { + if !it.tryNextBlock { + return node.NewCmpLIDOrderDesc(math.MaxUint32) + } + + it.loadNextLIDsBlock() // last chunk in block but not last for tid; need load next block + it.lids, it.tryNextBlock = it.narrowLIDsRange(it.lids, it.tryNextBlock) + it.counter.AddLIDsCount(len(it.lids)) // inc loaded LIDs count + } + + // fast path: last LID < nextID => skip the entire block + if nextID.Unpack() > it.lids[len(it.lids)-1] { + it.lids = it.lids[:0] + continue + } + + idx, found := util.GallopSearchGeq(it.lids, nextID.Unpack()) + if found { + it.lids = it.lids[idx:] + lid := it.lids[0] + it.lids = it.lids[1:] + return node.NewCmpLIDOrderDesc(lid) + } + + it.lids = it.lids[:0] + } +} diff --git a/node/bench_test.go 
b/node/bench_test.go index 9f15db1f..8006a15c 100644 --- a/node/bench_test.go +++ b/node/bench_test.go @@ -8,11 +8,16 @@ import ( "github.com/stretchr/testify/assert" ) -func newNodeStaticSize(size int) *staticAsc { +func newNodeStaticSizeRand(size int) *staticAsc { data, _ := Generate(size) return &staticAsc{staticCursor: staticCursor{data: data}} } +func newNodeStaticSizeFixedDelta(size int, start int, delta int) *staticAsc { + data, _ := GenerateFixedDelta(size, start, delta) + return &staticAsc{staticCursor: staticCursor{data: data}} +} + func Generate(n int) ([]uint32, uint32) { v := make([]uint32, n) last := uint32(1) @@ -23,6 +28,16 @@ func Generate(n int) ([]uint32, uint32) { return v, last } +func GenerateFixedDelta(n, start, step int) ([]uint32, uint32) { + v := make([]uint32, n) + last := uint32(start) + for i := 0; i < len(v); i++ { + v[i] = last + last += uint32(step) + } + return v, last +} + func BenchmarkNot(b *testing.B) { sizes := []int{1000, 10_000, 1_000_000} @@ -65,7 +80,7 @@ func BenchmarkOr(b *testing.B) { for _, s := range sizes { b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) { res := make([]uint32, 0, s*2) - n := NewOr(newNodeStaticSize(s), newNodeStaticSize(s)) + n := NewOr(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) for b.Loop() { res = readAllInto(n, res) @@ -82,7 +97,7 @@ func BenchmarkAnd(b *testing.B) { for _, s := range sizes { b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) { res := make([]uint32, 0, s) - n := NewAnd(newNodeStaticSize(s), newNodeStaticSize(s)) + n := NewAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) for b.Loop() { res = readAllInto(n, res) @@ -99,7 +114,7 @@ func BenchmarkNAnd(b *testing.B) { for _, s := range sizes { b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) { res := make([]uint32, 0, s) - n := NewNAnd(newNodeStaticSize(s), newNodeStaticSize(s)) + n := NewNAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) for b.Loop() { res = readAllInto(n, res) @@ -115,10 +130,10 @@ func 
BenchmarkAndTree(b *testing.B) { for _, s := range sizes { b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) { - n1 := NewAnd(newNodeStaticSize(s), newNodeStaticSize(s)) - n2 := NewAnd(newNodeStaticSize(s), newNodeStaticSize(s)) - n3 := NewAnd(newNodeStaticSize(s), newNodeStaticSize(s)) - n4 := NewAnd(newNodeStaticSize(s), newNodeStaticSize(s)) + n1 := NewAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) + n2 := NewAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) + n3 := NewAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) + n4 := NewAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) n12 := NewAnd(n1, n2) n34 := NewAnd(n3, n4) n := NewAnd(n12, n34) @@ -138,10 +153,10 @@ func BenchmarkOrTree(b *testing.B) { for _, s := range sizes { b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) { - n1 := NewOr(newNodeStaticSize(s), newNodeStaticSize(s)) - n2 := NewOr(newNodeStaticSize(s), newNodeStaticSize(s)) - n3 := NewOr(newNodeStaticSize(s), newNodeStaticSize(s)) - n4 := NewOr(newNodeStaticSize(s), newNodeStaticSize(s)) + n1 := NewOr(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) + n2 := NewOr(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) + n3 := NewOr(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) + n4 := NewOr(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) n12 := NewOr(n1, n2) n34 := NewOr(n3, n4) n := NewOr(n12, n34) @@ -157,15 +172,51 @@ func BenchmarkOrTree(b *testing.B) { } } +// BenchmarkOrTreeNextGeq checks the performance of NextGeq vs Next when no skipping occur and all node +// yield distinct values (no intersection between nodes) +func BenchmarkOrTreeNextGeq(b *testing.B) { + sizes := []int{1000, 10_000, 1_000_000} + // step is equal to total number of nodes, so that every node produces distinct values + step := 8 + + for _, s := range sizes { + b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) { + n1 := NewOr( + newNodeStaticSizeFixedDelta(s, 1, step), + newNodeStaticSizeFixedDelta(s, 5, step)) + n2 := NewOr( + 
newNodeStaticSizeFixedDelta(s, 2, step), + newNodeStaticSizeFixedDelta(s, 6, step)) + n3 := NewOr( + newNodeStaticSizeFixedDelta(s, 3, step), + newNodeStaticSizeFixedDelta(s, 8, step)) + n4 := NewOr( + newNodeStaticSizeFixedDelta(s, 4, step), + newNodeStaticSizeFixedDelta(s, 7, step)) + n12 := NewOr(n1, n2) + n34 := NewOr(n3, n4) + n := NewOr(n12, n34) + res := make([]uint32, 0, s*8) + + for b.Loop() { + res = readAllIntoGeq(n, res) + } + + assert.Equal(b, cap(res), s*8) + + }) + } +} + func BenchmarkComplex(b *testing.B) { sizes := []int{1000, 10_000, 1_000_000} for _, s := range sizes { b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) { res := make([]uint32, 0, s*2) - n1 := NewAnd(newNodeStaticSize(s), newNodeStaticSize(s)) - n2 := NewOr(newNodeStaticSize(s), newNodeStaticSize(s)) - n3 := NewNAnd(newNodeStaticSize(s), newNodeStaticSize(s)) + n1 := NewAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) + n2 := NewOr(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) + n3 := NewNAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) n12 := NewOr(n1, n2) n := NewAnd(n12, n3) diff --git a/node/cmp_lid.go b/node/cmp_lid.go index 20bd46e7..f27c8314 100644 --- a/node/cmp_lid.go +++ b/node/cmp_lid.go @@ -49,6 +49,10 @@ func (c CmpLID) Less(other CmpLID) bool { return c.lid < other.lid } +func (c CmpLID) LessOrEq(other CmpLID) bool { + return c.lid <= other.lid +} + func (c CmpLID) Inc() CmpLID { c.lid++ return c @@ -58,6 +62,22 @@ func (c CmpLID) Eq(other CmpLID) bool { return c.lid == other.lid } +func Max(left CmpLID, right CmpLID) CmpLID { + if left.lid > right.lid { + return left + } else { + return right + } +} + +func Min(left CmpLID, right CmpLID) CmpLID { + if left.lid < right.lid { + return left + } else { + return right + } +} + func (c CmpLID) Unpack() uint32 { return c.lid ^ c.mask } diff --git a/node/node.go b/node/node.go index 4f5bc0b2..934cf4bb 100644 --- a/node/node.go +++ b/node/node.go @@ -7,10 +7,15 @@ import ( type Node interface { 
fmt.Stringer // for testing Next() CmpLID + // NextGeq returns next greater or equal (GEQ) lid. Currently, some nodes do not support it + // so the caller must check the output and be ready call it again if needed, like when using Next. + // Therefore, nextID is more like a hint. + NextGeq(nextID CmpLID) CmpLID } type Sourced interface { fmt.Stringer // for testing // aggregation need source NextSourced() (id CmpLID, source uint32) + NextSourcedGeq(nextLID CmpLID) (id CmpLID, source uint32) } diff --git a/node/node_and.go b/node/node_and.go index 1ecc454b..4088c4b5 100644 --- a/node/node_and.go +++ b/node/node_and.go @@ -31,13 +31,40 @@ func (n *nodeAnd) readRight() { n.rightID = n.right.Next() } +func (n *nodeAnd) readLeftGeq(nextID CmpLID) { + n.leftID = n.left.NextGeq(nextID) +} + +func (n *nodeAnd) readRightGeq(nextID CmpLID) { + n.rightID = n.right.NextGeq(nextID) +} + func (n *nodeAnd) Next() CmpLID { for !n.leftID.IsNull() && !n.rightID.IsNull() && !n.leftID.Eq(n.rightID) { for !n.rightID.IsNull() && n.leftID.Less(n.rightID) { - n.readLeft() + n.readLeftGeq(n.rightID) + } + for !n.rightID.IsNull() && n.rightID.Less(n.leftID) { + n.readRightGeq(n.leftID) + } + } + if n.leftID.IsNull() || n.rightID.IsNull() { + return NullCmpLID() + } + cur := n.leftID + n.readLeft() + n.readRight() + return cur +} + +func (n *nodeAnd) NextGeq(nextID CmpLID) CmpLID { + // TODO first skip not interesting values, then call Next() + for !n.leftID.IsNull() && !n.rightID.IsNull() && !n.leftID.Eq(n.rightID) { + for !n.rightID.IsNull() && n.leftID.Less(n.rightID) { + n.readLeftGeq(Max(n.rightID, nextID)) } for !n.rightID.IsNull() && n.rightID.Less(n.leftID) { - n.readRight() + n.readRightGeq(Max(n.leftID, nextID)) } } if n.leftID.IsNull() || n.rightID.IsNull() { diff --git a/node/node_and_test.go b/node/node_and_test.go new file mode 100644 index 00000000..7a737263 --- /dev/null +++ b/node/node_and_test.go @@ -0,0 +1,65 @@ +package node + +import ( + "math" + "math/rand/v2" + 
"testing" + + "github.com/stretchr/testify/assert" +) + +func TestNodeAnd_NextGeqAscending(t *testing.T) { + left := NewStatic([]uint32{1, 2, 7, 10, 20, 25, 26, 30, 50, 80, 90, 100}, false) + right := NewStatic([]uint32{1, 3, 4, 7, 9, 30, 40, 45, 60, 80, 110}, false) + + node := NewAnd(left, right) + + // Currently, nodes instantiate their state on creation, which will be fixed later. + // Thus, the first LID returned is the first from left and right + id := node.NextGeq(NewCmpLIDOrderDesc(7)) + assert.Equal(t, uint32(1), id.Unpack()) + + id = node.NextGeq(NewCmpLIDOrderDesc(7)) + assert.Equal(t, uint32(7), id.Unpack()) + + id = node.NextGeq(NewCmpLIDOrderDesc(50)) + assert.Equal(t, uint32(80), id.Unpack()) + + id = node.NextGeq(NewCmpLIDOrderDesc(50)) + assert.True(t, id.IsNull()) +} + +// TestNodeAnd_NextGeqCompatibility tests that just calling NextGeq with 0 passed as argument is equivalent to +// calling Next +func TestNodeAnd_NextGeqCompatibility(t *testing.T) { + for _, rev := range []bool{true, false} { + left := []uint32{rand.Uint32N(10)} + right := []uint32{rand.Uint32N(10)} + + for i := 1; i < 1000; i++ { + left = append(left, left[i-1]+rand.Uint32N(10)) + right = append(right, right[i-1]+rand.Uint32N(10)) + } + + node := NewAnd(NewStatic(left, rev), NewStatic(right, rev)) + nodeGeq := NewAnd(NewStatic(left, rev), NewStatic(right, rev)) + + var zero uint32 + if rev { + zero = math.MaxUint32 + } else { + zero = 0 + } + + for { + lid := node.Next() + lidGeq := nodeGeq.NextGeq(NewCmpLID(zero, rev)) + + assert.Equal(t, lid, lidGeq) + + if lid.IsNull() { + break + } + } + } +} diff --git a/node/node_nand.go b/node/node_nand.go index 0acdb3e0..5bcf0345 100644 --- a/node/node_nand.go +++ b/node/node_nand.go @@ -43,3 +43,7 @@ func (n *nodeNAnd) Next() CmpLID { } return NullCmpLID() } + +func (n *nodeNAnd) NextGeq(nextID CmpLID) CmpLID { + return n.Next() +} diff --git a/node/node_or.go b/node/node_or.go index 31285d3b..4b08f139 100644 --- a/node/node_or.go +++ 
b/node/node_or.go @@ -1,6 +1,8 @@ package node -import "fmt" +import ( + "fmt" +) type nodeOr struct { left Node @@ -29,6 +31,14 @@ func (n *nodeOr) readRight() { n.rightID = n.right.Next() } +func (n *nodeOr) readLeftGeq(nextID CmpLID) { + n.leftID = n.left.NextGeq(nextID) +} + +func (n *nodeOr) readRightGeq(nextID CmpLID) { + n.rightID = n.right.NextGeq(nextID) +} + func (n *nodeOr) Next() CmpLID { if n.leftID.IsNull() && n.rightID.IsNull() { return n.leftID @@ -50,6 +60,36 @@ func (n *nodeOr) Next() CmpLID { return cur } +func (n *nodeOr) NextGeq(nextID CmpLID) CmpLID { + // fast path: if we have both branches and nothing to skip, then choose lowest and return + if !n.leftID.IsNull() && !n.rightID.IsNull() && nextID.Less(Min(n.leftID, n.rightID)) { + if n.leftID.Less(n.rightID) { + cur := n.leftID + n.readLeft() + return cur + } else if n.rightID.Less(n.leftID) { + cur := n.rightID + n.readRight() + return cur + } + + cur := n.leftID + n.readLeft() + n.readRight() + return cur + } + + // skip past nextID + if n.leftID.Less(nextID) { + n.readLeftGeq(nextID) + } + if n.rightID.Less(nextID) { + n.readRightGeq(nextID) + } + + return n.Next() +} + type nodeOrAgg struct { reverse bool @@ -82,6 +122,14 @@ func (n *nodeOrAgg) readRight() { n.rightID, n.rightSource = n.right.NextSourced() } +func (n *nodeOrAgg) readLeftGeq(nextID CmpLID) { + n.leftID, n.leftSource = n.left.NextSourcedGeq(nextID) +} + +func (n *nodeOrAgg) readRightGeq(nextID CmpLID) { + n.rightID, n.rightSource = n.right.NextSourcedGeq(nextID) +} + func (n *nodeOrAgg) NextSourced() (CmpLID, uint32) { if n.leftID.IsNull() && n.rightID.IsNull() { return n.leftID, 0 @@ -97,3 +145,31 @@ func (n *nodeOrAgg) NextSourced() (CmpLID, uint32) { n.readRight() return cur, curSource } + +func (n *nodeOrAgg) NextSourcedGeq(nextID CmpLID) (CmpLID, uint32) { + // Fast path: if we have at least one of left or right and there is nothing to skip, then choose lowest and return.
+ minID := Min(n.leftID, n.rightID) + if nextID.LessOrEq(minID) { + if n.leftID.Less(n.rightID) { + cur := n.leftID + curSource := n.leftSource + n.readLeft() + return cur, curSource + } else { + // we don't need deduplication + cur := n.rightID + curSource := n.rightSource + n.readRight() + return cur, curSource + } + } + + if n.leftID.Less(nextID) { + n.readLeftGeq(nextID) + } + if n.rightID.Less(nextID) { + n.readRightGeq(nextID) + } + + return n.NextSourced() +} diff --git a/node/node_or_test.go b/node/node_or_test.go new file mode 100644 index 00000000..f5bf1fd3 --- /dev/null +++ b/node/node_or_test.go @@ -0,0 +1,257 @@ +package node + +import ( + "math" + "math/rand/v2" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNodeOr_NextGeqAscending(t *testing.T) { + left := NewStatic([]uint32{2, 7, 10, 20, 25, 26, 30, 50}, false) + right := NewStatic([]uint32{1, 3, 4, 7, 9, 30, 40}, false) + + node := NewOr(left, right) + + id := node.NextGeq(NewCmpLIDOrderDesc(7)) + assert.Equal(t, uint32(7), id.Unpack()) + + id = node.NextGeq(NewCmpLIDOrderDesc(7)) + assert.Equal(t, uint32(9), id.Unpack()) + + id = node.NextGeq(NewCmpLIDOrderDesc(24)) + assert.Equal(t, uint32(25), id.Unpack()) + + id = node.NextGeq(NewCmpLIDOrderDesc(30)) + assert.Equal(t, uint32(30), id.Unpack()) + + id = node.NextGeq(NewCmpLIDOrderDesc(51)) + assert.True(t, id.IsNull()) +} + +// TestNodeOr_NextGeqCompatibility tests that just calling NextGeq with LID zero value passed as argument is equivalent to +// calling Next +func TestNodeOr_NextGeqCompatibility(t *testing.T) { + for _, rev := range []bool{true, false} { + left := []uint32{rand.Uint32N(10)} + right := []uint32{rand.Uint32N(10)} + + for i := 1; i < 1000; i++ { + left = append(left, left[i-1]+rand.Uint32N(10)) + right = append(right, right[i-1]+rand.Uint32N(10)) + } + + node := NewOr(NewStatic(left, rev), NewStatic(right, rev)) + nodeGeq := NewOr(NewStatic(left, rev), NewStatic(right, 
rev)) + + var zero uint32 + if rev { + zero = math.MaxUint32 + } else { + zero = 0 + } + + for { + lid := node.Next() + lidGeq := nodeGeq.NextGeq(NewCmpLID(zero, rev)) + + assert.Equal(t, lid, lidGeq) + + if lid.IsNull() { + break + } + } + } +} + +// TestNodeOrAgg_NoDedup tests that nodeOrAgg yields values both from left and right for same lid. +func TestNodeOrAgg_NoDedup(t *testing.T) { + left := NewSourcedNodeWrapper(NewStatic([]uint32{1, 5, 7}, false), 1) + right := NewSourcedNodeWrapper(NewStatic([]uint32{5, 8}, false), 2) + + orAgg := NewNodeOrAgg(left, right, false) + pairs := readAllSourced(orAgg) + + // expected sources for lid=5 + var sources []uint32 + + for _, p := range pairs { + id, src := p[0], p[1] + if id == 5 { + sources = append(sources, src) + } + } + + require.Len(t, sources, 2, "expected id 5 to be returned twice from both children") + assert.ElementsMatch(t, []uint32{1, 2}, sources, "expected id 5 from both left and right sources") +} + +func TestNodeOrAgg_MergeAscending(t *testing.T) { + left := NewSourcedNodeWrapper(NewStatic([]uint32{1, 3, 5}, false), 0) + right := NewSourcedNodeWrapper(NewStatic([]uint32{2, 4, 6}, false), 1) + + orAgg := NewNodeOrAgg(left, right, false) + got := readAllSourced(orAgg) + + want := [][2]uint32{ + {1, 0}, + {2, 1}, + {3, 0}, + {4, 1}, + {5, 0}, + {6, 1}, + } + + assert.Equal(t, want, got) +} + +func TestNodeOrAgg_MergeAscendingWithDups(t *testing.T) { + left := NewSourcedNodeWrapper(NewStatic([]uint32{1, 2, 3, 5, 8}, false), 0) + right := NewSourcedNodeWrapper(NewStatic([]uint32{2, 3, 4, 6, 8}, false), 1) + + orAgg := NewNodeOrAgg(left, right, false) + got := readAllSourced(orAgg) + + want := [][2]uint32{ + {1, 0}, + {2, 1}, + {2, 0}, + {3, 1}, + {3, 0}, + {4, 1}, + {5, 0}, + {6, 1}, + {8, 1}, + {8, 0}, + } + + assert.Equal(t, want, got) +} + +// TestNodeOrAgg_NextSourcedGeq tests we can navigate to a lid with NextGeq and do not skip it from +// both left and right sides (no deduplication like in ordinary OR 
tree) +func TestNodeOrAgg_NextSourcedGeq(t *testing.T) { + left := NewSourcedNodeWrapper(NewStatic([]uint32{1, 2, 3, 5, 8, 15, 19}, false), 0) + right := NewSourcedNodeWrapper(NewStatic([]uint32{2, 3, 4, 6, 8, 14, 20}, false), 1) + + orAgg := NewNodeOrAgg(left, right, false) + + id, source := orAgg.NextSourcedGeq(NewCmpLIDOrderDesc(3)) + assert.Equal(t, uint32(3), id.Unpack()) + assert.Equal(t, uint32(1), source) + + // 3 returned again, but with different source - no deduplication + id, source = orAgg.NextSourcedGeq(NewCmpLIDOrderDesc(3)) + assert.Equal(t, uint32(3), id.Unpack()) + assert.Equal(t, uint32(0), source) + + id, source = orAgg.NextSourcedGeq(NewCmpLIDOrderDesc(6)) + assert.Equal(t, uint32(6), id.Unpack()) + assert.Equal(t, uint32(1), source) + + id, source = orAgg.NextSourcedGeq(NewCmpLIDOrderDesc(17)) + assert.Equal(t, uint32(19), id.Unpack()) + assert.Equal(t, uint32(0), source) +} + +// TestNodeOrAgg_NextSourcedGeq tests we can navigate to a lid with NextGeq in reverse way and do not skip it from +// both left and right sides (no deduplication like in ordinary OR tree) +func TestNodeOrAgg_NextSourcedGeq_Reverse(t *testing.T) { + left := NewSourcedNodeWrapper(NewStatic([]uint32{1, 2, 3, 5, 8, 15, 19}, true), 0) + right := NewSourcedNodeWrapper(NewStatic([]uint32{2, 3, 4, 6, 8, 14, 20}, true), 1) + + orAgg := NewNodeOrAgg(left, right, true) + + id, source := orAgg.NextSourcedGeq(NewCmpLIDOrderAsc(8)) + assert.Equal(t, uint32(8), id.Unpack()) + assert.Equal(t, uint32(1), source) + + // 8 returned again, but with different source - no deduplication + id, source = orAgg.NextSourcedGeq(NewCmpLIDOrderAsc(8)) + assert.Equal(t, uint32(8), id.Unpack()) + assert.Equal(t, uint32(0), source) + + id, source = orAgg.NextSourcedGeq(NewCmpLIDOrderAsc(4)) + assert.Equal(t, uint32(4), id.Unpack()) + assert.Equal(t, uint32(1), source) + + id, source = orAgg.NextSourcedGeq(NewCmpLIDOrderAsc(1)) + assert.Equal(t, uint32(1), id.Unpack()) + assert.Equal(t, uint32(0), 
source) + + id, _ = orAgg.NextSourcedGeq(NewCmpLIDOrderAsc(1)) + assert.True(t, id.IsNull()) +} + +func TestNodeOrAgg_MergeDescending(t *testing.T) { + left := NewSourcedNodeWrapper(NewStatic([]uint32{1, 3, 5}, true), 0) + right := NewSourcedNodeWrapper(NewStatic([]uint32{2, 4, 6}, true), 1) + + orAgg := NewNodeOrAgg(left, right, true) + got := readAllSourced(orAgg) + + want := [][2]uint32{ + {6, 1}, + {5, 0}, + {4, 1}, + {3, 0}, + {2, 1}, + {1, 0}, + } + + assert.Equal(t, want, got) +} + +func TestNodeOrAgg_EmptySide(t *testing.T) { + t.Run("empty_left", func(t *testing.T) { + left := NewSourcedNodeWrapper(NewStatic(nil, false), 0) + right := NewSourcedNodeWrapper(NewStatic([]uint32{10, 20}, false), 1) + + orAgg := NewNodeOrAgg(left, right, false) + got := readAllSourced(orAgg) + + want := [][2]uint32{ + {10, 1}, + {20, 1}, + } + + assert.Equal(t, want, got) + }) + + t.Run("empty_right", func(t *testing.T) { + left := NewSourcedNodeWrapper(NewStatic([]uint32{10, 20}, false), 0) + right := NewSourcedNodeWrapper(NewStatic(nil, false), 1) + + orAgg := NewNodeOrAgg(left, right, false) + got := readAllSourced(orAgg) + + want := [][2]uint32{ + {10, 0}, + {20, 0}, + } + + assert.Equal(t, want, got) + }) + + t.Run("both_empty", func(t *testing.T) { + left := NewSourcedNodeWrapper(NewStatic(nil, false), 0) + right := NewSourcedNodeWrapper(NewStatic(nil, false), 1) + + orAgg := NewNodeOrAgg(left, right, false) + id, _ := orAgg.NextSourced() + + assert.True(t, id.IsNull()) + }) +} + +func readAllSourced(n Sourced) [][2]uint32 { + var res [][2]uint32 + id, src := n.NextSourced() + for !id.IsNull() { + res = append(res, [2]uint32{id.Unpack(), src}) + id, src = n.NextSourced() + } + return res +} diff --git a/node/node_range.go b/node/node_range.go index 3a4b70dd..09335636 100644 --- a/node/node_range.go +++ b/node/node_range.go @@ -24,3 +24,7 @@ func (n *nodeRange) Next() CmpLID { n.curID = n.curID.Inc() return result } + +func (n *nodeRange) NextGeq(nextID CmpLID) CmpLID { + 
return n.Next() +} diff --git a/node/node_static.go index a6dabaf4..3960a94d 100644 --- a/node/node_static.go +++ b/node/node_static.go @@ -1,16 +1,21 @@ package node -import "math" +import "github.com/ozontech/seq-db/util" +import ( + "math" +) type staticCursor struct { ptr int data []uint32 } +// staticAsc stores lids in data slice in ascending order, and iterates in increasing order type staticAsc struct { staticCursor } +// staticDesc stores lids in data slice in ascending order, but iterates from the end (in descending order) type staticDesc struct { staticCursor } @@ -43,6 +48,24 @@ func (n *staticAsc) Next() CmpLID { return NewCmpLIDOrderDesc(cur) } +// NextGeq finds next greater or equals since iteration is in ascending order +func (n *staticAsc) NextGeq(nextID CmpLID) CmpLID { + if n.ptr >= len(n.data) { + return NewCmpLIDOrderDesc(math.MaxUint32) + } + + from := n.ptr + idx, found := util.GallopSearchGeq(n.data[from:], nextID.Unpack()) + if !found { + return NewCmpLIDOrderDesc(math.MaxUint32) + } + + i := from + idx + cur := n.data[i] + n.ptr = i + 1 + return NewCmpLIDOrderDesc(cur) +} + func (n *staticDesc) Next() CmpLID { // staticDesc is used in docs order asc, hence we return CmpLID with asc order if n.ptr < 0 { @@ -53,7 +76,22 @@ func (n *staticDesc) Next() CmpLID { return NewCmpLIDOrderAsc(cur) } -// MakeStaticNodes is currently used only for tests +// NextGeq finds next less or equals since iteration is in descending order +func (n *staticDesc) NextGeq(nextID CmpLID) CmpLID { + if n.ptr < 0 { + return NewCmpLIDOrderAsc(0) + } + idx, found := util.GallopSearchLeq(n.data[:n.ptr+1], nextID.Unpack()) + if !found { + return NewCmpLIDOrderAsc(0) + } + + cur := n.data[idx] + n.ptr = idx - 1 + return NewCmpLIDOrderAsc(cur) +} + +// MakeStaticNodes is currently used only for tests func MakeStaticNodes(data [][]uint32) []Node { nodes := make([]Node, len(data)) for i, values := range data { diff --git a/node/node_static_test.go
b/node/node_static_test.go new file mode 100644 index 00000000..56d2a0b2 --- /dev/null +++ b/node/node_static_test.go @@ -0,0 +1,69 @@ +package node + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStaticAscNextGeq(t *testing.T) { + lids := []uint32{1, 3, 5, 7, 9} + n := NewStatic(lids, false).(*staticAsc) + + id := n.NextGeq(NewCmpLIDOrderDesc(0)) + assert.False(t, id.IsNull()) + assert.Equal(t, uint32(1), id.Unpack()) + + id = n.NextGeq(NewCmpLIDOrderDesc(4)) + assert.False(t, id.IsNull()) + assert.Equal(t, uint32(5), id.Unpack()) + + // 5 has already been returned, so the next value >= 5 is 7. + id = n.NextGeq(NewCmpLIDOrderDesc(5)) + assert.False(t, id.IsNull()) + assert.Equal(t, uint32(7), id.Unpack()) + + id = n.NextGeq(NewCmpLIDOrderDesc(10)) + assert.True(t, id.IsNull()) +} + +func TestStaticDescNextGeq(t *testing.T) { + lids := []uint32{1, 3, 5, 7, 9} + n := NewStatic(lids, true).(*staticDesc) + + id := n.NextGeq(NewCmpLIDOrderDesc(10)) + assert.False(t, id.IsNull()) + assert.Equal(t, uint32(9), id.Unpack()) + + id = n.NextGeq(NewCmpLIDOrderDesc(10)) + assert.False(t, id.IsNull()) + assert.Equal(t, uint32(7), id.Unpack()) + + id = n.NextGeq(NewCmpLIDOrderDesc(10)) + assert.False(t, id.IsNull()) + assert.Equal(t, uint32(5), id.Unpack()) +} + +func TestStaticDescNextGeq_WithThreshold(t *testing.T) { + lids := []uint32{1, 3, 5, 7, 9} + n := NewStatic(lids, true).(*staticDesc) + + id := n.NextGeq(NewCmpLIDOrderDesc(8)) + assert.False(t, id.IsNull()) + assert.Equal(t, uint32(7), id.Unpack()) + + id = n.NextGeq(NewCmpLIDOrderDesc(8)) + assert.False(t, id.IsNull()) + assert.Equal(t, uint32(5), id.Unpack()) + + id = n.NextGeq(NewCmpLIDOrderDesc(8)) + assert.False(t, id.IsNull()) + assert.Equal(t, uint32(3), id.Unpack()) + + id = n.NextGeq(NewCmpLIDOrderDesc(8)) + assert.False(t, id.IsNull()) + assert.Equal(t, uint32(1), id.Unpack()) + + id = n.NextGeq(NewCmpLIDOrderDesc(8)) + assert.True(t, id.IsNull()) +} diff --git 
a/node/node_test.go b/node/node_test.go index bd0a2f8f..ecbf4865 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -14,6 +14,15 @@ func readAllInto(node Node, ids []uint32) []uint32 { return ids } +func readAllIntoGeq(node Node, ids []uint32) []uint32 { + id := node.Next() + for !id.IsNull() { + ids = append(ids, id.Unpack()) + id = node.NextGeq(id) + } + return ids +} + func readAll(node Node) []uint32 { return readAllInto(node, nil) } diff --git a/node/sourced_node_wrapper.go b/node/sourced_node_wrapper.go index 0023f4ac..45b351ef 100644 --- a/node/sourced_node_wrapper.go +++ b/node/sourced_node_wrapper.go @@ -14,6 +14,11 @@ func (w *sourcedNodeWrapper) NextSourced() (CmpLID, uint32) { return cmp, w.source } +func (w *sourcedNodeWrapper) NextSourcedGeq(nextID CmpLID) (CmpLID, uint32) { + id := w.node.NextGeq(nextID) + return id, w.source +} + func NewSourcedNodeWrapper(d Node, source int) Sourced { return &sourcedNodeWrapper{node: d, source: uint32(source)} } diff --git a/util/algorithms.go b/util/algorithms.go new file mode 100644 index 00000000..ce5d9801 --- /dev/null +++ b/util/algorithms.go @@ -0,0 +1,49 @@ +package util + +import "sort" + +// GallopSearchGeq returns the smallest index i in ascending sorted vals such that vals[i] >= x +func GallopSearchGeq(vals []uint32, x uint32) (idx int, found bool) { + n := len(vals) + if n == 0 { + return 0, false + } + if vals[0] >= x { + return 0, true + } + hi := 1 + for hi < n && vals[hi] < x { + hi *= 2 + } + searchLen := min(n, hi+1) + idx = sort.Search(searchLen, func(i int) bool { return vals[i] >= x }) + if idx >= searchLen { + return 0, false + } + return idx, true +} + +// GallopSearchLeq returns the largest index i in ascending sorted vals such that vals[i] <= x +func GallopSearchLeq(vals []uint32, x uint32) (idx int, found bool) { + n := len(vals) + if n == 0 { + return 0, false + } + if vals[n-1] <= x { + return n - 1, true + } + left := n - 1 + step := 1 + for left >= 0 && vals[left] > x { +
left -= step + step *= 2 + } + + left = max(0, left) + searchLen := n - left + j := sort.Search(searchLen, func(j int) bool { return vals[left+j] > x }) + if j == 0 { + return 0, false + } + return left + j - 1, true +} diff --git a/util/algorithms_test.go b/util/algorithms_test.go new file mode 100644 index 00000000..a7c27cef --- /dev/null +++ b/util/algorithms_test.go @@ -0,0 +1,254 @@ +package util + +import ( + "math/rand" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGallopSearchGeq(t *testing.T) { + tests := []struct { + name string + vals []uint32 + geq uint32 + expectedIdx int + expectedFound bool + }{ + { + name: "empty", + vals: nil, + geq: 1, + expectedIdx: 0, + expectedFound: false, + }, + { + name: "single_found", + vals: []uint32{5}, + geq: 3, + expectedIdx: 0, + expectedFound: true, + }, + { + name: "single_not_found", + vals: []uint32{2}, + geq: 5, + expectedIdx: 0, + expectedFound: false, + }, + { + name: "first_element_greater", + vals: []uint32{1, 3, 5, 7, 9}, + geq: 0, + expectedIdx: 0, + expectedFound: true, + }, + { + name: "first_element_equals", + vals: []uint32{1, 3, 5, 7, 9}, + geq: 1, + expectedIdx: 0, + expectedFound: true, + }, + { + name: "middle_found_greater", + vals: []uint32{1, 3, 5, 7, 9}, + geq: 4, + expectedIdx: 2, + expectedFound: true, + }, + { + name: "mid_found_exact", + vals: []uint32{1, 3, 5, 7, 9}, + geq: 5, + expectedIdx: 2, + expectedFound: true, + }, + { + name: "last_found", + vals: []uint32{1, 3, 5, 7, 9}, + geq: 9, + expectedIdx: 4, + expectedFound: true, + }, + { + name: "last_not_found", + vals: []uint32{1, 3, 5, 7, 9}, + geq: 10, + expectedIdx: 0, + expectedFound: false, + }, + { + name: "gallop_then_binary_search", + vals: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, + geq: 17, + expectedIdx: 16, + expectedFound: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + idx, ok := 
GallopSearchGeq(tt.vals, tt.geq) + assert.Equal(t, tt.expectedFound, ok, "found") + if tt.expectedFound { + require.Less(t, idx, len(tt.vals), "index in range") + assert.GreaterOrEqual(t, tt.vals[idx], tt.geq, "vals[idx] >= geq") + if idx > 0 { + assert.Less(t, tt.vals[idx-1], tt.geq, "element before is < geq") + } + } + assert.Equal(t, tt.expectedIdx, idx) + }) + } +} + +func TestGallopSearchLeq(t *testing.T) { + tests := []struct { + name string + vals []uint32 + leq uint32 + expectedIdx int + expectedFound bool + }{ + { + name: "empty", + vals: nil, + leq: 5, + expectedIdx: 0, + expectedFound: false, + }, + { + name: "single_found", + vals: []uint32{5}, + leq: 10, + expectedIdx: 0, + expectedFound: true, + }, + { + name: "single_not_found", + vals: []uint32{5}, + leq: 3, + expectedIdx: 0, + expectedFound: false, + }, + { + name: "last_element_less", + vals: []uint32{1, 3, 5, 7, 9}, + leq: 10, + expectedIdx: 4, + expectedFound: true, + }, + { + name: "first_element_equal", + vals: []uint32{1, 3, 5, 7, 9}, + leq: 1, + expectedIdx: 0, + expectedFound: true, + }, + { + name: "mid_less", + vals: []uint32{1, 3, 5, 7, 9}, + leq: 6, + expectedIdx: 2, + expectedFound: true, + }, + { + name: "mid_equal", + vals: []uint32{1, 3, 5, 7, 9}, + leq: 5, + expectedIdx: 2, + expectedFound: true, + }, + { + name: "below_first", + vals: []uint32{1, 3, 5, 7, 9}, + leq: 0, + expectedIdx: 0, + expectedFound: false, + }, + { + name: "gallop_from_right_large", + vals: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, + leq: 17, + expectedIdx: 16, + expectedFound: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + idx, ok := GallopSearchLeq(tt.vals, tt.leq) + assert.Equal(t, tt.expectedFound, ok, "found") + if tt.expectedFound { + require.Less(t, idx, len(tt.vals), "index in range") + assert.LessOrEqual(t, tt.vals[idx], tt.leq, "vals[idx] <= geq") + if idx < len(tt.vals)-1 { + assert.Greater(t, tt.vals[idx+1], tt.leq, 
"element after is > geq") + } + } + assert.Equal(t, tt.expectedIdx, idx, "index") + }) + } +} + +func pickRandom(from, to uint32) uint32 { + if from == to { + return from + } + span := to - from + 1 + if span == 0 { + return rand.Uint32() + } + return from + rand.Uint32()%span +} + +// TestGallopSearchGeqVsSortSearch uses both gallop search and ordinary bin search to find a random number in a slice, then compares +func TestGallopSearchGeqVsSortSearch(t *testing.T) { + const size = 100 + const numSearches = 50 + + vals := make([]uint32, size) + for i := range vals { + vals[i] = rand.Uint32() + } + sort.Slice(vals, func(i, j int) bool { return vals[i] < vals[j] }) + + from, to := vals[0], vals[size-1] + for i := 0; i < numSearches; i++ { + x := pickRandom(from, to) + expectedIdx := sort.Search(size, func(i int) bool { return vals[i] >= x }) + expectedFound := expectedIdx < size + + idx, found := GallopSearchGeq(vals, x) + assert.Equal(t, expectedFound, found) + if expectedFound { + assert.Equal(t, expectedIdx, idx) + } + } +} + +// TestGallopSearchLeqVsSortSearch uses both gallop search and ordinary bin search to find a random number in a slice, then compares +func TestGallopSearchLeqVsSortSearch(t *testing.T) { + const size = 100 + const numSearches = 50 + + vals := make([]uint32, size) + for i := range vals { + vals[i] = rand.Uint32() + } + sort.Slice(vals, func(i, j int) bool { return vals[i] < vals[j] }) + + from, to := vals[0], vals[size-1] + for i := 0; i < numSearches; i++ { + x := pickRandom(from, to) + refIdx := sort.Search(size, func(i int) bool { return vals[i] > x }) - 1 + refFound := refIdx >= 0 + + idx, found := GallopSearchLeq(vals, x) + assert.Equal(t, refFound, found, "x=%d", x) + if refFound { + assert.Equal(t, refIdx, idx, "x=%d", x) + } + } +} From 4a8237e25787e39eed1dc7e78cba2b2b29f15f53 Mon Sep 17 00:00:00 2001 From: Andrei Cheboksarov <37665782+cheb0@users.noreply.github.com> Date: Wed, 18 Feb 2026 13:17:06 +0400 Subject: [PATCH 2/7] fixes --- 
frac/processor/aggregator_test.go | 20 ++++++++------------ node/bench_test.go | 30 +++++++++++++++--------------- 2 files changed, 23 insertions(+), 27 deletions(-) diff --git a/frac/processor/aggregator_test.go b/frac/processor/aggregator_test.go index bd5e453e..908b9398 100644 --- a/frac/processor/aggregator_test.go +++ b/frac/processor/aggregator_test.go @@ -166,23 +166,19 @@ func (m *MockNode) String() string { return reflect.TypeOf(m).String() } -/*func (m *MockNode) NextSourced() (uint32, uint32, bool) { - return m.NextSourcedGeq(0) -} - -func (m *MockNode) NextSourcedGeq(minLID uint32) (uint32, uint32, bool) { - for len(m.Pairs) > 0 && m.Pairs[0].LID < minLID { - m.Pairs = m.Pairs[1:] - } +func (m *MockNode) NextSourced() (node.CmpLID, uint32) { if len(m.Pairs) == 0 { - return 0, 0, false + return node.NullCmpLID(), 0 } first := m.Pairs[0] m.Pairs = m.Pairs[1:] - return first.LID, first.Source, true -}*/ + return first.LID, first.Source +} -func (m *MockNode) NextSourced() (node.CmpLID, uint32) { +func (m *MockNode) NextSourcedGeq(minLID node.CmpLID) (node.CmpLID, uint32) { + for len(m.Pairs) > 0 && m.Pairs[0].LID.Less(minLID) { + m.Pairs = m.Pairs[1:] + } if len(m.Pairs) == 0 { return node.NullCmpLID(), 0 } diff --git a/node/bench_test.go b/node/bench_test.go index 8006a15c..d4e14e7a 100644 --- a/node/bench_test.go +++ b/node/bench_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/assert" ) -func newNodeStaticSizeRand(size int) *staticAsc { +func newNodeStaticSize(size int) *staticAsc { data, _ := Generate(size) return &staticAsc{staticCursor: staticCursor{data: data}} } @@ -80,7 +80,7 @@ func BenchmarkOr(b *testing.B) { for _, s := range sizes { b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) { res := make([]uint32, 0, s*2) - n := NewOr(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) + n := NewOr(newNodeStaticSize(s), newNodeStaticSize(s)) for b.Loop() { res = readAllInto(n, res) @@ -97,7 +97,7 @@ func BenchmarkAnd(b *testing.B) { for 
_, s := range sizes { b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) { res := make([]uint32, 0, s) - n := NewAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) + n := NewAnd(newNodeStaticSize(s), newNodeStaticSize(s)) for b.Loop() { res = readAllInto(n, res) @@ -114,7 +114,7 @@ func BenchmarkNAnd(b *testing.B) { for _, s := range sizes { b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) { res := make([]uint32, 0, s) - n := NewNAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) + n := NewNAnd(newNodeStaticSize(s), newNodeStaticSize(s)) for b.Loop() { res = readAllInto(n, res) @@ -130,10 +130,10 @@ func BenchmarkAndTree(b *testing.B) { for _, s := range sizes { b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) { - n1 := NewAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) - n2 := NewAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) - n3 := NewAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) - n4 := NewAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) + n1 := NewAnd(newNodeStaticSize(s), newNodeStaticSize(s)) + n2 := NewAnd(newNodeStaticSize(s), newNodeStaticSize(s)) + n3 := NewAnd(newNodeStaticSize(s), newNodeStaticSize(s)) + n4 := NewAnd(newNodeStaticSize(s), newNodeStaticSize(s)) n12 := NewAnd(n1, n2) n34 := NewAnd(n3, n4) n := NewAnd(n12, n34) @@ -153,10 +153,10 @@ func BenchmarkOrTree(b *testing.B) { for _, s := range sizes { b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) { - n1 := NewOr(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) - n2 := NewOr(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) - n3 := NewOr(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) - n4 := NewOr(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) + n1 := NewOr(newNodeStaticSize(s), newNodeStaticSize(s)) + n2 := NewOr(newNodeStaticSize(s), newNodeStaticSize(s)) + n3 := NewOr(newNodeStaticSize(s), newNodeStaticSize(s)) + n4 := NewOr(newNodeStaticSize(s), newNodeStaticSize(s)) n12 := NewOr(n1, n2) n34 := NewOr(n3, n4) n := 
NewOr(n12, n34) @@ -214,9 +214,9 @@ func BenchmarkComplex(b *testing.B) { for _, s := range sizes { b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) { res := make([]uint32, 0, s*2) - n1 := NewAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) - n2 := NewOr(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) - n3 := NewNAnd(newNodeStaticSizeRand(s), newNodeStaticSizeRand(s)) + n1 := NewAnd(newNodeStaticSize(s), newNodeStaticSize(s)) + n2 := NewOr(newNodeStaticSize(s), newNodeStaticSize(s)) + n3 := NewNAnd(newNodeStaticSize(s), newNodeStaticSize(s)) n12 := NewOr(n1, n2) n := NewAnd(n12, n3) From e2e89f800a15a2ea5cc94de7e9c88bdf419572cc Mon Sep 17 00:00:00 2001 From: Andrei Cheboksarov <37665782+cheb0@users.noreply.github.com> Date: Wed, 18 Feb 2026 13:20:30 +0400 Subject: [PATCH 3/7] implement String --- node/cmp_lid.go | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/node/cmp_lid.go b/node/cmp_lid.go index f27c8314..ba382aac 100644 --- a/node/cmp_lid.go +++ b/node/cmp_lid.go @@ -1,6 +1,14 @@ package node -import "math" +import ( + "fmt" + "math" +) + +const ( + DescMask = uint32(0) + AscMask = uint32(0xFFFFFFFF) +) // CmpLID is an encoded representation of LID and reverse flag made specifically for fast compare operations. 
// @@ -21,16 +29,15 @@ func NullCmpLID() CmpLID { func NewCmpLIDOrderDesc(lid uint32) CmpLID { return CmpLID{ lid: lid, - mask: uint32(0), + mask: DescMask, } } // NewCmpLIDOrderAsc returns LIDs for asc sort order func NewCmpLIDOrderAsc(lid uint32) CmpLID { - mask := uint32(0xFFFFFFFF) return CmpLID{ - lid: lid ^ mask, - mask: mask, + lid: lid ^ AscMask, + mask: AscMask, } } @@ -85,3 +92,7 @@ func (c CmpLID) Unpack() uint32 { func (c CmpLID) IsNull() bool { return c.lid == math.MaxUint32 } + +func (c CmpLID) String() string { + return fmt.Sprintf("%d, reverse=%t", c.Unpack(), c.mask == AscMask) +} From 1e942d0fce2b976d2a6cd23d8047f069fd602bf0 Mon Sep 17 00:00:00 2001 From: Andrei Cheboksarov <37665782+cheb0@users.noreply.github.com> Date: Wed, 18 Feb 2026 15:23:33 +0400 Subject: [PATCH 4/7] fix --- node/node_or.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/node/node_or.go b/node/node_or.go index 4b08f139..ef70b763 100644 --- a/node/node_or.go +++ b/node/node_or.go @@ -61,8 +61,9 @@ func (n *nodeOr) Next() CmpLID { } func (n *nodeOr) NextGeq(nextID CmpLID) CmpLID { - // fast path: if we have both branches and nothing to skip, then choose lowest and return - if !n.leftID.IsNull() && !n.rightID.IsNull() && nextID.Less(Min(n.leftID, n.rightID)) { + // Fast path: if we at least left or right and there is nothing to skip, then choose lowest and return. 
+ minID := Min(n.leftID, n.rightID) + if nextID.LessOrEq(minID) { if n.leftID.Less(n.rightID) { cur := n.leftID n.readLeft() @@ -79,7 +80,6 @@ func (n *nodeOr) NextGeq(nextID CmpLID) CmpLID { return cur } - // skip past nextID if n.leftID.Less(nextID) { n.readLeftGeq(nextID) } From f62e2abbcfb2b127ce5b155d9d4b461f7888b54f Mon Sep 17 00:00:00 2001 From: Andrei Cheboksarov <37665782+cheb0@users.noreply.github.com> Date: Mon, 2 Mar 2026 08:42:02 +0300 Subject: [PATCH 5/7] remove TODO --- node/node_and.go | 1 - 1 file changed, 1 deletion(-) diff --git a/node/node_and.go b/node/node_and.go index 332f8353..0b42aac3 100644 --- a/node/node_and.go +++ b/node/node_and.go @@ -58,7 +58,6 @@ func (n *nodeAnd) Next() LID { } func (n *nodeAnd) NextGeq(nextID LID) LID { - // TODO first skip not interesting values, then call Next() for !n.leftID.IsNull() && !n.rightID.IsNull() && !n.leftID.Eq(n.rightID) { for !n.rightID.IsNull() && n.leftID.Less(n.rightID) { n.readLeftGeq(Max(n.rightID, nextID)) From f6ac1eb47a5adf6e184e4f44bf2a32658dbed70b Mon Sep 17 00:00:00 2001 From: Andrei Cheboksarov <37665782+cheb0@users.noreply.github.com> Date: Mon, 2 Mar 2026 08:56:59 +0300 Subject: [PATCH 6/7] rename node.CmpLID => node.LID --- frac/processor/aggregator_test.go | 4 +-- frac/sealed/lids/iterator_asc.go | 6 ++-- frac/sealed/lids/iterator_desc.go | 6 ++-- node/cmp_lid.go | 8 ++++++ node/node_and_test.go | 10 +++---- node/node_nand.go | 2 +- node/node_or_test.go | 48 +++++++++++++++---------------- node/node_range.go | 2 +- node/node_static.go | 14 ++++----- node/node_static_test.go | 24 ++++++++-------- node/sourced_node_wrapper.go | 2 +- 11 files changed, 67 insertions(+), 59 deletions(-) diff --git a/frac/processor/aggregator_test.go b/frac/processor/aggregator_test.go index 0dbafef6..48099922 100644 --- a/frac/processor/aggregator_test.go +++ b/frac/processor/aggregator_test.go @@ -175,12 +175,12 @@ func (m *MockNode) NextSourced() (node.LID, uint32) { return first.LID, first.Source } 
-func (m *MockNode) NextSourcedGeq(minLID node.CmpLID) (node.CmpLID, uint32) { +func (m *MockNode) NextSourcedGeq(minLID node.LID) (node.LID, uint32) { for len(m.Pairs) > 0 && m.Pairs[0].LID.Less(minLID) { m.Pairs = m.Pairs[1:] } if len(m.Pairs) == 0 { - return node.NullCmpLID(), 0 + return node.NullLID(), 0 } first := m.Pairs[0] m.Pairs = m.Pairs[1:] diff --git a/frac/sealed/lids/iterator_asc.go b/frac/sealed/lids/iterator_asc.go index b509f41b..9ff6fa4f 100644 --- a/frac/sealed/lids/iterator_asc.go +++ b/frac/sealed/lids/iterator_asc.go @@ -75,11 +75,11 @@ func (it *IteratorAsc) Next() node.LID { } // NextGeq returns the next (in reverse iteration order) LID that is <= maxLID. -func (it *IteratorAsc) NextGeq(nextID node.CmpLID) node.CmpLID { +func (it *IteratorAsc) NextGeq(nextID node.LID) node.LID { for { for len(it.lids) == 0 { if !it.tryNextBlock { - return node.NewCmpLIDOrderAsc(0) + return node.NewLIDOrderAsc(0) } it.loadNextLIDsBlock() @@ -97,7 +97,7 @@ func (it *IteratorAsc) NextGeq(nextID node.CmpLID) node.CmpLID { if found { lid := it.lids[idx] it.lids = it.lids[:idx] - return node.NewCmpLIDOrderAsc(lid) + return node.NewLIDOrderAsc(lid) } it.lids = it.lids[:0] diff --git a/frac/sealed/lids/iterator_desc.go b/frac/sealed/lids/iterator_desc.go index 630a00c6..d9a1fc26 100644 --- a/frac/sealed/lids/iterator_desc.go +++ b/frac/sealed/lids/iterator_desc.go @@ -75,11 +75,11 @@ func (it *IteratorDesc) Next() node.LID { } // NextGeq finds next greater or equal -func (it *IteratorDesc) NextGeq(nextID node.CmpLID) node.CmpLID { +func (it *IteratorDesc) NextGeq(nextID node.LID) node.LID { for { for len(it.lids) == 0 { if !it.tryNextBlock { - return node.NewCmpLIDOrderDesc(math.MaxUint32) + return node.NewLIDOrderDesc(math.MaxUint32) } it.loadNextLIDsBlock() // last chunk in block but not last for tid; need load next block @@ -98,7 +98,7 @@ func (it *IteratorDesc) NextGeq(nextID node.CmpLID) node.CmpLID { it.lids = it.lids[idx:] lid := it.lids[0] it.lids = 
it.lids[1:] - return node.NewCmpLIDOrderDesc(lid) + return node.NewLIDOrderDesc(lid) } it.lids = it.lids[:0] diff --git a/node/cmp_lid.go b/node/cmp_lid.go index b68625e7..ffe094f9 100644 --- a/node/cmp_lid.go +++ b/node/cmp_lid.go @@ -41,6 +41,14 @@ func NewLIDOrderAsc(lid uint32) LID { } } +func NewLID(lid uint32, reverse bool) LID { + if reverse { + return NewLIDOrderAsc(lid) + } else { + return NewLIDOrderDesc(lid) + } +} + // Less compares two values. It also does an implicit null check, since we store math.MaxUint32 for null values. // Which means if we call x.Less(y), then we now for sure that x is not null. Therefore, this Less call can work // as both "null check + less" combo. diff --git a/node/node_and_test.go b/node/node_and_test.go index 7a737263..0d4f41f3 100644 --- a/node/node_and_test.go +++ b/node/node_and_test.go @@ -16,16 +16,16 @@ func TestNodeAnd_NextGeqAscending(t *testing.T) { // Currently, nodes instantiate their state on creation, which will be fixed later. // Thus, the first LID returned is the first from left and right - id := node.NextGeq(NewCmpLIDOrderDesc(7)) + id := node.NextGeq(NewLIDOrderDesc(7)) assert.Equal(t, uint32(1), id.Unpack()) - id = node.NextGeq(NewCmpLIDOrderDesc(7)) + id = node.NextGeq(NewLIDOrderDesc(7)) assert.Equal(t, uint32(7), id.Unpack()) - id = node.NextGeq(NewCmpLIDOrderDesc(50)) + id = node.NextGeq(NewLIDOrderDesc(50)) assert.Equal(t, uint32(80), id.Unpack()) - id = node.NextGeq(NewCmpLIDOrderDesc(50)) + id = node.NextGeq(NewLIDOrderDesc(50)) assert.True(t, id.IsNull()) } @@ -53,7 +53,7 @@ func TestNodeAnd_NextGeqCompatibility(t *testing.T) { for { lid := node.Next() - lidGeq := nodeGeq.NextGeq(NewCmpLID(zero, rev)) + lidGeq := nodeGeq.NextGeq(NewLID(zero, rev)) assert.Equal(t, lid, lidGeq) diff --git a/node/node_nand.go b/node/node_nand.go index a2de19d1..d4a9bd73 100644 --- a/node/node_nand.go +++ b/node/node_nand.go @@ -44,6 +44,6 @@ func (n *nodeNAnd) Next() LID { return NullLID() } -func (n *nodeNAnd) 
NextGeq(nextID CmpLID) CmpLID { +func (n *nodeNAnd) NextGeq(nextID LID) LID { return n.Next() } diff --git a/node/node_or_test.go b/node/node_or_test.go index f5bf1fd3..e37b032f 100644 --- a/node/node_or_test.go +++ b/node/node_or_test.go @@ -15,19 +15,19 @@ func TestNodeOr_NextGeqAscending(t *testing.T) { node := NewOr(left, right) - id := node.NextGeq(NewCmpLIDOrderDesc(7)) + id := node.NextGeq(NewLIDOrderDesc(7)) assert.Equal(t, uint32(7), id.Unpack()) - id = node.NextGeq(NewCmpLIDOrderDesc(7)) + id = node.NextGeq(NewLIDOrderDesc(7)) assert.Equal(t, uint32(9), id.Unpack()) - id = node.NextGeq(NewCmpLIDOrderDesc(24)) + id = node.NextGeq(NewLIDOrderDesc(24)) assert.Equal(t, uint32(25), id.Unpack()) - id = node.NextGeq(NewCmpLIDOrderDesc(30)) + id = node.NextGeq(NewLIDOrderDesc(30)) assert.Equal(t, uint32(30), id.Unpack()) - id = node.NextGeq(NewCmpLIDOrderDesc(51)) + id = node.NextGeq(NewLIDOrderDesc(51)) assert.True(t, id.IsNull()) } @@ -55,7 +55,7 @@ func TestNodeOr_NextGeqCompatibility(t *testing.T) { for { lid := node.Next() - lidGeq := nodeGeq.NextGeq(NewCmpLID(zero, rev)) + lidGeq := nodeGeq.NextGeq(NewLID(zero, rev)) assert.Equal(t, lid, lidGeq) @@ -71,7 +71,7 @@ func TestNodeOrAgg_NoDedup(t *testing.T) { left := NewSourcedNodeWrapper(NewStatic([]uint32{1, 5, 7}, false), 1) right := NewSourcedNodeWrapper(NewStatic([]uint32{5, 8}, false), 2) - orAgg := NewNodeOrAgg(left, right, false) + orAgg := NewNodeOrAgg(left, right) pairs := readAllSourced(orAgg) // expected sources for lid=5 @@ -92,7 +92,7 @@ func TestNodeOrAgg_MergeAscending(t *testing.T) { left := NewSourcedNodeWrapper(NewStatic([]uint32{1, 3, 5}, false), 0) right := NewSourcedNodeWrapper(NewStatic([]uint32{2, 4, 6}, false), 1) - orAgg := NewNodeOrAgg(left, right, false) + orAgg := NewNodeOrAgg(left, right) got := readAllSourced(orAgg) want := [][2]uint32{ @@ -111,7 +111,7 @@ func TestNodeOrAgg_MergeAscendingWithDups(t *testing.T) { left := NewSourcedNodeWrapper(NewStatic([]uint32{1, 2, 3, 5, 8}, 
false), 0) right := NewSourcedNodeWrapper(NewStatic([]uint32{2, 3, 4, 6, 8}, false), 1) - orAgg := NewNodeOrAgg(left, right, false) + orAgg := NewNodeOrAgg(left, right) got := readAllSourced(orAgg) want := [][2]uint32{ @@ -136,22 +136,22 @@ func TestNodeOrAgg_NextSourcedGeq(t *testing.T) { left := NewSourcedNodeWrapper(NewStatic([]uint32{1, 2, 3, 5, 8, 15, 19}, false), 0) right := NewSourcedNodeWrapper(NewStatic([]uint32{2, 3, 4, 6, 8, 14, 20}, false), 1) - orAgg := NewNodeOrAgg(left, right, false) + orAgg := NewNodeOrAgg(left, right) - id, source := orAgg.NextSourcedGeq(NewCmpLIDOrderDesc(3)) + id, source := orAgg.NextSourcedGeq(NewLIDOrderDesc(3)) assert.Equal(t, uint32(3), id.Unpack()) assert.Equal(t, uint32(1), source) // 3 returned again, but with different source - no deduplication - id, source = orAgg.NextSourcedGeq(NewCmpLIDOrderDesc(3)) + id, source = orAgg.NextSourcedGeq(NewLIDOrderDesc(3)) assert.Equal(t, uint32(3), id.Unpack()) assert.Equal(t, uint32(0), source) - id, source = orAgg.NextSourcedGeq(NewCmpLIDOrderDesc(6)) + id, source = orAgg.NextSourcedGeq(NewLIDOrderDesc(6)) assert.Equal(t, uint32(6), id.Unpack()) assert.Equal(t, uint32(1), source) - id, source = orAgg.NextSourcedGeq(NewCmpLIDOrderDesc(17)) + id, source = orAgg.NextSourcedGeq(NewLIDOrderDesc(17)) assert.Equal(t, uint32(19), id.Unpack()) assert.Equal(t, uint32(0), source) } @@ -162,26 +162,26 @@ func TestNodeOrAgg_NextSourcedGeq_Reverse(t *testing.T) { left := NewSourcedNodeWrapper(NewStatic([]uint32{1, 2, 3, 5, 8, 15, 19}, true), 0) right := NewSourcedNodeWrapper(NewStatic([]uint32{2, 3, 4, 6, 8, 14, 20}, true), 1) - orAgg := NewNodeOrAgg(left, right, true) + orAgg := NewNodeOrAgg(left, right) - id, source := orAgg.NextSourcedGeq(NewCmpLIDOrderAsc(8)) + id, source := orAgg.NextSourcedGeq(NewLIDOrderAsc(8)) assert.Equal(t, uint32(8), id.Unpack()) assert.Equal(t, uint32(1), source) // 8 returned again, but with different source - no deduplication - id, source = 
orAgg.NextSourcedGeq(NewCmpLIDOrderAsc(8)) + id, source = orAgg.NextSourcedGeq(NewLIDOrderAsc(8)) assert.Equal(t, uint32(8), id.Unpack()) assert.Equal(t, uint32(0), source) - id, source = orAgg.NextSourcedGeq(NewCmpLIDOrderAsc(4)) + id, source = orAgg.NextSourcedGeq(NewLIDOrderAsc(4)) assert.Equal(t, uint32(4), id.Unpack()) assert.Equal(t, uint32(1), source) - id, source = orAgg.NextSourcedGeq(NewCmpLIDOrderAsc(1)) + id, source = orAgg.NextSourcedGeq(NewLIDOrderAsc(1)) assert.Equal(t, uint32(1), id.Unpack()) assert.Equal(t, uint32(0), source) - id, _ = orAgg.NextSourcedGeq(NewCmpLIDOrderAsc(1)) + id, _ = orAgg.NextSourcedGeq(NewLIDOrderAsc(1)) assert.True(t, id.IsNull()) } @@ -189,7 +189,7 @@ func TestNodeOrAgg_MergeDescending(t *testing.T) { left := NewSourcedNodeWrapper(NewStatic([]uint32{1, 3, 5}, true), 0) right := NewSourcedNodeWrapper(NewStatic([]uint32{2, 4, 6}, true), 1) - orAgg := NewNodeOrAgg(left, right, true) + orAgg := NewNodeOrAgg(left, right) got := readAllSourced(orAgg) want := [][2]uint32{ @@ -209,7 +209,7 @@ func TestNodeOrAgg_EmptySide(t *testing.T) { left := NewSourcedNodeWrapper(NewStatic(nil, false), 0) right := NewSourcedNodeWrapper(NewStatic([]uint32{10, 20}, false), 1) - orAgg := NewNodeOrAgg(left, right, false) + orAgg := NewNodeOrAgg(left, right) got := readAllSourced(orAgg) want := [][2]uint32{ @@ -224,7 +224,7 @@ func TestNodeOrAgg_EmptySide(t *testing.T) { left := NewSourcedNodeWrapper(NewStatic([]uint32{10, 20}, false), 0) right := NewSourcedNodeWrapper(NewStatic(nil, false), 1) - orAgg := NewNodeOrAgg(left, right, false) + orAgg := NewNodeOrAgg(left, right) got := readAllSourced(orAgg) want := [][2]uint32{ @@ -239,7 +239,7 @@ func TestNodeOrAgg_EmptySide(t *testing.T) { left := NewSourcedNodeWrapper(NewStatic(nil, false), 0) right := NewSourcedNodeWrapper(NewStatic(nil, false), 1) - orAgg := NewNodeOrAgg(left, right, false) + orAgg := NewNodeOrAgg(left, right) id, _ := orAgg.NextSourced() assert.True(t, id.IsNull()) diff --git 
a/node/node_range.go b/node/node_range.go index ed5ce921..8ebaca3b 100644 --- a/node/node_range.go +++ b/node/node_range.go @@ -25,6 +25,6 @@ func (n *nodeRange) Next() LID { return result } -func (n *nodeRange) NextGeq(nextID CmpLID) CmpLID { +func (n *nodeRange) NextGeq(nextID LID) LID { return n.Next() } diff --git a/node/node_static.go b/node/node_static.go index bb79b38d..d4c61954 100644 --- a/node/node_static.go +++ b/node/node_static.go @@ -51,19 +51,19 @@ func (n *staticAsc) Next() LID { // NextGeq finds next greater or equals since iteration is in ascending order func (n *staticAsc) NextGeq(nextID LID) LID { if n.ptr >= len(n.data) { - return NewCmpLIDOrderDesc(math.MaxUint32) + return NewLIDOrderDesc(math.MaxUint32) } from := n.ptr idx, found := util.GallopSearchGeq(n.data[from:], nextID.Unpack()) if !found { - return NewCmpLIDOrderDesc(math.MaxUint32) + return NewLIDOrderDesc(math.MaxUint32) } i := from + idx cur := n.data[i] n.ptr = i + 1 - return NewCmpLIDOrderDesc(cur) + return NewLIDOrderDesc(cur) } func (n *staticDesc) Next() LID { @@ -77,18 +77,18 @@ func (n *staticDesc) Next() LID { } // NextGeq finds next less or equals since iteration is in descending order -func (n *staticDesc) NextGeq(nextID LID) CmpLID { +func (n *staticDesc) NextGeq(nextID LID) LID { if n.ptr < 0 { - return NewCmpLIDOrderAsc(0) + return NewLIDOrderAsc(0) } idx, found := util.GallopSearchLeq(n.data[:n.ptr+1], nextID.Unpack()) if !found { - return NewCmpLIDOrderAsc(0) + return NewLIDOrderAsc(0) } cur := n.data[idx] n.ptr = idx - 1 - return NewCmpLIDOrderAsc(cur) + return NewLIDOrderAsc(cur) } // MakeStaticNodes is currently used only for tests diff --git a/node/node_static_test.go b/node/node_static_test.go index 56d2a0b2..98ec8bfb 100644 --- a/node/node_static_test.go +++ b/node/node_static_test.go @@ -10,20 +10,20 @@ func TestStaticAscNextGeq(t *testing.T) { lids := []uint32{1, 3, 5, 7, 9} n := NewStatic(lids, false).(*staticAsc) - id := n.NextGeq(NewCmpLIDOrderDesc(0)) + id 
:= n.NextGeq(NewLIDOrderDesc(0)) assert.False(t, id.IsNull()) assert.Equal(t, uint32(1), id.Unpack()) - id = n.NextGeq(NewCmpLIDOrderDesc(4)) + id = n.NextGeq(NewLIDOrderDesc(4)) assert.False(t, id.IsNull()) assert.Equal(t, uint32(5), id.Unpack()) // 5 has already been returned, so the next value >= 5 is 7. - id = n.NextGeq(NewCmpLIDOrderDesc(5)) + id = n.NextGeq(NewLIDOrderDesc(5)) assert.False(t, id.IsNull()) assert.Equal(t, uint32(7), id.Unpack()) - id = n.NextGeq(NewCmpLIDOrderDesc(10)) + id = n.NextGeq(NewLIDOrderDesc(10)) assert.True(t, id.IsNull()) } @@ -31,15 +31,15 @@ func TestStaticDescNextGeq(t *testing.T) { lids := []uint32{1, 3, 5, 7, 9} n := NewStatic(lids, true).(*staticDesc) - id := n.NextGeq(NewCmpLIDOrderDesc(10)) + id := n.NextGeq(NewLIDOrderDesc(10)) assert.False(t, id.IsNull()) assert.Equal(t, uint32(9), id.Unpack()) - id = n.NextGeq(NewCmpLIDOrderDesc(10)) + id = n.NextGeq(NewLIDOrderDesc(10)) assert.False(t, id.IsNull()) assert.Equal(t, uint32(7), id.Unpack()) - id = n.NextGeq(NewCmpLIDOrderDesc(10)) + id = n.NextGeq(NewLIDOrderDesc(10)) assert.False(t, id.IsNull()) assert.Equal(t, uint32(5), id.Unpack()) } @@ -48,22 +48,22 @@ func TestStaticDescNextGeq_WithThreshold(t *testing.T) { lids := []uint32{1, 3, 5, 7, 9} n := NewStatic(lids, true).(*staticDesc) - id := n.NextGeq(NewCmpLIDOrderDesc(8)) + id := n.NextGeq(NewLIDOrderDesc(8)) assert.False(t, id.IsNull()) assert.Equal(t, uint32(7), id.Unpack()) - id = n.NextGeq(NewCmpLIDOrderDesc(8)) + id = n.NextGeq(NewLIDOrderDesc(8)) assert.False(t, id.IsNull()) assert.Equal(t, uint32(5), id.Unpack()) - id = n.NextGeq(NewCmpLIDOrderDesc(8)) + id = n.NextGeq(NewLIDOrderDesc(8)) assert.False(t, id.IsNull()) assert.Equal(t, uint32(3), id.Unpack()) - id = n.NextGeq(NewCmpLIDOrderDesc(8)) + id = n.NextGeq(NewLIDOrderDesc(8)) assert.False(t, id.IsNull()) assert.Equal(t, uint32(1), id.Unpack()) - id = n.NextGeq(NewCmpLIDOrderDesc(8)) + id = n.NextGeq(NewLIDOrderDesc(8)) assert.True(t, id.IsNull()) } diff 
--git a/node/sourced_node_wrapper.go b/node/sourced_node_wrapper.go index 34384a57..7cc60015 100644 --- a/node/sourced_node_wrapper.go +++ b/node/sourced_node_wrapper.go @@ -14,7 +14,7 @@ func (w *sourcedNodeWrapper) NextSourced() (LID, uint32) { return cmp, w.source } -func (w *sourcedNodeWrapper) NextSourcedGeq(nextID CmpLID) (CmpLID, uint32) { +func (w *sourcedNodeWrapper) NextSourcedGeq(nextID LID) (LID, uint32) { id := w.node.NextGeq(nextID) return id, w.source } From 35219c8ce3454a554c4729f5fb19ffd3ea884443 Mon Sep 17 00:00:00 2001 From: Andrei Cheboksarov <37665782+cheb0@users.noreply.github.com> Date: Mon, 2 Mar 2026 08:59:29 +0300 Subject: [PATCH 7/7] rename node.CmpLID => node.LID (filename) --- node/node_and.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/node_and.go b/node/node_and.go index 39fa02f0..7c7d4aa8 100644 --- a/node/node_and.go +++ b/node/node_and.go @@ -49,7 +49,7 @@ func (n *nodeAnd) Next() LID { } } if n.leftID.IsNull() || n.rightID.IsNull() { - return NullCmpLID() + return NullLID() } cur := n.leftID n.readLeft()