Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
147 changes: 147 additions & 0 deletions src/utils/__tests__/calculateAdaptiveWeights.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,147 @@
import { expect, test } from 'vitest';

import { calculateAdaptiveWeights } from '../calculateAdaptiveWeights.ts';

test('basic functionality with default options', () => {
  const signal = new Float64Array([1, 2, 3, 4, 5]);
  const fitted = new Float64Array([1.1, 2.1, 3.1, 4.1, 5.1]);
  const initialWeights = new Float64Array(5).fill(1);

  const updated = calculateAdaptiveWeights(signal, fitted, initialWeights, {});

  expect(updated).toBeInstanceOf(Float64Array);
  expect(updated).toHaveLength(5);
  // Boundary weights are pinned to 1 by the algorithm.
  expect(updated[0]).toBe(1);
  expect(updated[4]).toBe(1);
  // Interior weights are pulled below 1 by the residual-based update.
  for (const index of [1, 2, 3]) {
    expect(updated[index]).toBeLessThan(1);
  }
});

test('learning rate = 0 returns same weights', () => {
  const yData = new Float64Array([1, 2, 3, 4, 5]);
  const baseline = new Float64Array([1.1, 2.1, 3.1, 4.1, 5.1]);
  const weights = new Float64Array([1, 1, 1, 1, 1]);

  const result = calculateAdaptiveWeights(yData, baseline, weights, {
    learningRate: 0,
  });

  // The early-return contract hands back the very same array, not a copy:
  // assert identity with toBe (toStrictEqual would also pass for a clone,
  // which would miss a regression in the early return).
  expect(result).toBe(weights);
  // The values themselves must be left untouched by the call.
  expect(result).toStrictEqual(new Float64Array([1, 1, 1, 1, 1]));
});

test('high learning rate affects weights more', () => {
  const signal = new Float64Array([1, 2, 3, 4, 5]);
  const fitted = new Float64Array([1.1, 2.1, 3.1, 4.1, 5.1]);
  const slowWeights = new Float64Array(5).fill(1);
  const fastWeights = new Float64Array(5).fill(1);

  calculateAdaptiveWeights(signal, fitted, slowWeights, { learningRate: 0.3 });
  calculateAdaptiveWeights(signal, fitted, fastWeights, { learningRate: 0.8 });

  // A larger learning rate moves each interior weight further away from
  // its starting value of 1.
  for (let index = 1; index < 4; index++) {
    const slowShift = Math.abs(1 - slowWeights[index]);
    const fastShift = Math.abs(1 - fastWeights[index]);
    expect(fastShift).toBeGreaterThan(slowShift);
  }
});

test('custom factorStd affects weight threshold', () => {
  const signal = new Float64Array([1, 2, 3, 4, 5]);
  const fitted = new Float64Array([1.1, 2.1, 3.1, 4.1, 5.1]);
  const narrowWeights = new Float64Array(5).fill(1);
  const wideWeights = new Float64Array(5).fill(1);

  calculateAdaptiveWeights(signal, fitted, narrowWeights, { factorStd: 2 });
  calculateAdaptiveWeights(signal, fitted, wideWeights, { factorStd: 5 });

  // Changing the outlier threshold must change at least one interior weight.
  const interiorDiffers = [1, 2, 3].some(
    (index) => narrowWeights[index] !== wideWeights[index],
  );
  expect(interiorDiffers).toBe(true);
});

test('control points increase weights', () => {
  const signal = new Float64Array([1, 2, 3, 4, 5]);
  const fitted = new Float64Array([1.1, 2.1, 3.1, 4.1, 5.1]);
  const plainWeights = new Float64Array(5).fill(1);
  const anchoredWeights = new Float64Array(5).fill(1);

  calculateAdaptiveWeights(signal, fitted, plainWeights, {});
  calculateAdaptiveWeights(signal, fitted, anchoredWeights, {
    controlPoints: new Float64Array([0, 1, 0, 1, 0]),
  });

  // Indices 1 and 3 are flagged as control points and should end up with
  // larger weights than in the run without control points.
  expect(anchoredWeights[1]).toBeGreaterThan(plainWeights[1]);
  expect(anchoredWeights[3]).toBeGreaterThan(plainWeights[3]);
  // The unflagged interior index is essentially unaffected.
  expect(anchoredWeights[2]).toBeCloseTo(plainWeights[2], 3);
});

test('perfect fit (baseline equals yData)', () => {
  const signal = new Float64Array([1, 2, 3, 4, 5]);
  const fitted = new Float64Array([1, 2, 3, 4, 5]);
  const weights = new Float64Array(5).fill(1);

  const updated = calculateAdaptiveWeights(signal, fitted, weights, {});

  expect(updated).toBeInstanceOf(Float64Array);
  // Endpoints are pinned to 1.
  expect(updated[0]).toBe(1);
  expect(updated[4]).toBe(1);
  // With zero residuals every interior weight stays comfortably high.
  for (const index of [1, 2, 3]) {
    expect(updated[index]).toBeGreaterThan(0.2);
  }
});

test('large residuals reduce weights', () => {
  const signal = new Float64Array([1, 2, 3, 4, 5]);
  // The fitted value at index 2 deviates strongly from the signal.
  const fitted = new Float64Array([1, 2, 10, 4, 5]);
  const weights = new Float64Array(5).fill(1);

  const updated = calculateAdaptiveWeights(signal, fitted, weights, {});

  expect(updated[0]).toBe(1);
  expect(updated[4]).toBe(1);
  // The outlier position must be down-weighted relative to its neighbour.
  expect(updated[2]).toBeLessThan(updated[1]);
});

test('modifies input weights array', () => {
  const yData = new Float64Array([1, 2, 3, 4, 5]);
  const baseline = new Float64Array([1.1, 2.1, 3.1, 4.1, 5.1]);
  const weights = new Float64Array([1, 1, 1, 1, 1]);

  const result = calculateAdaptiveWeights(yData, baseline, weights, {});

  // In-place contract: the returned reference must be the input array
  // itself. toStrictEqual only compares values and would pass even if the
  // implementation returned a modified copy, so assert identity with toBe.
  expect(result).toBe(weights);
});

test('works with different array types', () => {
  const signal = [1, 2, 3, 4, 5];
  const fitted = [1.1, 2.1, 3.1, 4.1, 5.1];
  const weights = [1, 1, 1, 1, 1];

  // Exercises plain number[] inputs; the casts only silence the
  // typed-array-oriented signature at compile time.
  const updated = calculateAdaptiveWeights(
    signal as any,
    fitted as any,
    weights as any,
    {},
  );

  expect(updated).toBeDefined();
  expect(updated[0]).toBe(1);
  expect(updated[4]).toBe(1);
});
46 changes: 20 additions & 26 deletions src/utils/calculateAdaptiveWeights.ts
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,7 @@ export interface CalculateAdaptiveWeightsOptions extends WeightsAndControlPoints
* @default 0.5
*/
learningRate?: number;
/**
* The minimum allowed weight value to prevent weights from becoming too small.
* @default 0.01
*/
minWeight?: number;

/**
* Factor used to calculate the threshold for determining outliers in the residuals.
* Higher values mean more tolerance for outliers. The default value is based on noise follow the normal distribution
Expand Down Expand Up @@ -55,37 +51,35 @@ export function calculateAdaptiveWeights(
weights: NumberArray,
options: CalculateAdaptiveWeightsOptions,
) {
const {
controlPoints,
factorStd = 3,
learningRate = 0.5,
minWeight = 0.01,
} = options;
const { controlPoints, factorStd = 3, learningRate = 0.5 } = options;

if (learningRate === 0) {
return weights;
}

const absResiduals = xAbsolute(xSubtract(yData, baseline));

const medAbsRes = xMedian(absResiduals);
const mad = 1.4826 * medAbsRes;
const threshold = factorStd * mad;
const threshold = mad > 0 ? factorStd * mad : 1;

const rawWeights = new Float64Array(absResiduals.length);
for (let i = 0; i < absResiduals.length; i++) {
rawWeights[i] = Math.exp(-((absResiduals[i] / threshold) ** 2));
}

let maxWeight = Number.MIN_SAFE_INTEGER;
const newWeights = Float64Array.from(weights);
const oneMinusLearningRate = 1 - learningRate;
for (let i = 0; i < newWeights.length; i++) {
if (controlPoints && controlPoints[i] > 0) continue;
const weight = Math.max(
minWeight,
oneMinusLearningRate * weights[i] + learningRate * rawWeights[i],
);
newWeights[i] = weight;
maxWeight = Math.max(maxWeight, weight);
}
newWeights[0] = maxWeight;
newWeights[weights.length - 1] = maxWeight;
for (let i = 0; i < weights.length; i++) {
let weight = weights[i];
weight = (oneMinusLearningRate * weight + learningRate * rawWeights[i]) / 4;

return newWeights;
if (controlPoints && controlPoints[i] > 0) {
weight *= 4;
}

weights[i] = weight;
}
weights[0] = 1;
weights[weights.length - 1] = 1;
return weights;
}
8 changes: 0 additions & 8 deletions src/x/xWhittakerSmoother.ts
Original file line number Diff line number Diff line change
Expand Up @@ -35,12 +35,6 @@ export interface XWhittakerSmootherOptions extends CalculateAdaptiveWeightsOptio
* @default 0.5
*/
learningRate?: number;

/**
* Minimum weight value to avoid division by zero or extremely small weights.
* @default 0.01
*/
minWeight?: number;
}

/**
Expand All @@ -59,7 +53,6 @@ export function xWhittakerSmoother(
tolerance = 1e-6,
factorStd = 3,
learningRate = 0.5,
minWeight = 0.01,
} = options;

const size = yData.length;
Expand Down Expand Up @@ -89,7 +82,6 @@ export function xWhittakerSmoother(

weights = calculateAdaptiveWeights(yData, newBaseline, weights, {
controlPoints,
minWeight,
learningRate,
factorStd,
});
Expand Down
Loading