removed data pt size

Doruk 2025-06-25 16:44:40 +02:00
parent 60d879f131
commit 49e181e71e
2 changed files with 18 additions and 22 deletions

@@ -902,16 +902,6 @@ class UptimeCalculator {
// Since data is sorted, we can optimize by tracking current bucket index
let currentBucketIndex = 0;
-// Calculate data point size in minutes based on the data type
-let dataPointSizeMinutes;
-if (days <= 1) {
-dataPointSizeMinutes = 1; // Minutely data
-} else if (days <= 30) {
-dataPointSizeMinutes = 60; // Hourly data
-} else {
-dataPointSizeMinutes = 60 * 24; // Daily data
-}
for (const [ timestamp, dataPoint ] of Object.entries(availableData)) {
const timestampNum = parseInt(timestamp);
@@ -929,16 +919,20 @@ class UptimeCalculator {
const bucket = buckets[currentBucketIndex];
if (timestampNum >= bucket.start && timestampNum < bucket.end) {
-// Calculate scale factor to prevent double-counting when data points span multiple buckets
-const scaleFactor = Math.min(1.0, bucketSizeMinutes / dataPointSizeMinutes);
+// FIXME: This accounting is flawed when data points span multiple buckets.
+// The correct approach would be to:
+// 1. Add only the portion of the data point that fits within the current bucket
+// 2. Push the remainder to the next bucket (if it exists)
+// For now, we add the full data point to avoid complexity, which may cause
+// some overcounting when bucket size < data point size.
-bucket.up += (dataPoint.up || 0) * scaleFactor;
-bucket.down += (dataPoint.down || 0) * scaleFactor;
+bucket.up += (dataPoint.up || 0);
+bucket.down += (dataPoint.down || 0);
if (days > 30) {
// Daily data includes maintenance and pending
-bucket.maintenance += (dataPoint.maintenance || 0) * scaleFactor;
-bucket.pending += (dataPoint.pending || 0) * scaleFactor;
+bucket.maintenance += (dataPoint.maintenance || 0);
+bucket.pending += (dataPoint.pending || 0);
} else {
// Minute/hourly data doesn't track maintenance/pending separately
bucket.maintenance += 0;
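
For reference, the approach described in the FIXME (credit each bucket only with the portion of a data point that overlaps it, and push the remainder to the following buckets) could look roughly like the sketch below. This is a hypothetical illustration, not code from this commit; the function name distributeDataPoint, its parameters, and the assumption that timestamps and bucket boundaries are Unix seconds are all assumptions.

// Hypothetical sketch of proportional allocation (not part of this commit).
// Assumes timestampNum, bucket.start and bucket.end are Unix seconds and that
// dataPointSizeMinutes is the duration covered by one data point.
function distributeDataPoint(dataPoint, timestampNum, dataPointSizeMinutes, buckets) {
    const pointStart = timestampNum;
    const pointEnd = timestampNum + dataPointSizeMinutes * 60;
    const pointLength = pointEnd - pointStart;
    for (const bucket of buckets) {
        // Overlap of [pointStart, pointEnd) with [bucket.start, bucket.end)
        const overlap = Math.min(pointEnd, bucket.end) - Math.max(pointStart, bucket.start);
        if (overlap <= 0) {
            continue;
        }
        const fraction = overlap / pointLength;
        bucket.up += (dataPoint.up || 0) * fraction;
        bucket.down += (dataPoint.down || 0) * fraction;
        bucket.maintenance += (dataPoint.maintenance || 0) * fraction;
        bucket.pending += (dataPoint.pending || 0) * fraction;
    }
}

Splitting a point this way keeps the grand total across buckets equal to the original counts, instead of scaling them down (the removed behaviour) or crediting a full point to a single smaller bucket (the behaviour after this commit).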

@@ -612,7 +612,7 @@ test("Test getAggregatedBuckets - 31-63 day edge case (daily data)", async (t) =
// Reset to current time
UptimeCalculator.currentDate = currentTime;
-// Test 35-day range with buckets that match data granularity (no scale factor)
+// Test 35-day range with buckets that match data granularity
let buckets = c.getAggregatedBuckets(35, 35); // 35 days with 35 buckets = 1 day per bucket
assert.strictEqual(buckets.length, 35);
@@ -799,17 +799,19 @@ test("Test getAggregatedBuckets - Scale factor prevents over-counting", async (t
}
UptimeCalculator.currentDate = currentTime;
-// Test: When buckets are smaller than data granularity, scale factor should reduce counts
+// Test: When buckets are smaller than data granularity, may cause overcounting
+// FIXME: This test reflects the current flawed behavior where data points may be counted
+// multiple times when they span multiple buckets. See the FIXME in getAggregatedBuckets.
let smallBuckets = c.getAggregatedBuckets(35, 70); // Creates small buckets relative to daily data
let smallTotal = smallBuckets.reduce((sum, b) => sum + b.up, 0);
-// Test: When buckets match data granularity, no scaling should occur
+// Test: When buckets match data granularity, each data point is counted once
let normalBuckets = c.getAggregatedBuckets(4, 4); // 1 bucket per day
let normalTotal = normalBuckets.reduce((sum, b) => sum + b.up, 0);
-// Scale factor should reduce the count when buckets are smaller
-assert.ok(smallTotal < normalTotal, "Scale factor should reduce counts when buckets are smaller than data points");
-assert.ok(normalTotal >= 3, "Should capture most of the data points without scaling");
+// Without proper scaling, all data points are counted in their respective buckets
+assert.ok(smallTotal >= normalTotal, "Without scaling, counts should be at least equal");
+assert.ok(normalTotal >= 3, "Should capture most of the data points");
});
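
If a proportional split like the sketch after the first file were adopted, smallTotal and normalTotal should agree apart from floating-point error and edge effects at the range boundaries, so the test could assert approximate equality instead. A hypothetical version of that check (not part of this commit):

// Hypothetical assertion for a proportional-split implementation:
// per-bucket values become fractional, but the grand total is preserved.
assert.ok(Math.abs(smallTotal - normalTotal) < 1e-6, "Totals should match when data points are split across buckets");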
test("Test getAggregatedBuckets - Mixed data granularity", async (t) => {