Mirror of https://github.com/louislam/uptime-kuma.git (synced 2025-07-18 23:34:04 +02:00)
reverted bucketing as requested & adapted tests
Parent: 49e181e71e
Commit: c780c2ae8b
2 changed files with 29 additions and 41 deletions
@@ -899,44 +899,32 @@ class UptimeCalculator {
         }
 
         // Aggregate available data into buckets
-        // Since data is sorted, we can optimize by tracking current bucket index
-        let currentBucketIndex = 0;
         for (const [ timestamp, dataPoint ] of Object.entries(availableData)) {
             const timestampNum = parseInt(timestamp);
 
-            // Move to the correct bucket (since data is sorted, we only need to move forward)
-            while (currentBucketIndex < buckets.length &&
-                timestampNum >= buckets[currentBucketIndex].end) {
-                currentBucketIndex++;
-            }
-
-            // Check if we're within a valid bucket
-            // currentBucketIndex can be >= buckets.length when we have data points
-            // that are newer than our last bucket's end time (e.g., very recent data
-            // that falls outside our calculated time range)
-            if (currentBucketIndex < buckets.length) {
-                const bucket = buckets[currentBucketIndex];
-
-                if (timestampNum >= bucket.start && timestampNum < bucket.end) {
-                    // FIXME: This accounting is flawed when data points span multiple buckets.
-                    // The correct approach would be to:
-                    // 1. Add only the portion of the data point that fits within the current bucket
-                    // 2. Push the remainder to the next bucket (if it exists)
-                    // For now, we add the full data point to avoid complexity, which may cause
-                    // some overcounting when bucket size < data point size.
-
-                    bucket.up += (dataPoint.up || 0);
-                    bucket.down += (dataPoint.down || 0);
-
-                    if (days > 30) {
-                        // Daily data includes maintenance and pending
-                        bucket.maintenance += (dataPoint.maintenance || 0);
-                        bucket.pending += (dataPoint.pending || 0);
-                    } else {
-                        // Minute/hourly data doesn't track maintenance/pending separately
-                        bucket.maintenance += 0;
-                        bucket.pending += 0;
-                    }
+            // Find the appropriate bucket for this data point
+            // For daily data (> 30 days), timestamps are at start of day
+            // We need to find which bucket this day belongs to
+            for (let i = 0; i < buckets.length; i++) {
+                const bucket = buckets[i];
+                if (days > 30) {
+                    // For daily data, check if the timestamp falls within the bucket's day range
+                    if (timestampNum >= bucket.start && timestampNum < bucket.end) {
+                        bucket.up += dataPoint.up || 0;
+                        bucket.down += dataPoint.down || 0;
+                        bucket.maintenance += dataPoint.maintenance || 0;
+                        bucket.pending += dataPoint.pending || 0;
+                        break;
+                    }
+                } else {
+                    // For minute/hourly data, use exact timestamp matching
+                    if (timestampNum >= bucket.start && timestampNum < bucket.end && dataPoint) {
+                        bucket.up += dataPoint.up || 0;
+                        bucket.down += dataPoint.down || 0;
+                        bucket.maintenance += 0; // UptimeCalculator treats maintenance as up
+                        bucket.pending += 0; // UptimeCalculator doesn't track pending separately
+                        break;
+                    }
                 }
             }
         }
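For orientation, here is a minimal standalone sketch of the bucket-assignment pattern this hunk restores. It assumes buckets is an array of { start, end, up, down, maintenance, pending } ranges (Unix timestamps in seconds) and availableData maps timestamp strings to data points, as in the hunk above; the helper name aggregateIntoBuckets is illustrative only, since in the codebase this logic lives inline in getAggregatedBuckets.

// Sketch only (names are illustrative, not part of the codebase): each data point
// is matched against the bucket ranges in order and, once a containing bucket is
// found, the loop breaks so the point is counted exactly once.
function aggregateIntoBuckets(buckets, availableData, days) {
    for (const [ timestamp, dataPoint ] of Object.entries(availableData)) {
        const timestampNum = parseInt(timestamp);
        for (const bucket of buckets) {
            if (timestampNum >= bucket.start && timestampNum < bucket.end) {
                bucket.up += dataPoint.up || 0;
                bucket.down += dataPoint.down || 0;
                if (days > 30) {
                    // Daily data carries its own maintenance/pending counters
                    bucket.maintenance += dataPoint.maintenance || 0;
                    bucket.pending += dataPoint.pending || 0;
                }
                break; // a data point contributes to at most one bucket
            }
        }
    }
    return buckets;
}

The break is what makes the summed totals independent of how many buckets the same window is split into, which is exactly what the adapted test below asserts.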
@@ -787,7 +787,7 @@ test("Test getAggregatedBuckets - Data granularity transitions", async (t) => {
     }
 });
 
-test("Test getAggregatedBuckets - Scale factor prevents over-counting", async (t) => {
+test("Test getAggregatedBuckets - Break statements prevent double-counting", async (t) => {
     UptimeCalculator.currentDate = dayjs.utc("2025-08-12 12:00:00");
     let c = new UptimeCalculator();
     let currentTime = dayjs.utc("2025-08-12 12:00:00");
@@ -799,18 +799,18 @@ test("Test getAggregatedBuckets - Scale factor prevents over-counting", async (t
     }
     UptimeCalculator.currentDate = currentTime;
 
-    // Test: When buckets are smaller than data granularity, may cause overcounting
-    // FIXME: This test reflects the current flawed behavior where data points may be counted
-    // multiple times when they span multiple buckets. See the FIXME in getAggregatedBuckets.
-    let smallBuckets = c.getAggregatedBuckets(35, 70); // Creates small buckets relative to daily data
+    // Test: Each data point should only be counted in one bucket (using break statements)
+    // Use the same time range for both tests to ensure fair comparison
+    let smallBuckets = c.getAggregatedBuckets(4, 8); // Creates smaller buckets within same 4-day range
     let smallTotal = smallBuckets.reduce((sum, b) => sum + b.up, 0);
 
     // Test: When buckets match data granularity, each data point is counted once
     let normalBuckets = c.getAggregatedBuckets(4, 4); // 1 bucket per day
     let normalTotal = normalBuckets.reduce((sum, b) => sum + b.up, 0);
 
-    // Without proper scaling, all data points are counted in their respective buckets
-    assert.ok(smallTotal >= normalTotal, "Without scaling, counts should be at least equal");
+    // With proper break statements, each data point is counted exactly once regardless of bucket size
+    // when using the same time range
+    assert.strictEqual(smallTotal, normalTotal, "Data points should be counted exactly once regardless of bucket size within same time range");
     assert.ok(normalTotal >= 3, "Should capture most of the data points");
 });
 
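As a rough illustration of the invariant the renamed test checks, the sketch below (not the project's real test harness; it reuses the illustrative aggregateIntoBuckets helper above with made-up timestamps) aggregates four hypothetical daily data points into four and then eight buckets over the same window and expects identical totals.

// Sketch of the invariant the adapted test asserts; relies on the
// aggregateIntoBuckets() sketch above, not on UptimeCalculator itself.
const assert = require("node:assert");

// Hypothetical data: one "up" beat per day for four consecutive days
// (timestamps are illustrative seconds-since-epoch values at 00:00 UTC).
const availableData = {
    "1754524800": { up: 1, down: 0 },
    "1754611200": { up: 1, down: 0 },
    "1754697600": { up: 1, down: 0 },
    "1754784000": { up: 1, down: 0 },
};

// Build `count` equal-sized buckets spanning the same window.
function makeBuckets(start, end, count) {
    const size = (end - start) / count;
    return Array.from({ length: count }, (_, i) => ({
        start: start + i * size,
        end: start + (i + 1) * size,
        up: 0,
        down: 0,
        maintenance: 0,
        pending: 0,
    }));
}

const windowStart = 1754524800;
const windowEnd = windowStart + 4 * 86400;
const totalUp = (buckets) => buckets.reduce((sum, b) => sum + b.up, 0);

const fourBuckets = aggregateIntoBuckets(makeBuckets(windowStart, windowEnd, 4), availableData, 4);
const eightBuckets = aggregateIntoBuckets(makeBuckets(windowStart, windowEnd, 8), availableData, 4);

// Same window, different bucket counts: the break guarantees equal totals.
assert.strictEqual(totalUp(fourBuckets), totalUp(eightBuckets));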