Mirror of https://github.com/louislam/uptime-kuma.git (synced 2025-07-18 23:34:04 +02:00)
reverted bucketing as requested & adapted tests
parent 49e181e71e
commit c780c2ae8b

2 changed files with 29 additions and 41 deletions
@@ -899,44 +899,32 @@ class UptimeCalculator {
         }
         // Aggregate available data into buckets
-        // Since data is sorted, we can optimize by tracking current bucket index
-        let currentBucketIndex = 0;
-
         for (const [ timestamp, dataPoint ] of Object.entries(availableData)) {
             const timestampNum = parseInt(timestamp);
 
-            // Move to the correct bucket (since data is sorted, we only need to move forward)
-            while (currentBucketIndex < buckets.length &&
-                timestampNum >= buckets[currentBucketIndex].end) {
-                currentBucketIndex++;
-            }
-
-            // Check if we're within a valid bucket
-            // currentBucketIndex can be >= buckets.length when we have data points
-            // that are newer than our last bucket's end time (e.g., very recent data
-            // that falls outside our calculated time range)
-            if (currentBucketIndex < buckets.length) {
-                const bucket = buckets[currentBucketIndex];
-
-                if (timestampNum >= bucket.start && timestampNum < bucket.end) {
-                    // FIXME: This accounting is flawed when data points span multiple buckets.
-                    // The correct approach would be to:
-                    // 1. Add only the portion of the data point that fits within the current bucket
-                    // 2. Push the remainder to the next bucket (if it exists)
-                    // For now, we add the full data point to avoid complexity, which may cause
-                    // some overcounting when bucket size < data point size.
-
-                    bucket.up += (dataPoint.up || 0);
-                    bucket.down += (dataPoint.down || 0);
-
-                    if (days > 30) {
-                        // Daily data includes maintenance and pending
-                        bucket.maintenance += (dataPoint.maintenance || 0);
-                        bucket.pending += (dataPoint.pending || 0);
-                    } else {
-                        // Minute/hourly data doesn't track maintenance/pending separately
-                        bucket.maintenance += 0;
-                        bucket.pending += 0;
-                    }
-                }
-            }
+            // Find the appropriate bucket for this data point
+            // For daily data (> 30 days), timestamps are at start of day
+            // We need to find which bucket this day belongs to
+            for (let i = 0; i < buckets.length; i++) {
+                const bucket = buckets[i];
+
+                if (days > 30) {
+                    // For daily data, check if the timestamp falls within the bucket's day range
+                    if (timestampNum >= bucket.start && timestampNum < bucket.end) {
+                        bucket.up += dataPoint.up || 0;
+                        bucket.down += dataPoint.down || 0;
+                        bucket.maintenance += dataPoint.maintenance || 0;
+                        bucket.pending += dataPoint.pending || 0;
+                        break;
+                    }
+                } else {
+                    // For minute/hourly data, use exact timestamp matching
+                    if (timestampNum >= bucket.start && timestampNum < bucket.end && dataPoint) {
+                        bucket.up += dataPoint.up || 0;
+                        bucket.down += dataPoint.down || 0;
+                        bucket.maintenance += 0; // UptimeCalculator treats maintenance as up
+                        bucket.pending += 0; // UptimeCalculator doesn't track pending separately
+                        break;
+                    }
+                }
+            }
         }
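The reverted logic above attributes each data point to the first bucket whose [start, end) range contains its timestamp; the break ensures a point is never added to more than one bucket, and points that fall outside every bucket are simply dropped. A minimal standalone sketch of that first-match assignment, using toy buckets and plain objects rather than the real UptimeCalculator internals:

// Illustration only: simplified stand-ins for UptimeCalculator's buckets and data.
const buckets = [
    { start: 0, end: 100, up: 0, down: 0 },
    { start: 100, end: 200, up: 0, down: 0 },
];

const availableData = {
    50: { up: 1, down: 0 },
    150: { up: 0, down: 1 },
    250: { up: 1, down: 0 }, // outside every bucket, so it is counted nowhere
};

for (const [ timestamp, dataPoint ] of Object.entries(availableData)) {
    const timestampNum = parseInt(timestamp);
    for (const bucket of buckets) {
        if (timestampNum >= bucket.start && timestampNum < bucket.end) {
            bucket.up += dataPoint.up || 0;
            bucket.down += dataPoint.down || 0;
            break; // each data point lands in at most one bucket
        }
    }
}

console.log(buckets);
// [ { start: 0, end: 100, up: 1, down: 0 }, { start: 100, end: 200, up: 0, down: 1 } ]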
@@ -787,7 +787,7 @@ test("Test getAggregatedBuckets - Data granularity transitions", async (t) => {
     }
 });
 
-test("Test getAggregatedBuckets - Scale factor prevents over-counting", async (t) => {
+test("Test getAggregatedBuckets - Break statements prevent double-counting", async (t) => {
     UptimeCalculator.currentDate = dayjs.utc("2025-08-12 12:00:00");
     let c = new UptimeCalculator();
     let currentTime = dayjs.utc("2025-08-12 12:00:00");
@@ -799,18 +799,18 @@ test("Test getAggregatedBuckets - Scale factor prevents over-counting", async (t
     }
     UptimeCalculator.currentDate = currentTime;
 
-    // Test: When buckets are smaller than data granularity, may cause overcounting
-    // FIXME: This test reflects the current flawed behavior where data points may be counted
-    // multiple times when they span multiple buckets. See the FIXME in getAggregatedBuckets.
-    let smallBuckets = c.getAggregatedBuckets(35, 70); // Creates small buckets relative to daily data
+    // Test: Each data point should only be counted in one bucket (using break statements)
+    // Use the same time range for both tests to ensure fair comparison
+    let smallBuckets = c.getAggregatedBuckets(4, 8); // Creates smaller buckets within same 4-day range
     let smallTotal = smallBuckets.reduce((sum, b) => sum + b.up, 0);
 
     // Test: When buckets match data granularity, each data point is counted once
     let normalBuckets = c.getAggregatedBuckets(4, 4); // 1 bucket per day
     let normalTotal = normalBuckets.reduce((sum, b) => sum + b.up, 0);
 
-    // Without proper scaling, all data points are counted in their respective buckets
-    assert.ok(smallTotal >= normalTotal, "Without scaling, counts should be at least equal");
+    // With proper break statements, each data point is counted exactly once regardless of bucket size
+    // when using the same time range
+    assert.strictEqual(smallTotal, normalTotal, "Data points should be counted exactly once regardless of bucket size within same time range");
     assert.ok(normalTotal >= 3, "Should capture most of the data points");
 });
 
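The adapted test leans on an invariant of this first-match assignment: over the same overall time range, the total up count does not depend on how finely the range is split into buckets, because every data point is counted exactly once. A small self-contained sketch of that invariant check, with made-up points and Node's built-in assert rather than the project's test harness:

// Sketch of the invariant the adapted test asserts: splitting the same range into
// more buckets must not change the total, since every point is counted once.
const assert = require("node:assert");

function aggregate(points, buckets) {
    for (const p of points) {
        for (const b of buckets) {
            if (p.t >= b.start && p.t < b.end) {
                b.up += p.up;
                break;
            }
        }
    }
    return buckets.reduce((sum, b) => sum + b.up, 0);
}

const points = [ { t: 10, up: 1 }, { t: 110, up: 1 }, { t: 210, up: 1 } ];

const coarse = [ { start: 0, end: 400, up: 0 } ];
const fine = [
    { start: 0, end: 100, up: 0 },
    { start: 100, end: 200, up: 0 },
    { start: 200, end: 300, up: 0 },
    { start: 300, end: 400, up: 0 },
];

assert.strictEqual(aggregate(points, coarse), aggregate(points, fine)); // both totals are 3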