Mirror of https://github.com/louislam/uptime-kuma.git
add support for MySQL, convert database migrations to knex, rewrite complex queries to knex
commit 9c069deb5d (parent 0ca68f791f)
28 changed files with 337 additions and 714 deletions
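Editor's sketch of the pattern this commit applies (illustrative, not code from the commit): each raw SQLite patch script under db/ is replaced by a dialect-neutral knex migration, so an ALTER TABLE patch such as the deleted 2FA patches maps onto knex's schema builder roughly as below; the file name and the rollback are assumptions.

// migrations/xxxx_add_twofa_secret.js (hypothetical name)
exports.up = function (knex) {
    return knex.schema.alterTable("user", function (table) {
        // mirrors: ALTER TABLE user ADD twofa_secret VARCHAR(64);
        table.string("twofa_secret", 64);
    });
};

exports.down = function (knex) {
    // the raw .sql patches had no rollback; knex migrations can provide one
    return knex.schema.alterTable("user", function (table) {
        table.dropColumn("twofa_secret");
    });
};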
BIN  db/kuma.db  (binary file not shown)
@@ -1,7 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION;

ALTER TABLE user
    ADD twofa_last_token VARCHAR(6);

COMMIT;
@@ -1,10 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION;

ALTER TABLE user
    ADD twofa_secret VARCHAR(64);

ALTER TABLE user
    ADD twofa_status BOOLEAN default 0 NOT NULL;

COMMIT;
@@ -1,7 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION;

ALTER TABLE monitor
    ADD retry_interval INTEGER default 0 not null;

COMMIT;
@@ -1,30 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION;

create table `group`
(
    id INTEGER not null
        constraint group_pk
            primary key autoincrement,
    name VARCHAR(255) not null,
    created_date DATETIME default (DATETIME('now')) not null,
    public BOOLEAN default 0 not null,
    active BOOLEAN default 1 not null,
    weight BOOLEAN NOT NULL DEFAULT 1000
);

CREATE TABLE [monitor_group]
(
    [id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
    [monitor_id] INTEGER NOT NULL REFERENCES [monitor] ([id]) ON DELETE CASCADE ON UPDATE CASCADE,
    [group_id] INTEGER NOT NULL REFERENCES [group] ([id]) ON DELETE CASCADE ON UPDATE CASCADE,
    weight BOOLEAN NOT NULL DEFAULT 1000
);

CREATE INDEX [fk]
    ON [monitor_group] (
        [monitor_id],
        [group_id]);

COMMIT;
@@ -1,13 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION;

ALTER TABLE monitor
    ADD method TEXT default 'GET' not null;

ALTER TABLE monitor
    ADD body TEXT default null;

ALTER TABLE monitor
    ADD headers TEXT default null;

COMMIT;
@@ -1,10 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION;

-- For sendHeartbeatList
CREATE INDEX monitor_time_index ON heartbeat (monitor_id, time);

-- For sendImportantHeartbeatList
CREATE INDEX monitor_important_time_index ON heartbeat (monitor_id, important, time);

COMMIT;
@@ -1,18 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION;

create table incident
(
    id INTEGER not null
        constraint incident_pk
            primary key autoincrement,
    title VARCHAR(255) not null,
    content TEXT not null,
    style VARCHAR(30) default 'warning' not null,
    created_date DATETIME default (DATETIME('now')) not null,
    last_updated_date DATETIME,
    pin BOOLEAN default 1 not null,
    active BOOLEAN default 1 not null
);

COMMIT;
@@ -1,10 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION;

ALTER TABLE monitor
    ADD basic_auth_user TEXT default null;

ALTER TABLE monitor
    ADD basic_auth_pass TEXT default null;

COMMIT;
@@ -1,7 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION;

ALTER TABLE monitor
    ADD push_token VARCHAR(20) DEFAULT NULL;

COMMIT;
@@ -1,18 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION;

CREATE TABLE [notification_sent_history] (
    [id] INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
    [type] VARCHAR(50) NOT NULL,
    [monitor_id] INTEGER NOT NULL,
    [days] INTEGER NOT NULL,
    UNIQUE([type], [monitor_id], [days])
);

CREATE INDEX [good_index] ON [notification_sent_history] (
    [type],
    [monitor_id],
    [days]
);

COMMIT;
@@ -1,22 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION;

-- Generated by Intellij IDEA
create table setting_dg_tmp
(
    id INTEGER
        primary key autoincrement,
    key VARCHAR(200) not null
        unique,
    value TEXT,
    type VARCHAR(20)
);

insert into setting_dg_tmp(id, key, value, type) select id, key, value, type from setting;

drop table setting;

alter table setting_dg_tmp rename to setting;

COMMIT;
@@ -1,37 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
-- Change Monitor.created_date from "TIMESTAMP" to "DATETIME"
-- SQL Generated by Intellij Idea
PRAGMA foreign_keys=off;

BEGIN TRANSACTION;

create table monitor_dg_tmp
(
    id INTEGER not null
        primary key autoincrement,
    name VARCHAR(150),
    active BOOLEAN default 1 not null,
    user_id INTEGER
        references user
            on update cascade on delete set null,
    interval INTEGER default 20 not null,
    url TEXT,
    type VARCHAR(20),
    weight INTEGER default 2000,
    hostname VARCHAR(255),
    port INTEGER,
    created_date DATETIME,
    keyword VARCHAR(255)
);

insert into monitor_dg_tmp(id, name, active, user_id, interval, url, type, weight, hostname, port, created_date, keyword) select id, name, active, user_id, interval, url, type, weight, hostname, port, created_date, keyword from monitor;

drop table monitor;

alter table monitor_dg_tmp rename to monitor;

create index user_id on monitor (user_id);

COMMIT;

PRAGMA foreign_keys=on;
@@ -1,19 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
CREATE TABLE tag (
    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
    name VARCHAR(255) NOT NULL,
    color VARCHAR(255) NOT NULL,
    created_date DATETIME DEFAULT (DATETIME('now')) NOT NULL
);

CREATE TABLE monitor_tag (
    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
    monitor_id INTEGER NOT NULL,
    tag_id INTEGER NOT NULL,
    value TEXT,
    CONSTRAINT FK_tag FOREIGN KEY (tag_id) REFERENCES tag(id) ON DELETE CASCADE ON UPDATE CASCADE,
    CONSTRAINT FK_monitor FOREIGN KEY (monitor_id) REFERENCES monitor(id) ON DELETE CASCADE ON UPDATE CASCADE
);

CREATE INDEX monitor_tag_monitor_id_index ON monitor_tag (monitor_id);
CREATE INDEX monitor_tag_tag_id_index ON monitor_tag (tag_id);
@@ -1,9 +0,0 @@
BEGIN TRANSACTION;

CREATE TABLE monitor_tls_info (
    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
    monitor_id INTEGER NOT NULL,
    info_json TEXT
);

COMMIT;
@@ -1,37 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
-- Add maxretries column to monitor
PRAGMA foreign_keys=off;

BEGIN TRANSACTION;

create table monitor_dg_tmp
(
    id INTEGER not null
        primary key autoincrement,
    name VARCHAR(150),
    active BOOLEAN default 1 not null,
    user_id INTEGER
        references user
            on update cascade on delete set null,
    interval INTEGER default 20 not null,
    url TEXT,
    type VARCHAR(20),
    weight INTEGER default 2000,
    hostname VARCHAR(255),
    port INTEGER,
    created_date DATETIME,
    keyword VARCHAR(255),
    maxretries INTEGER NOT NULL DEFAULT 0
);

insert into monitor_dg_tmp(id, name, active, user_id, interval, url, type, weight, hostname, port, created_date, keyword) select id, name, active, user_id, interval, url, type, weight, hostname, port, created_date, keyword from monitor;

drop table monitor;

alter table monitor_dg_tmp rename to monitor;

create index user_id on monitor (user_id);

COMMIT;

PRAGMA foreign_keys=on;
@@ -1,40 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
-- OK.... serious wrong, missing maxretries column
-- Developers should patch it manually if you have missing the maxretries column
PRAGMA foreign_keys=off;

BEGIN TRANSACTION;

create table monitor_dg_tmp
(
    id INTEGER not null
        primary key autoincrement,
    name VARCHAR(150),
    active BOOLEAN default 1 not null,
    user_id INTEGER
        references user
            on update cascade on delete set null,
    interval INTEGER default 20 not null,
    url TEXT,
    type VARCHAR(20),
    weight INTEGER default 2000,
    hostname VARCHAR(255),
    port INTEGER,
    created_date DATETIME,
    keyword VARCHAR(255),
    maxretries INTEGER NOT NULL DEFAULT 0,
    ignore_tls BOOLEAN default 0 not null,
    upside_down BOOLEAN default 0 not null
);

insert into monitor_dg_tmp(id, name, active, user_id, interval, url, type, weight, hostname, port, created_date, keyword, maxretries) select id, name, active, user_id, interval, url, type, weight, hostname, port, created_date, keyword, maxretries from monitor;

drop table monitor;

alter table monitor_dg_tmp rename to monitor;

create index user_id on monitor (user_id);

COMMIT;

PRAGMA foreign_keys=on;
@@ -1,70 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
PRAGMA foreign_keys = off;

BEGIN TRANSACTION;

create table monitor_dg_tmp (
    id INTEGER not null primary key autoincrement,
    name VARCHAR(150),
    active BOOLEAN default 1 not null,
    user_id INTEGER references user on update cascade on delete
    set
        null,
    interval INTEGER default 20 not null,
    url TEXT,
    type VARCHAR(20),
    weight INTEGER default 2000,
    hostname VARCHAR(255),
    port INTEGER,
    created_date DATETIME default (DATETIME('now')) not null,
    keyword VARCHAR(255),
    maxretries INTEGER NOT NULL DEFAULT 0,
    ignore_tls BOOLEAN default 0 not null,
    upside_down BOOLEAN default 0 not null
);

insert into
    monitor_dg_tmp(
        id,
        name,
        active,
        user_id,
        interval,
        url,
        type,
        weight,
        hostname,
        port,
        keyword,
        maxretries,
        ignore_tls,
        upside_down
    )
select
    id,
    name,
    active,
    user_id,
    interval,
    url,
    type,
    weight,
    hostname,
    port,
    keyword,
    maxretries,
    ignore_tls,
    upside_down
from
    monitor;

drop table monitor;

alter table
    monitor_dg_tmp rename to monitor;

create index user_id on monitor (user_id);

COMMIT;

PRAGMA foreign_keys = on;
@@ -1,74 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
PRAGMA foreign_keys = off;

BEGIN TRANSACTION;

create table monitor_dg_tmp (
    id INTEGER not null primary key autoincrement,
    name VARCHAR(150),
    active BOOLEAN default 1 not null,
    user_id INTEGER references user on update cascade on delete
    set
        null,
    interval INTEGER default 20 not null,
    url TEXT,
    type VARCHAR(20),
    weight INTEGER default 2000,
    hostname VARCHAR(255),
    port INTEGER,
    created_date DATETIME default (DATETIME('now')) not null,
    keyword VARCHAR(255),
    maxretries INTEGER NOT NULL DEFAULT 0,
    ignore_tls BOOLEAN default 0 not null,
    upside_down BOOLEAN default 0 not null,
    maxredirects INTEGER default 10 not null,
    accepted_statuscodes_json TEXT default '["200-299"]' not null
);

insert into
    monitor_dg_tmp(
        id,
        name,
        active,
        user_id,
        interval,
        url,
        type,
        weight,
        hostname,
        port,
        created_date,
        keyword,
        maxretries,
        ignore_tls,
        upside_down
    )
select
    id,
    name,
    active,
    user_id,
    interval,
    url,
    type,
    weight,
    hostname,
    port,
    created_date,
    keyword,
    maxretries,
    ignore_tls,
    upside_down
from
    monitor;

drop table monitor;

alter table
    monitor_dg_tmp rename to monitor;

create index user_id on monitor (user_id);

COMMIT;

PRAGMA foreign_keys = on;
@@ -1,10 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION;

ALTER TABLE monitor
    ADD dns_resolve_type VARCHAR(5);

ALTER TABLE monitor
    ADD dns_resolve_server VARCHAR(255);

COMMIT;
@@ -1,7 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION;

ALTER TABLE monitor
    ADD dns_last_result VARCHAR(255);

COMMIT;
@@ -1,7 +0,0 @@
-- You should not modify if this have pushed to Github, unless it does serious wrong with the db.
BEGIN TRANSACTION;

ALTER TABLE notification
    ADD is_default BOOLEAN default 0 NOT NULL;

COMMIT;
knexfile.js  (new file, 66 lines)
@@ -0,0 +1,66 @@
// Update with your config settings.

const dbType = process.env.DB_TYPE || 'sqlite3';
const dbHost = process.env.DB_HOST;
const dbName = process.env.DB_NAME;
const dbUser = process.env.DB_USER;
const dbPass = process.env.DB_PASS;

let database;

switch (dbType) {
    case 'sqlite3':
        const dialect = require("knex/lib/dialects/sqlite3/index.js");
        dialect.prototype._driver = () => require("@louislam/sqlite3");

        database = {
            client: dialect,
            connection: {
                filename: './data/kuma.db',
                acquireConnectionTimeout: 120 * 1000,
            },
            useNullAsDefault: true,
            pool: {
                min: 1,
                max: 1,
                idleTimeoutMillis: 120 * 1000,
                propagateCreateError: false,
                acquireTimeoutMillis: 120 * 1000,
            },
            migrations: {
                tableName: 'knex_migrations'
            }
        };
        break;

    case 'mysql':

        database = {
            client: "mysql",
            connection: {
                host: dbHost,
                user: dbUser,
                database: dbName,
                password: dbPass,
            }
        };
        break;
}

function setPath(path) {
    // Only the SQLite config has a filename to override.
    if (dbType !== 'sqlite3')
        return;

    database.connection.filename = path;
}

function getDialect() {
    return dbType;
}

module.exports = {
    development: database,
    production: database,
    setPath: setPath,
    getDialect: getDialect,
};
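A quick usage sketch for the config above (assumed invocation, not part of the commit). The backend is chosen from environment variables at require time, so they must be set before knexfile.js is loaded:

// Assumes a reachable MySQL server with these (made-up) credentials.
process.env.DB_TYPE = "mysql";
process.env.DB_HOST = "127.0.0.1";
process.env.DB_NAME = "kuma";
process.env.DB_USER = "kuma";
process.env.DB_PASS = "secret";

const knex = require("knex");
const knexConfig = require("./knexfile.js");

const db = knex(knexConfig.development);
db.raw("SELECT 1")
    .then(() => console.log("connected"))
    .finally(() => db.destroy());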
migrations/20211218111510_init.js  (new file, 172 lines)
@@ -0,0 +1,172 @@
exports.up = function(knex) {
    return knex.schema.createTable('setting', function(table) {
        table.increments('id');
        table.string('key', 200).notNullable();
        table.string('value', 200);
        table.string('type', 20);

        table.unique('key')
    }).then(() =>
        knex.schema.createTable('user', function(table) {
            table.increments('id');
            table.string('username', 255).notNullable();
            table.string('password', 255);
            table.boolean('active').notNullable().defaultTo(true);
            table.string('timezone', 150);
            table.string('twofa_secret', 64);
            table.boolean('twofa_status').notNullable().defaultTo(false);
            table.string('twofa_last_token', 6);

            table.unique('username');
        })
    ).then(() =>
        knex.schema.createTable('notification', function(table) {
            table.increments('id');
            table.string('name', 255).notNullable();
            table.text('config');
            table.boolean('active').notNullable().defaultTo(true);
            table.integer('user_id', 10).unsigned().references('user.id').onUpdate('CASCADE').onDelete('SET NULL');
            table.boolean('is_default').notNullable().defaultTo(false);
        })
    ).then(() =>
        knex.schema.createTable('monitor', function(table) {
            table.increments('id');
            table.string('name', 150).notNullable();
            table.integer('user_id', 10).unsigned().references('user.id').onUpdate('CASCADE').onDelete('SET NULL');
            table.datetime('created_date').notNullable().defaultTo(knex.fn.now());

            table.boolean('active').notNullable().defaultTo(true);
            table.integer('interval').notNullable().defaultTo(20);
            table.string('type', 20);
            table.string('url');
            table.string('hostname', 255);
            table.integer('port');

            table.integer('weight').defaultTo(2000);
            table.string('keyword', 255);

            table.boolean('ignore_tls').notNullable().defaultTo(false);
            table.boolean('upside_down').notNullable().defaultTo(false);
            table.integer('maxretries').notNullable().defaultTo(0);
            table.integer('maxredirects').notNullable().defaultTo(10);
            table.string('accepted_statuscodes_json').notNullable().defaultTo('["200-299"]');

            table.string('dns_resolve_type', 5);
            table.string('dns_resolve_server', 255);
            table.string('dns_last_result', 255);

            table.integer('retry_interval').notNullable().defaultTo(0);
            table.string('push_token', 20);

            table.string('method').notNullable().defaultTo('GET');
            table.text('body');
            table.text('headers');
            table.string('basic_auth_user');
            table.string('basic_auth_pass');

            table.index(['user_id']);
        })
    ).then(() =>
        knex.schema.createTable('incident', function(table) {
            table.increments('id');
            table.string('title', 255).notNullable();
            table.string('content');
            table.string('style', 30).notNullable().defaultTo('warning');
            table.datetime('created_date').notNullable().defaultTo(knex.fn.now());
            table.datetime('last_updated_date');
            table.boolean('pin').notNullable().defaultTo(true);
            table.boolean('active').notNullable().defaultTo(true);
        })
    ).then(() =>
        knex.schema.createTable('group', function(table) {
            table.increments('id');
            table.string('name', 255).notNullable();
            table.datetime('created_date').notNullable().defaultTo(knex.fn.now());

            table.boolean('public').notNullable().defaultTo(false);
            table.boolean('active').notNullable().defaultTo(true);
            table.integer('weight').notNullable().defaultTo(1000);
        })
    ).then(() =>
        knex.schema.createTable('tag', function(table) {
            table.increments('id');
            table.string('name', 255).notNullable();
            table.string('color', 255).notNullable();
            table.datetime('created_date').notNullable().defaultTo(knex.fn.now());
        })
    ).then(() =>
        knex.schema.createTable('monitor_tls_info', function(table) {
            table.increments('id');
            table.integer('monitor_id', 10).unsigned().notNullable().references('monitor.id').onUpdate('CASCADE').onDelete('CASCADE');
            table.text('info_json');
        })
    ).then(() =>
        knex.schema.createTable('notification_sent_history', function(table) {
            table.increments('id');
            table.string('type', 50);
            table.integer('monitor_id', 10).unsigned().notNullable().references('monitor.id').onUpdate('CASCADE').onDelete('CASCADE');
            table.integer('days').notNullable();

            table.unique(['type', 'monitor_id', 'days']);
        })
    ).then(() =>
        knex.schema.createTable('heartbeat', function(table) {
            table.increments('id');
            table.boolean('important').notNullable().defaultTo(false);
            table.integer('monitor_id', 10).unsigned().notNullable().references('monitor.id').onUpdate('CASCADE').onDelete('CASCADE');
            table.integer('status').notNullable();
            table.text('msg');
            table.datetime('time').notNullable();
            table.integer('ping');
            table.integer('duration').notNullable().defaultTo(0);

            table.index(['monitor_id', 'time'], 'monitor_time_index');
            table.index(['monitor_id', 'important', 'time'], 'monitor_important_time_index');
            table.index(['monitor_id']);
            table.index(['important']);
        })
    ).then(() =>
        knex.schema.createTable('monitor_notification', function(table) {
            table.increments('id');
            table.integer('monitor_id', 10).unsigned().notNullable().references('monitor.id').onUpdate('CASCADE').onDelete('CASCADE');
            table.integer('notification_id', 10).unsigned().notNullable().references('notification.id').onUpdate('CASCADE').onDelete('CASCADE');

            table.index(['monitor_id', 'notification_id']);
        })
    ).then(() =>
        knex.schema.createTable('monitor_group', function(table) {
            table.increments('id');
            table.integer('monitor_id', 10).unsigned().notNullable().references('monitor.id').onUpdate('CASCADE').onDelete('CASCADE');
            table.integer('group_id', 10).unsigned().notNullable().references('group.id').onUpdate('CASCADE').onDelete('CASCADE');
            table.integer('weight').notNullable().defaultTo(1000);

            table.index(['monitor_id', 'group_id']);
        })
    ).then(() =>
        knex.schema.createTable('monitor_tag', function(table) {
            table.increments('id');
            table.integer('monitor_id', 10).unsigned().notNullable().references('monitor.id').onUpdate('CASCADE').onDelete('CASCADE');
            table.integer('tag_id', 10).unsigned().notNullable().references('tag.id').onUpdate('CASCADE').onDelete('CASCADE');
            table.string('value');

            table.index(['monitor_id']);
            table.index(['tag_id']);
        })
    );
};

exports.down = function(knex) {
    return knex.schema.dropTable('monitor_tag')
        .then(() => knex.schema.dropTable('monitor_group'))
        .then(() => knex.schema.dropTable('monitor_notification'))
        .then(() => knex.schema.dropTable('heartbeat'))
        .then(() => knex.schema.dropTable('notification_sent_history'))
        .then(() => knex.schema.dropTable('monitor_tls_info'))
        .then(() => knex.schema.dropTable('tag'))
        .then(() => knex.schema.dropTable('group'))
        .then(() => knex.schema.dropTable('incident'))
        .then(() => knex.schema.dropTable('monitor'))
        .then(() => knex.schema.dropTable('notification'))
        .then(() => knex.schema.dropTable('user'))
        .then(() => knex.schema.dropTable('setting'))
};
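A minimal sketch (assumed driver script, not in the commit) of applying this migration outside the server process, which is the same call Database.connect() makes at startup:

const knex = require("knex");
const config = require("./knexfile.js");

async function migrate() {
    const db = knex(config.development);
    // Runs every file in ./migrations that is not yet recorded in the
    // knex_migrations bookkeeping table; resolves to [batchNo, appliedFiles].
    const [batch, applied] = await db.migrate.latest();
    console.log(`batch ${batch} applied:`, applied);
    await db.destroy();
}

migrate();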
@@ -10,8 +10,6 @@ const knex = require("knex");
 */
class Database {

    static templatePath = "./db/kuma.db";

    /**
     * Data Dir (Default: ./data)
     */
@@ -41,19 +39,19 @@ class Database {
     * false: Do nothing
     * { parents: []}: Need parents before add it
     */
    static patchList = {
        "patch-setting-value-type.sql": true,
        "patch-improve-performance.sql": true,
        "patch-2fa.sql": true,
        "patch-add-retry-interval-monitor.sql": true,
        "patch-incident-table.sql": true,
        "patch-group-table.sql": true,
        "patch-monitor-push_token.sql": true,
        "patch-http-monitor-method-body-and-headers.sql": true,
        "patch-2fa-invalidate-used-token.sql": true,
        "patch-notification_sent_history.sql": true,
        "patch-monitor-basic-auth.sql": true,
    }
    // static patchList = {
    //     "patch-setting-value-type.sql": true,
    //     "patch-improve-performance.sql": true,
    //     "patch-2fa.sql": true,
    //     "patch-add-retry-interval-monitor.sql": true,
    //     "patch-incident-table.sql": true,
    //     "patch-group-table.sql": true,
    //     "patch-monitor-push_token.sql": true,
    //     "patch-http-monitor-method-body-and-headers.sql": true,
    //     "patch-2fa-invalidate-used-token.sql": true,
    //     "patch-notification_sent_history.sql": true,
    //     "patch-monitor-basic-auth.sql": true,
    // }

    /**
     * The final version should be 10 after merged tag feature
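For reference, the three value shapes that the patchList doc-comment above describes would look like this (a sketch; the filenames are hypothetical, not entries from this repo):

class ExampleDatabase {
    static patchList = {
        "patch-standalone.sql": true,          // apply directly
        "patch-child.sql": {
            parents: ["patch-standalone.sql"], // parents are patched first, recursively
        },
        "patch-retired.sql": false,            // falsy value: do nothing, skip
    };
}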
@@ -81,26 +79,14 @@ class Database {
    }

    static async connect(testMode = false) {
        const acquireConnectionTimeout = 120 * 1000;
        const knexConfig = require('../knexfile.js');
        knexConfig.setPath(Database.path);

        const Dialect = require("knex/lib/dialects/sqlite3/index.js");
        Dialect.prototype._driver = () => require("@louislam/sqlite3");
        Database.dialect = knexConfig.getDialect();

        const knexInstance = knex({
            client: Dialect,
            connection: {
                filename: Database.path,
                acquireConnectionTimeout: acquireConnectionTimeout,
            },
            useNullAsDefault: true,
            pool: {
                min: 1,
                max: 1,
                idleTimeoutMillis: 120 * 1000,
                propagateCreateError: false,
                acquireTimeoutMillis: acquireConnectionTimeout,
            }
        });
        const knexInstance = knex(knexConfig['development']);

        await knexInstance.migrate.latest();

        R.setup(knexInstance);

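Sketched net effect of the rewrite above for the default SQLite path (an assumed reading, using the knexfile.js shown earlier):

// DB_TYPE unset => knexfile.js picks the sqlite3 branch with the patched driver
const knexConfig = require("../knexfile.js");
knexConfig.setPath(Database.path);            // point connection.filename at the data dir
Database.dialect = knexConfig.getDialect();   // gates SQLite-only code paths below

const knexInstance = knex(knexConfig["development"]);
await knexInstance.migrate.latest();          // replaces the old ./db/patch*.sql chain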
@@ -112,6 +98,7 @@ class Database {
        R.freeze(true);
        await R.autoloadModels("./server/model");

        if (Database.dialect == "sqlite3") {
            await R.exec("PRAGMA foreign_keys = ON");
        if (testMode) {
            // Change to MEMORY
@@ -128,158 +115,6 @@ class Database {
        console.log(await R.getAll("PRAGMA cache_size"));
        console.log("SQLite Version: " + await R.getCell("SELECT sqlite_version()"));
    }

    static async patch() {
        let version = parseInt(await setting("database_version"));

        if (! version) {
            version = 0;
        }

        console.info("Your database version: " + version);
        console.info("Latest database version: " + this.latestVersion);

        if (version === this.latestVersion) {
            console.info("Database patch not needed");
        } else if (version > this.latestVersion) {
            console.info("Warning: Database version is newer than expected");
        } else {
            console.info("Database patch is needed");

            this.backup(version);

            // Try catch anything here, if gone wrong, restore the backup
            try {
                for (let i = version + 1; i <= this.latestVersion; i++) {
                    const sqlFile = `./db/patch${i}.sql`;
                    console.info(`Patching ${sqlFile}`);
                    await Database.importSQLFile(sqlFile);
                    console.info(`Patched ${sqlFile}`);
                    await setSetting("database_version", i);
                }
            } catch (ex) {
                await Database.close();

                console.error(ex);
                console.error("Start Uptime-Kuma failed due to issue patching the database");
                console.error("Please submit a bug report if you still encounter the problem after restart: https://github.com/louislam/uptime-kuma/issues");

                this.restore();
                process.exit(1);
            }
        }

        await this.patch2();
    }

    /**
     * Call it from patch() only
     * @returns {Promise<void>}
     */
    static async patch2() {
        console.log("Database Patch 2.0 Process");
        let databasePatchedFiles = await setting("databasePatchedFiles");

        if (! databasePatchedFiles) {
            databasePatchedFiles = {};
        }

        debug("Patched files:");
        debug(databasePatchedFiles);

        try {
            for (let sqlFilename in this.patchList) {
                await this.patch2Recursion(sqlFilename, databasePatchedFiles);
            }

            if (this.patched) {
                console.log("Database Patched Successfully");
            }

        } catch (ex) {
            await Database.close();

            console.error(ex);
            console.error("Start Uptime-Kuma failed due to issue patching the database");
            console.error("Please submit the bug report if you still encounter the problem after restart: https://github.com/louislam/uptime-kuma/issues");

            this.restore();

            process.exit(1);
        }

        await setSetting("databasePatchedFiles", databasePatchedFiles);
    }

    /**
     * Used it patch2() only
     * @param sqlFilename
     * @param databasePatchedFiles
     */
    static async patch2Recursion(sqlFilename, databasePatchedFiles) {
        let value = this.patchList[sqlFilename];

        if (! value) {
            console.log(sqlFilename + " skip");
            return;
        }

        // Check if patched
        if (! databasePatchedFiles[sqlFilename]) {
            console.log(sqlFilename + " is not patched");

            if (value.parents) {
                console.log(sqlFilename + " need parents");
                for (let parentSQLFilename of value.parents) {
                    await this.patch2Recursion(parentSQLFilename, databasePatchedFiles);
                }
            }

            this.backup(dayjs().format("YYYYMMDDHHmmss"));

            console.log(sqlFilename + " is patching");
            this.patched = true;
            await this.importSQLFile("./db/" + sqlFilename);
            databasePatchedFiles[sqlFilename] = true;
            console.log(sqlFilename + " was patched successfully");

        } else {
            debug(sqlFilename + " is already patched, skip");
        }
    }

    /**
     * Sadly, multi sql statements is not supported by many sqlite libraries, I have to implement it myself
     * @param filename
     * @returns {Promise<void>}
     */
    static async importSQLFile(filename) {

        await R.getCell("SELECT 1");

        let text = fs.readFileSync(filename).toString();

        // Remove all comments (--)
        let lines = text.split("\n");
        lines = lines.filter((line) => {
            return ! line.startsWith("--");
        });

        // Split statements by semicolon
        // Filter out empty line
        text = lines.join("\n");

        let statements = text.split(";")
            .map((statement) => {
                return statement.trim();
            })
            .filter((statement) => {
                return statement !== "";
            });

        for (let statement of statements) {
            await R.exec(statement);
        }
    }

    static getBetterSQLite3Database() {
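For context on the deleted importSQLFile() above, its statement splitting was purely textual; a self-contained illustration of the same logic (not code from the repo):

// Mirrors the deleted comment-stripping and ";" split.
const text = "-- add a column\nALTER TABLE user ADD foo TEXT;\nUPDATE user SET foo = 'bar';\n";

const statements = text.split("\n")
    .filter((line) => ! line.startsWith("--"))  // drop full-line comments
    .join("\n")
    .split(";")                                 // naive split; breaks on ";" inside strings
    .map((s) => s.trim())
    .filter((s) => s !== "");

console.log(statements);
// [ "ALTER TABLE user ADD foo TEXT", "UPDATE user SET foo = 'bar'" ]

knex migrations make this machinery unnecessary, which is why the whole block is removed.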
@@ -320,6 +155,9 @@ class Database {
     * @param version
     */
    static backup(version) {
        if (Database.dialect !== 'sqlite3')
            return;

        if (! this.backupPath) {
            console.info("Backing up the database");
            this.backupPath = this.dataDir + "kuma.db.bak" + version;
@@ -343,6 +181,9 @@ class Database {
     *
     */
    static restore() {
        if (Database.dialect !== 'sqlite3')
            return;

        if (this.backupPath) {
            console.error("Patching the database failed!!! Restoring the backup");

@@ -384,6 +225,9 @@ class Database {
    }

    static getSize() {
        if (Database.dialect !== 'sqlite3')
            throw {message: "DB size is only supported on SQLite"};

        debug("Database.getSize()");
        let stats = fs.statSync(Database.path);
        debug(stats);
@@ -391,7 +235,10 @@ class Database {
    }

    static async shrink() {
        await R.exec("VACUUM");
        if (Database.dialect !== 'sqlite3')
            throw {message: "VACUUM is only supported on SQLite"};

        return R.exec("VACUUM");
    }
}

@@ -547,15 +547,16 @@ class Monitor extends BeanModel {
    static async sendAvgPing(duration, io, monitorID, userID) {
        const timeLogger = new TimeLogger();

        let avgPing = parseInt(await R.getCell(`
            SELECT AVG(ping)
            FROM heartbeat
            WHERE time > DATETIME('now', ? || ' hours')
            AND ping IS NOT NULL
            AND monitor_id = ? `, [
            -duration,
            monitorID,
        ]));
        let startTime = dayjs.utc().subtract(duration, 'hours').toISOString();

        let results = await R._knex.avg('ping as avg_ping')
            .from('heartbeat')
            .where('time', '>', startTime)
            .whereNotNull('ping')
            .andWhere({monitor_id: monitorID})
            .limit(1);

        let avgPing = results[0].avg_ping;

        timeLogger.print(`[Monitor: ${monitorID}] avgPing`);

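An aside on the rewrite above: the old raw query leaned on SQLite's DATETIME('now', ...), while the knex version computes startTime in JavaScript, so the same code runs on MySQL. knex can also print the SQL it generates, which is handy for checking the rewrite (a sketch, assuming the same R._knex instance is in scope):

const sql = R._knex.avg('ping as avg_ping')
    .from('heartbeat')
    .where('time', '>', startTime)
    .whereNotNull('ping')
    .andWhere({monitor_id: monitorID})
    .limit(1)
    .toString();
// On SQLite this prints roughly:
// select avg(`ping`) as `avg_ping` from `heartbeat`
// where `time` > '...' and `ping` is not null and `monitor_id` = ... limit 1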
@@ -580,40 +581,34 @@ class Monitor extends BeanModel {
    static async calcUptime(duration, monitorID) {
        const timeLogger = new TimeLogger();

        const startTime = R.isoDateTime(dayjs.utc().subtract(duration, "hour"));

        // Handle if heartbeat duration longer than the target duration
        // e.g. If the last beat's duration is bigger that the 24hrs window, it will use the duration between the (beat time - window margin) (THEN case in SQL)
        let result = await R.getRow(`
            SELECT
                -- SUM all duration, also trim off the beat out of time window
                SUM(
                    CASE
                        WHEN (JULIANDAY(\`time\`) - JULIANDAY(?)) * 86400 < duration
                        THEN (JULIANDAY(\`time\`) - JULIANDAY(?)) * 86400
                        ELSE duration
                    END
                ) AS total_duration,

                -- SUM all uptime duration, also trim off the beat out of time window
                SUM(
                    CASE
                        WHEN (status = 1)
                        THEN
                            CASE
                                WHEN (JULIANDAY(\`time\`) - JULIANDAY(?)) * 86400 < duration
                                THEN (JULIANDAY(\`time\`) - JULIANDAY(?)) * 86400
                                ELSE duration
                            END
                    END
                ) AS uptime_duration
            FROM heartbeat
            WHERE time > ?
            AND monitor_id = ?
        `, [
            startTime, startTime, startTime, startTime, startTime,
            monitorID,
        ]);

        const startTimeRaw = dayjs.utc().subtract(duration, "hour");
        const startTime = R.isoDateTime(startTimeRaw);

        // Handle when heartbeat duration is longer than the target duration
        // e.g. If the first beat's duration is partially outside the 24hrs window,
        // it will subtract this outlying part from the results
        // example timeline:
        //         vvvvv-durationBefore
        // --b1---s----b2------------b3--------b4------b5---------b6--n
        //        ^-startTime ^-beat                               ^-now
        // first query total_duration includes duration between (b1 and n),
        // including durationBefore (b1 to s), but we need only (s to n) so we have to subtract it

        let results = await R._knex.select({
            first_status: 'time',
            first_time: 'time',
            first_duration: 'duration',
            total_duration: R._knex.raw('sum(ping)'),
            uptime_duration: R._knex.raw('sum(ping * (CASE WHEN status = 1 THEN 1 ELSE 0 END))')
        }).from('heartbeat')
            .where('time', '>', startTime)
            .whereNotNull('ping')
            .andWhere({monitor_id: monitorID})
            .orderBy('time', 'asc')
            .limit(1);

        let result = results[0];

        timeLogger.print(`[Monitor: ${monitorID}][${duration}] sendUptime`);

@@ -621,6 +616,18 @@ class Monitor extends BeanModel {
        let uptimeDuration = result.uptime_duration;
        let uptime = 0;

        // start of duration of the first beat (time of the previous beat):
        let timeBefore = dayjs(result.first_time).subtract(result.first_duration, 'seconds');
        // duration outside time window:
        let durationBefore = timeBefore.diff(startTimeRaw, 'seconds');

        // subtract uptime_duration and total_duration which is outside the requested duration time window
        totalDuration -= durationBefore;
        if (result.first_status == 1)
            uptimeDuration -= durationBefore;

        if (totalDuration > 0) {
            uptime = uptimeDuration / totalDuration;
            if (uptime < 0) {
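A small worked example of the final ratio (numbers invented for illustration): if, after the durationBefore correction above, a 24 h window leaves totalDuration at 86400 s of which 86100 s were spent in status 1:

const totalDuration = 86400;   // seconds counted inside the window
const uptimeDuration = 86100;  // seconds of that with status = 1
const uptime = uptimeDuration / totalDuration;
console.log((uptime * 100).toFixed(2) + " %");  // "99.65 %"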
@@ -1420,18 +1420,11 @@ async function getMonitorJSONList(userID) {
}

async function initDatabase(testMode = false) {
    if (! fs.existsSync(Database.path)) {
        console.log("Copying Database");
        fs.copyFileSync(Database.templatePath, Database.path);
    }

    console.log("Connecting to the Database");
    await Database.connect(testMode);
    console.log("Connected");

    // Patch the database
    await Database.patch();

    let jwtSecretBean = await R.findOne("setting", " `key` = ? ", [
        "jwtSecret",
    ]);
@@ -22,7 +22,7 @@ module.exports = (socket) => {
    socket.on("shrinkDatabase", async (callback) => {
        try {
            checkLogin(socket);
            Database.shrink();
            await Database.shrink();
            callback({
                ok: true,
            });