Mirror of https://git.alemi.dev/dashboard.git (synced 2024-11-14 11:59:18 +01:00)
feat: allow processing multiple dbs from one surveyor
parent c59c4b0009
commit 08ed95d74c
3 changed files with 86 additions and 73 deletions
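Summary: the worker subcommand now accepts one or more database connection strings and spawns one surveyor loop per database. Logger setup moves into GUI mode (setup_tracing now takes an Option), and worker mode gains SIGINT handling via the new ctrlc dependency so it can shut its tasks down cleanly. A hypothetical invocation (binary name assumed, not shown in this commit): "dashboard worker sqlite://data-a.db postgres://user@host/data_b".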
Cargo.toml

@@ -26,6 +26,7 @@ reqwest = { version = "0.11", features = ["json"] }
 
 [target.'cfg(not(target_arch = "wasm32"))'.dependencies]
 tracing-subscriber = "0.3"
+ctrlc = "3.2.3"
 
 [target.'cfg(target_arch = "wasm32")'.dependencies]
 console_error_panic_hook = "0.1.6"
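The new ctrlc dependency backs the SIGINT handling added to worker mode in src/main.rs below. A minimal self-contained sketch of the same pattern (names match the diff; the spawned background work is elided):

    // Forward SIGINT over a std channel, block until it fires, then shut down.
    fn main() {
        let (sigint_tx, sigint_rx) = std::sync::mpsc::channel::<()>();
        ctrlc::set_handler(move ||
            sigint_tx.send(()).expect("Could not send signal on channel")
        ).expect("Could not set SIGINT handler");

        // ... spawn background worker thread here ...

        sigint_rx.recv().expect("Could not receive signal from channel");
        println!("Received SIGINT, stopping...");
    }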
src/main.rs (116 changes)
@@ -25,6 +25,7 @@ use gui::{
 #[derive(Parser, Debug)]
 #[command(author, version, about)]
 struct CliArgs {
+	/// Which mode to run in
 	#[clap(subcommand)]
 	mode: Mode,
 
@@ -46,7 +47,8 @@ enum Mode {
 	/// Run as background service fetching sources from db
 	Worker {
 		/// Connection string for database to use
-		db_uri: String,
+		#[arg(required = true)]
+		db_uris: Vec<String>,
 	},
 	/// Run as foreground user interface displaying collected data
 	GUI {
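With #[arg(required = true)] on a Vec<String> positional, clap should accept one or more URIs and reject a bare "worker" with no argument, so existing single-database invocations keep working unchanged: "worker <DB_URI> [DB_URI...]".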
@@ -56,7 +58,7 @@ enum Mode {
 
 // When compiling for web:
 #[cfg(target_arch = "wasm32")]
-fn setup_tracing(_layer: InternalLoggerLayer) {
+fn setup_tracing(_layer: Option<InternalLoggerLayer>) {
 	// Make sure panics are logged using `console.error`.
 	console_error_panic_hook::set_once();
 	// Redirect tracing to console.log and friends:
@@ -65,13 +67,17 @@ fn setup_tracing(_layer: InternalLoggerLayer) {
 
 // When compiling natively:
 #[cfg(not(target_arch = "wasm32"))]
-fn setup_tracing(layer: InternalLoggerLayer) {
-	tracing_subscriber::registry()
+fn setup_tracing(layer: Option<InternalLoggerLayer>) {
+	let sub = tracing_subscriber::registry()
 		.with(LevelFilter::INFO)
 		.with(filter_fn(|x| x.target() != "sqlx::query"))
-		.with(tracing_subscriber::fmt::Layer::new())
-		.with(layer)
-		.init();
+		.with(tracing_subscriber::fmt::Layer::new());
+
+	if let Some(layer) = layer {
+		sub.with(layer).init();
+	} else {
+		sub.init();
+	}
 }
 
 fn main() {
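Side note: tracing-subscriber 0.3 implements Layer for Option<L>, so the same effect can likely be had without the branch. A sketch under that assumption (same imports and InternalLoggerLayer type as main.rs):

    // `None` simply disables the layer, so the builder chain stays linear.
    fn setup_tracing(layer: Option<InternalLoggerLayer>) {
        tracing_subscriber::registry()
            .with(LevelFilter::INFO)
            .with(filter_fn(|x| x.target() != "sqlx::query"))
            .with(tracing_subscriber::fmt::Layer::new())
            .with(layer)
            .init();
    }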
@@ -80,63 +86,74 @@ fn main() {
 	// TODO is there an alternative to this ugly botch?
 	let (ctx_tx, ctx_rx) = watch::channel::<Option<Context>>(None);
 
-	let (width_tx, width_rx) = watch::channel(0);
 	let (run_tx, run_rx) = watch::channel(true);
 
-	let logger = InternalLogger::new(args.log_size as usize);
-	let logger_view = logger.view();
-
-	setup_tracing(logger.layer());
-
 	match args.mode {
-		Mode::Worker { db_uri } => {
-			let run_rx_clone = run_rx.clone();
+		Mode::Worker { db_uris } => {
+			setup_tracing(None);
 
 			let worker = std::thread::spawn(move || {
 				tokio::runtime::Builder::new_current_thread()
 					.enable_all()
 					.build()
 					.unwrap()
 					.block_on(async {
-						let db = match Database::connect(db_uri.clone()).await {
-							Ok(v) => v,
-							Err(e) => {
-								error!(target: "launcher", "Could not connect to db: {:?}", e);
-								return;
-							}
-						};
-						info!(target: "launcher", "Connected to '{}'", db_uri);
-
 						let mut jobs = vec![];
 
-						jobs.push(
-							tokio::spawn(logger.worker(run_rx_clone.clone()))
-						);
+						for db_uri in db_uris {
+							let db = match Database::connect(db_uri.clone()).await {
+								Ok(v) => v,
+								Err(e) => {
+									error!(target: "worker", "Could not connect to db: {:?}", e);
+									return;
+								}
+							};
 
-						jobs.push(
-							tokio::spawn(
-								surveyor_loop(
-									db.clone(),
-									args.interval as i64,
-									args.cache_time as i64,
-									run_rx_clone.clone(),
-								)
-							)
-						);
+							info!(target: "worker", "Connected to '{}'", db_uri);
+
+							jobs.push(
+								tokio::spawn(
+									surveyor_loop(
+										db.clone(),
+										args.interval as i64,
+										args.cache_time as i64,
+										run_rx.clone(),
+									)
+								)
+							);
+						}
 
 						for (i, job) in jobs.into_iter().enumerate() {
 							if let Err(e) = job.await {
-								error!(target: "launcher", "Could not join task #{}: {:?}", i, e);
+								error!(target: "worker", "Could not join task #{}: {:?}", i, e);
 							}
 						}
 
-						info!(target: "launcher", "Stopping background worker");
+						info!(target: "worker", "Stopping background worker");
 					})
 			});
 
-			worker.join().unwrap();
+			let (sigint_tx, sigint_rx) = std::sync::mpsc::channel(); // TODO can I avoid using a std channel?
+			ctrlc::set_handler(move ||
+				sigint_tx.send(()).expect("Could not send signal on channel")
+			).expect("Could not set SIGINT handler");
+
+			sigint_rx.recv().expect("Could not receive signal from channel");
+			info!(target: "launcher", "Received SIGINT, stopping...");
+
+			run_tx.send(false).unwrap_or(()); // ignore errors
+			worker.join().expect("Failed joining worker thread");
 		},
 
 		Mode::GUI { } => {
 			let (uri_tx, uri_rx) = mpsc::channel(10);
+			let (width_tx, width_rx) = watch::channel(0);
+
+			let logger = InternalLogger::new(args.log_size as usize);
+			let logger_view = logger.view();
+
+			setup_tracing(Some(logger.layer()));
 
 			let state = match AppState::new(
 				width_rx,
 				uri_rx,
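One behavioral note on the loop above: a failed Database::connect hits "return;", which exits the whole block_on future, so one bad URI stops every surveyor. A hypothetical skip-and-continue variant of the same loop (not what this commit does):

    for db_uri in db_uris {
        let db = match Database::connect(db_uri.clone()).await {
            Ok(v) => v,
            Err(e) => {
                error!(target: "worker", "Could not connect to db: {:?}", e);
                continue; // keep surveying the databases that did connect
            }
        };
        // ... spawn surveyor_loop(db, ...) as in the hunk above ...
    }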
@@ -150,7 +167,6 @@ fn main() {
 				}
 			};
 			let view = state.view();
-			let run_rx_clone = run_rx.clone();
 
 			let worker = std::thread::spawn(move || {
 				tokio::runtime::Builder::new_current_thread()
@@ -160,7 +176,7 @@ fn main() {
 					.block_on(async {
 						let mut jobs = vec![];
 
-						let run_rx_clone_clone = run_rx_clone.clone();
+						let run_rx_clone_clone = run_rx.clone();
 
 						jobs.push(
 							tokio::spawn(async move {
@@ -174,22 +190,22 @@ fn main() {
 						);
 
 						jobs.push(
-							tokio::spawn(logger.worker(run_rx_clone.clone()))
+							tokio::spawn(logger.worker(run_rx.clone()))
 						);
 
 						jobs.push(
 							tokio::spawn(
-								state.worker(run_rx_clone.clone())
+								state.worker(run_rx.clone())
 							)
 						);
 
 						for (i, job) in jobs.into_iter().enumerate() {
 							if let Err(e) = job.await {
-								error!(target: "launcher", "Could not join task #{}: {:?}", i, e);
+								error!(target: "worker", "Could not join task #{}: {:?}", i, e);
 							}
 						}
 
-						info!(target: "launcher", "Stopping background worker");
+						info!(target: "worker", "Stopping background worker");
 					})
 			});
 
@@ -220,15 +236,11 @@ fn main() {
 				),
 			);
 
-			info!(target: "launcher", "Stopping native GUI");
+			info!(target: "launcher", "GUI quit, stopping background worker...");
 
-			if let Err(e) = run_tx.send(false) {
-				error!(target: "launcher", "Error signaling end to workers: {:?}", e);
-			}
+			run_tx.send(false).unwrap_or(()); // ignore errors
 
-			if let Err(e) = worker.join() {
-				error!(target: "launcher", "Error joining background thread : {:?}", e);
-			}
+			worker.join().expect("Failed joining worker thread");
 		}
 	}
 }

(third changed file; path not shown in this capture)
@@ -126,27 +126,27 @@ impl AppState
 			.order_by(entities::panels::Column::Position, Order::Asc)
 			.all(db).await?;
 		if let Err(e) = self.tx.panels.send(self.panels.clone()) {
-			error!(target: "worker", "Could not send panels update: {:?}", e);
+			error!(target: "state-manager", "Could not send panels update: {:?}", e);
 		}
 
 		self.sources = entities::sources::Entity::find()
 			.order_by(entities::sources::Column::Position, Order::Asc)
 			.all(db).await?;
 		if let Err(e) = self.tx.sources.send(self.sources.clone()) {
-			error!(target: "worker", "Could not send sources update: {:?}", e);
+			error!(target: "state-manager", "Could not send sources update: {:?}", e);
 		}
 
 		self.metrics = entities::metrics::Entity::find()
 			.order_by(entities::metrics::Column::Position, Order::Asc)
 			.all(db).await?;
 		if let Err(e) = self.tx.metrics.send(self.metrics.clone()) {
-			error!(target: "worker", "Could not send metrics update: {:?}", e);
+			error!(target: "state-manager", "Could not send metrics update: {:?}", e);
 		}
 
 		self.panel_metric = entities::panel_metric::Entity::find()
 			.all(db).await?;
 		if let Err(e) = self.tx.panel_metric.send(self.panel_metric.clone()) {
-			error!(target: "worker", "Could not send panel-metric update: {:?}", e);
+			error!(target: "state-manager", "Could not send panel-metric update: {:?}", e);
 		}
 
 		self.last_refresh = chrono::Utc::now().timestamp();
@@ -183,10 +183,10 @@ impl AppState
 				Ok(())
 			})
 		}).await {
-			error!(target: "worker", "Could not update panels on database: {:?}", e);
+			error!(target: "state-manager", "Could not update panels on database: {:?}", e);
 		} else {
 			if let Err(e) = self.tx.panels.send(panels.clone()) {
-				error!(target: "worker", "Could not send panels update: {:?}", e);
+				error!(target: "state-manager", "Could not send panels update: {:?}", e);
 			}
 			self.panels = panels;
 		}
@@ -212,7 +212,7 @@ impl AppState
 					Ok(())
 				})
 			}).await {
-				error!(target: "worker", "Could not update panels on database: {:?}", e);
+				error!(target: "state-manager", "Could not update panels on database: {:?}", e);
 			}
 		} else {
 			self.view.request_flush().await;
@@ -226,7 +226,7 @@ impl AppState
 			BackgroundAction::UpdateMetric { metric } => {
 				let op = if metric.id == NotSet { metric.insert(db) } else { metric.update(db) };
 				if let Err(e) = op.await {
-					error!(target: "worker", "Could not update metric: {:?}", e);
+					error!(target: "state-manager", "Could not update metric: {:?}", e);
 				} else {
 					self.view.request_flush().await;
 				}
@@ -250,10 +250,10 @@ impl AppState
 			.all(db)
 			.await?.into();
 		if let Err(e) = self.tx.points.send(self.points.clone().into()) {
-			warn!(target: "worker", "Could not send new points: {:?}", e); // TODO should be an err?
+			warn!(target: "state-manager", "Could not send new points: {:?}", e); // TODO should be an err?
 		}
 		self.last_check = Utc::now().timestamp() - *self.width.borrow();
-		info!(target: "worker", "Reloaded points");
+		info!(target: "state-manager", "Reloaded points");
 		Ok(())
 	}
@@ -274,7 +274,7 @@ impl AppState
 			.all(db)
 			.await?;
 		if previous_points.len() > 0 {
-			info!(target: "worker", "Fetched {} previous points", previous_points.len());
+			info!(target: "state-manager", "Fetched {} previous points", previous_points.len());
 		}
 		previous_points.reverse(); // TODO wasteful!
 		for p in previous_points {
@@ -313,7 +313,7 @@ impl AppState
 		self.last_check = now;
 		if changes {
 			if let Err(e) = self.tx.points.send(self.points.clone().into()) {
-				warn!(target: "worker", "Could not send changes to main thread: {:?}", e);
+				warn!(target: "state-manager", "Could not send changes to main thread: {:?}", e);
 			}
 		}
 		Ok(())
@@ -325,7 +325,7 @@ impl AppState
 
 		let mut db = Database::connect(first_db_uri.clone()).await.unwrap();
 
-		info!(target: "worker", "Connected to '{}'", first_db_uri);
+		info!(target: "state-manager", "Connected to '{}'", first_db_uri);
 
 		while *run.borrow() {
 			now = Utc::now().timestamp();
@@ -338,38 +338,38 @@ impl AppState
 							info!("Connected to '{}'", uri);
 							db = new_db;
 						},
-						Err(e) => error!(target: "worker", "Could not connect to db: {:?}", e),
+						Err(e) => error!(target: "state-manager", "Could not connect to db: {:?}", e),
 					};
 				},
-				None => { error!(target: "worker", "URI channel closed"); break; },
+				None => { error!(target: "state-manager", "URI channel closed"); break; },
 			}
 		},
 		res = self.op.recv() => {
 			match res {
 				Some(op) => match self.parse_op(op, &db).await {
 					Ok(()) => { },
-					Err(e) => error!(target: "worker", "Failed executing operation: {:?}", e),
+					Err(e) => error!(target: "state-manager", "Failed executing operation: {:?}", e),
 				},
-				None => { error!(target: "worker", "Operations channel closed"); break; },
+				None => { error!(target: "state-manager", "Operations channel closed"); break; },
 			}
 		}
 		res = self.flush.recv() => {
 			match res {
 				Some(()) => match self.flush_data(&db).await {
 					Ok(()) => { },
-					Err(e) => error!(target: "worker", "Could not flush away current data: {:?}", e),
+					Err(e) => error!(target: "state-manager", "Could not flush away current data: {:?}", e),
 				},
-				None => { error!(target: "worker", "Flush channel closed"); break; },
+				None => { error!(target: "state-manager", "Flush channel closed"); break; },
 			}
 		},
 		_ = sleep(self.cache_age - (now - self.last_refresh)) => {
 			if let Err(e) = self.fetch(&db).await {
-				error!(target: "worker", "Could not fetch from db: {:?}", e);
+				error!(target: "state-manager", "Could not fetch from db: {:?}", e);
 			}
 		},
 		_ = sleep(self.interval - (now - self.last_check)) => {
 			if let Err(e) = self.update_points(&db).await {
-				error!(target: "worker", "Could not update points: {:?}", e);
+				error!(target: "state-manager", "Could not update points: {:?}", e);
 			}
 		}
 	}
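The changes in this file are a systematic rename of the tracing target from "worker" to "state-manager" in the AppState loop, keeping GUI state-manager logs distinguishable from the new multi-db worker. A minimal sketch of the per-target filtering this enables (assuming tracing-subscriber 0.3 with the env-filter feature; not part of this commit):

    use tracing_subscriber::{prelude::*, EnvFilter};

    fn main() {
        // Quiet the surveyor, keep state-manager logs verbose.
        tracing_subscriber::registry()
            .with(EnvFilter::new("info,worker=warn,state-manager=debug"))
            .with(tracing_subscriber::fmt::Layer::new())
            .init();

        tracing::info!(target: "state-manager", "shown");
        tracing::info!(target: "worker", "filtered out");
    }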