mirror of
https://gitlab.com/fabinfra/fabaccess/bffh.git
synced 2024-11-22 14:57:56 +01:00
Merge branch 'feature/demo-sprint' into 'development'
merge feature/demo-sprint See merge request fabinfra/fabaccess/bffh!6
This commit is contained in:
commit
a87ea52a1d
1527
Cargo.lock
generated
1527
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
17
Cargo.toml
17
Cargo.toml
@ -31,11 +31,10 @@ capnp-rpc = "0.13"
|
|||||||
capnp-futures = "0.13"
|
capnp-futures = "0.13"
|
||||||
|
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
toml = "0.5"
|
||||||
flexbuffers = "0.1"
|
flexbuffers = "0.1"
|
||||||
|
|
||||||
glob = "0.3"
|
serde_dhall = { version = "0.9", default-features = false }
|
||||||
toml = "0.5"
|
|
||||||
config = { version = "0.10", default-features = false, features = ["toml"] }
|
|
||||||
|
|
||||||
uuid = { version = "0.8", features = ["serde", "v4"] }
|
uuid = { version = "0.8", features = ["serde", "v4"] }
|
||||||
|
|
||||||
@ -55,5 +54,17 @@ lmdb-rkv = "0.14"
|
|||||||
|
|
||||||
async-trait = "0.1"
|
async-trait = "0.1"
|
||||||
|
|
||||||
|
lazy_static = "1.4.0"
|
||||||
|
|
||||||
|
rust-argon2 = "0.8"
|
||||||
|
rand = "0.7"
|
||||||
|
|
||||||
|
async-channel = "1.5"
|
||||||
|
easy-parallel = "3.1"
|
||||||
|
genawaiter = "0.99"
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
capnpc = "0.13"
|
capnpc = "0.13"
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
futures-test = "0.3"
|
||||||
|
@ -15,7 +15,8 @@ FROM debian:buster-slim
|
|||||||
RUN apt-get update && apt-get upgrade -yqq
|
RUN apt-get update && apt-get upgrade -yqq
|
||||||
RUN apt-get install -yqq libgsasl7 && rm -rf /var/lib/apt/lists/*
|
RUN apt-get install -yqq libgsasl7 && rm -rf /var/lib/apt/lists/*
|
||||||
COPY --from=builder /usr/local/cargo/bin/diflouroborane /usr/local/bin/diflouroborane
|
COPY --from=builder /usr/local/cargo/bin/diflouroborane /usr/local/bin/diflouroborane
|
||||||
|
COPY --from=builder /usr/src/bffh/examples/bffh.dhall /etc/diflouroborane.dhall
|
||||||
# RUN diflouroborane --print-default > /etc/diflouroborane.toml
|
# RUN diflouroborane --print-default > /etc/diflouroborane.toml
|
||||||
VOLUME /etc/diflouroborane.toml
|
VOLUME /etc/diflouroborane.dhall
|
||||||
EXPOSE 59661
|
EXPOSE 59661
|
||||||
ENTRYPOINT ["diflouroborane"]
|
ENTRYPOINT ["diflouroborane"]
|
@ -1,12 +1,9 @@
|
|||||||
# API-Testsetup
|
# API-Testsetup
|
||||||
|
|
||||||
wirklich nur um das API zu testen. ATM implementiert: machine::read
|
wirklich nur um das API zu testen. ATM implementiert: machines::* & machine::read, authenticate
|
||||||
|
|
||||||
1. `cargo run -- --print-default > /tmp/bffh.toml` um eine default config zu generieren
|
|
||||||
1. in /tmp/bffh.toml den parameter `machines` auf ./examples/machines.toml umbiegen
|
|
||||||
* Bei mir z.b. `~/Development/FabInfra/Diflouroborane/examples/machines.toml`
|
|
||||||
1. Ein mosquitto o.ä MQTT Server starten
|
1. Ein mosquitto o.ä MQTT Server starten
|
||||||
* Bringt aber leider gerade nicht viel ^^'
|
1. Datenbanken füllen: `cargo run -- -c examples/bffh.dhall --load=examples`
|
||||||
1. `cargo run -- -c /tmp/bffh.toml`
|
1. Daemon starten: `cargo run -- -c examples/bffh.dhall`
|
||||||
1. ???
|
1. ???
|
||||||
1. PROFIT!
|
1. PROFIT!
|
||||||
|
20
examples/bffh.dhall
Normal file
20
examples/bffh.dhall
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
{ actor_connections = [{ _1 = "Testmachine", _2 = "Actor" }]
|
||||||
|
, actors =
|
||||||
|
{ Actor = { module = "Shelly", params = {=} }
|
||||||
|
}
|
||||||
|
, init_connections = [{ _1 = "Initiator", _2 = "Testmachine" }]
|
||||||
|
, initiators =
|
||||||
|
{ Initiator = { module = "Dummy", params = {=} }
|
||||||
|
}
|
||||||
|
, listens = [{ address = "localhost", port = Some 59661 }]
|
||||||
|
, machines =
|
||||||
|
{ Testmachine =
|
||||||
|
{ description = Some "A test machine"
|
||||||
|
, disclose = "lab.test.read"
|
||||||
|
, manage = "lab.test.admin"
|
||||||
|
, name = "Testmachine"
|
||||||
|
, read = "lab.test.read"
|
||||||
|
, write = "lab.test.write"
|
||||||
|
} }
|
||||||
|
, mqtt_url = "tcp://localhost:1883"
|
||||||
|
}
|
@ -1,14 +0,0 @@
|
|||||||
[e5408099-d3e5-440b-a92b-3aabf7683d6b]
|
|
||||||
name = "Somemachine"
|
|
||||||
disclose = "lab.some.disclose"
|
|
||||||
read = "lab.some.read"
|
|
||||||
write = "lab.some.write"
|
|
||||||
manage = "lab.some.admin"
|
|
||||||
|
|
||||||
[eaabebae-34d1-4a3a-912a-967b495d3d6e]
|
|
||||||
name = "Testmachine"
|
|
||||||
description = "An optional description"
|
|
||||||
disclose = "lab.test.read"
|
|
||||||
read = "lab.test.read"
|
|
||||||
write = "lab.test.write"
|
|
||||||
manage = "lab.test.admin"
|
|
1
examples/pass.toml
Normal file
1
examples/pass.toml
Normal file
@ -0,0 +1 @@
|
|||||||
|
Testuser = "secret"
|
11
examples/users.toml
Normal file
11
examples/users.toml
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
[Testuser]
|
||||||
|
# Define them in roles.toml as well
|
||||||
|
roles = []
|
||||||
|
|
||||||
|
# If two or more users want to use the same machine at once the higher prio
|
||||||
|
# wins
|
||||||
|
priority = 0
|
||||||
|
|
||||||
|
# You can add whatever random data you want.
|
||||||
|
# It will get stored in the `kv` field in UserData.
|
||||||
|
noot = "noot!"
|
2
schema
2
schema
@ -1 +1 @@
|
|||||||
Subproject commit a4667b94f331f9f624416bbbb951fe78d5304d26
|
Subproject commit 83cd61e299230f33474e2efa950667d1acfbe085
|
178
src/actor.rs
Normal file
178
src/actor.rs
Normal file
@ -0,0 +1,178 @@
|
|||||||
|
use std::pin::Pin;
|
||||||
|
use std::task::{Poll, Context};
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::future::Future;
|
||||||
|
|
||||||
|
use smol::Executor;
|
||||||
|
|
||||||
|
use futures::{future::BoxFuture, Stream, StreamExt};
|
||||||
|
use futures::channel::mpsc;
|
||||||
|
use futures_signals::signal::{Signal, MutableSignalCloned, MutableSignal, Mutable};
|
||||||
|
|
||||||
|
use crate::db::machine::MachineState;
|
||||||
|
use crate::config::Config;
|
||||||
|
use crate::error::Result;
|
||||||
|
use crate::network::ActorMap;
|
||||||
|
|
||||||
|
use paho_mqtt::AsyncClient;
|
||||||
|
use slog::Logger;
|
||||||
|
|
||||||
|
pub trait Actuator {
|
||||||
|
fn apply(&mut self, state: MachineState) -> BoxFuture<'static, ()>;
|
||||||
|
}
|
||||||
|
|
||||||
|
pub type ActorSignal = Box<dyn Signal<Item=MachineState> + Unpin + Send>;
|
||||||
|
|
||||||
|
pub struct Actor {
|
||||||
|
// FIXME: This should really be a Signal.
|
||||||
|
// But, alas, MutableSignalCloned is itself not `Clone`. For good reason as keeping track of
|
||||||
|
// the changes itself happens in a way that Clone won't work (well).
|
||||||
|
// So, you can't clone it, you can't copy it and you can't get at the variable inside outside
|
||||||
|
// of a task context. In short, using Mutable isn't possible and we would have to write our own
|
||||||
|
// implementation of MutableSignal*'s . Preferably with the correct optimizations for our case
|
||||||
|
// where there is only one consumer. So a mpsc channel that drops all but the last input.
|
||||||
|
rx: mpsc::Receiver<Option<ActorSignal>>,
|
||||||
|
inner: Option<ActorSignal>,
|
||||||
|
|
||||||
|
actuator: Box<dyn Actuator + Send + Sync>,
|
||||||
|
future: Option<BoxFuture<'static, ()>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Actor {
|
||||||
|
pub fn new(rx: mpsc::Receiver<Option<ActorSignal>>, actuator: Box<dyn Actuator + Send + Sync>) -> Self {
|
||||||
|
Self {
|
||||||
|
rx: rx,
|
||||||
|
inner: None,
|
||||||
|
actuator: actuator,
|
||||||
|
future: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn wrap(actuator: Box<dyn Actuator + Send + Sync>) -> (mpsc::Sender<Option<ActorSignal>>, Self) {
|
||||||
|
let (tx, rx) = mpsc::channel(1);
|
||||||
|
(tx, Self::new(rx, actuator))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Future for Actor {
|
||||||
|
type Output = ();
|
||||||
|
|
||||||
|
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
|
||||||
|
let mut this = &mut *self;
|
||||||
|
let mut done = false; // Is the channel with new state-signals exhausted?
|
||||||
|
|
||||||
|
// FIXME: This is potentially invalid, and may lead to the situation that the signal is
|
||||||
|
// replaced *twice* but the second change will not be honoured since this implementation of
|
||||||
|
// events is *EDGE*-triggered!
|
||||||
|
// Update the signal we're polling from, if there is an update that is.
|
||||||
|
match Stream::poll_next(Pin::new(&mut this.rx), cx) {
|
||||||
|
Poll::Ready(None) => done = true,
|
||||||
|
Poll::Ready(Some(new_signal)) => this.inner = new_signal,
|
||||||
|
Poll::Pending => { },
|
||||||
|
}
|
||||||
|
|
||||||
|
// Work until there is no more work to do.
|
||||||
|
loop {
|
||||||
|
|
||||||
|
// Poll the `apply` future. And ensure it's completed before the next one is started
|
||||||
|
match this.future.as_mut().map(|future| Future::poll(Pin::new(future), cx)) {
|
||||||
|
// Skip and poll for a new future to do
|
||||||
|
None => { }
|
||||||
|
|
||||||
|
// This apply future is done, get a new one
|
||||||
|
Some(Poll::Ready(_)) => this.future = None,
|
||||||
|
|
||||||
|
// This future would block so we return to continue work another time
|
||||||
|
Some(Poll::Pending) => return Poll::Pending,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Poll the signal and apply any change that happen to the inner Actuator
|
||||||
|
match this.inner.as_mut().map(|inner| Signal::poll_change(Pin::new(inner), cx)) {
|
||||||
|
// No signal to poll
|
||||||
|
None => return Poll::Pending,
|
||||||
|
Some(Poll::Pending) => return Poll::Pending,
|
||||||
|
Some(Poll::Ready(None)) => {
|
||||||
|
this.inner = None;
|
||||||
|
|
||||||
|
if done {
|
||||||
|
return Poll::Ready(());
|
||||||
|
} else {
|
||||||
|
return Poll::Pending;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Some(Poll::Ready(Some(state))) => {
|
||||||
|
// This future MUST be polled before we exit from the Actor::poll because if we
|
||||||
|
// do not do that it will not register the dependency and thus NOT BE POLLED.
|
||||||
|
this.future.replace(this.actuator.apply(state));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct Dummy {
|
||||||
|
log: Logger,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Dummy {
|
||||||
|
pub fn new(log: &Logger) -> Self {
|
||||||
|
Self { log: log.new(o!("module" => "Dummy Actor")) }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Actuator for Dummy {
|
||||||
|
fn apply(&mut self, state: MachineState) -> BoxFuture<'static, ()> {
|
||||||
|
info!(self.log, "New state for dummy actuator: {:?}", state);
|
||||||
|
Box::pin(smol::future::ready(()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn load(log: &Logger, client: &AsyncClient, config: &Config) -> Result<(ActorMap, Vec<Actor>)> {
|
||||||
|
let mut map = HashMap::new();
|
||||||
|
|
||||||
|
let actuators = config.actors.iter()
|
||||||
|
.map(|(k,v)| (k, load_single(log, client, k, &v.module, &v.params)))
|
||||||
|
.filter_map(|(k, n)| match n {
|
||||||
|
None => None,
|
||||||
|
Some(a) => Some((k, a))
|
||||||
|
});
|
||||||
|
|
||||||
|
let mut v = Vec::new();
|
||||||
|
for (name, actuator) in actuators {
|
||||||
|
let (tx, a) = Actor::wrap(actuator);
|
||||||
|
map.insert(name.clone(), Mutex::new(tx));
|
||||||
|
v.push(a);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Ok(( map, v ))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn load_single(
|
||||||
|
log: &Logger,
|
||||||
|
client: &AsyncClient,
|
||||||
|
name: &String,
|
||||||
|
module_name: &String,
|
||||||
|
params: &HashMap<String, String>
|
||||||
|
) -> Option<Box<dyn Actuator + Sync + Send>>
|
||||||
|
{
|
||||||
|
use crate::modules::*;
|
||||||
|
|
||||||
|
match module_name.as_ref() {
|
||||||
|
"Shelly" => {
|
||||||
|
if !params.is_empty() {
|
||||||
|
warn!(log, "\"{}\" module expects no parameters. Configured as \"{}\".",
|
||||||
|
module_name, name);
|
||||||
|
}
|
||||||
|
Some(Box::new(Shelly::new(log, name.clone(), client.clone())))
|
||||||
|
},
|
||||||
|
"Dummy" => {
|
||||||
|
Some(Box::new(Dummy::new(log)))
|
||||||
|
}
|
||||||
|
_ => {
|
||||||
|
error!(log, "No actor found with name \"{}\", configured as \"{}\".", module_name, name);
|
||||||
|
None
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
19
src/api.rs
19
src/api.rs
@ -9,21 +9,27 @@ use crate::connection::Session;
|
|||||||
|
|
||||||
use crate::db::Databases;
|
use crate::db::Databases;
|
||||||
|
|
||||||
|
use crate::builtin;
|
||||||
|
|
||||||
|
use crate::network::Network;
|
||||||
|
|
||||||
pub mod auth;
|
pub mod auth;
|
||||||
mod machine;
|
mod machine;
|
||||||
mod machines;
|
mod machines;
|
||||||
|
|
||||||
use machines::Machines;
|
use machines::Machines;
|
||||||
|
|
||||||
|
// TODO Session restoration by making the Bootstrap cap a SturdyRef
|
||||||
pub struct Bootstrap {
|
pub struct Bootstrap {
|
||||||
session: Arc<Session>,
|
session: Arc<Session>,
|
||||||
db: Databases,
|
db: Databases,
|
||||||
|
nw: Arc<Network>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Bootstrap {
|
impl Bootstrap {
|
||||||
pub fn new(session: Arc<Session>, db: Databases) -> Self {
|
pub fn new(session: Arc<Session>, db: Databases, nw: Arc<Network>) -> Self {
|
||||||
info!(session.log, "Created Bootstrap");
|
info!(session.log, "Created Bootstrap");
|
||||||
Self { session, db }
|
Self { session, db, nw }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -33,12 +39,11 @@ impl connection_capnp::bootstrap::Server for Bootstrap {
|
|||||||
_: Params<auth_params::Owned>,
|
_: Params<auth_params::Owned>,
|
||||||
mut res: Results<auth_results::Owned>
|
mut res: Results<auth_results::Owned>
|
||||||
) -> Promise<(), capnp::Error> {
|
) -> Promise<(), capnp::Error> {
|
||||||
// Forbid mutltiple authentication for now
|
// TODO: Forbid mutltiple authentication for now
|
||||||
// TODO: When should we allow multiple auth and how do me make sure that does not leak
|
// TODO: When should we allow multiple auth and how do me make sure that does not leak
|
||||||
// priviledges (e.g. due to previously issues caps)?
|
// priviledges (e.g. due to previously issues caps)?
|
||||||
if self.session.user.is_none() {
|
|
||||||
res.get().set_auth(capnp_rpc::new_client(auth::Auth::new(self.session.clone())))
|
res.get().set_auth(capnp_rpc::new_client(auth::Auth::new(self.db.passdb.clone(), self.session.clone())));
|
||||||
}
|
|
||||||
|
|
||||||
Promise::ok(())
|
Promise::ok(())
|
||||||
}
|
}
|
||||||
@ -55,7 +60,7 @@ impl connection_capnp::bootstrap::Server for Bootstrap {
|
|||||||
mut res: Results<machines_results::Owned>
|
mut res: Results<machines_results::Owned>
|
||||||
) -> Promise<(), capnp::Error> {
|
) -> Promise<(), capnp::Error> {
|
||||||
// TODO actual permission check and stuff
|
// TODO actual permission check and stuff
|
||||||
let c = capnp_rpc::new_client(Machines::new(self.session.clone(), self.db.clone()));
|
let c = capnp_rpc::new_client(Machines::new(self.session.clone(), self.db.clone(), self.nw.clone()));
|
||||||
res.get().set_machines(c);
|
res.get().set_machines(c);
|
||||||
|
|
||||||
Promise::ok(())
|
Promise::ok(())
|
||||||
|
@ -27,20 +27,46 @@ use crate::config::Settings;
|
|||||||
use crate::api::Session;
|
use crate::api::Session;
|
||||||
|
|
||||||
pub use crate::schema::auth_capnp;
|
pub use crate::schema::auth_capnp;
|
||||||
|
use crate::db::pass::PassDB;
|
||||||
|
|
||||||
pub struct AppData;
|
pub struct AppData {
|
||||||
|
passdb: Arc<PassDB>,
|
||||||
|
}
|
||||||
pub struct SessionData;
|
pub struct SessionData;
|
||||||
|
|
||||||
|
|
||||||
struct CB;
|
struct CB;
|
||||||
impl Callback<AppData, SessionData> for CB {
|
impl Callback<AppData, SessionData> for CB {
|
||||||
fn callback(sasl: SaslCtx<AppData, SessionData>, session: SaslSession<SessionData>, prop: Property) -> libc::c_int {
|
fn callback(mut sasl: SaslCtx<AppData, SessionData>, session: SaslSession<SessionData>, prop: Property) -> libc::c_int {
|
||||||
let ret = match prop {
|
let ret = match prop {
|
||||||
Property::GSASL_VALIDATE_SIMPLE => {
|
Property::GSASL_VALIDATE_SIMPLE => {
|
||||||
let authid = session.get_property(Property::GSASL_AUTHID).unwrap().to_string_lossy();
|
let authid = match session.get_property(Property::GSASL_AUTHID) {
|
||||||
let pass = session.get_property(Property::GSASL_PASSWORD).unwrap().to_string_lossy();
|
None => return ReturnCode::GSASL_NO_AUTHID as libc::c_int,
|
||||||
|
Some(a) => {
|
||||||
|
match a.to_str() {
|
||||||
|
Ok(s) => s,
|
||||||
|
Err(e) => return ReturnCode::GSASL_SASLPREP_ERROR as libc::c_int,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
if authid == "test" && pass == "secret" {
|
let pass = session.get_property(Property::GSASL_PASSWORD);
|
||||||
ReturnCode::GSASL_OK
|
if pass.is_none() {
|
||||||
|
return ReturnCode::GSASL_NO_PASSWORD as libc::c_int;
|
||||||
|
}
|
||||||
|
let pass = pass.unwrap();
|
||||||
|
|
||||||
|
|
||||||
|
if let Some(sessiondata) = sasl.retrieve_mut() {
|
||||||
|
if let Ok(Some(b)) = sessiondata.passdb.check(authid, pass.to_bytes()) {
|
||||||
|
if b {
|
||||||
|
ReturnCode::GSASL_OK
|
||||||
|
} else {
|
||||||
|
ReturnCode::GSASL_AUTHENTICATION_ERROR
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ReturnCode::GSASL_AUTHENTICATION_ERROR
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
ReturnCode::GSASL_AUTHENTICATION_ERROR
|
ReturnCode::GSASL_AUTHENTICATION_ERROR
|
||||||
}
|
}
|
||||||
@ -60,10 +86,10 @@ pub struct Auth {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Auth {
|
impl Auth {
|
||||||
pub fn new(session: Arc<Session>) -> Self {
|
pub fn new(passdb: Arc<PassDB>, session: Arc<Session>) -> Self {
|
||||||
let mut ctx = SASL::new().unwrap();
|
let mut ctx = SASL::new().unwrap();
|
||||||
|
|
||||||
let mut appdata = Box::new(AppData);
|
let mut appdata = Box::new(AppData { passdb });
|
||||||
|
|
||||||
ctx.store(appdata);
|
ctx.store(appdata);
|
||||||
|
|
||||||
@ -172,58 +198,34 @@ impl auth_capnp::authentication::Server for Auth {
|
|||||||
// somewhere and pass it somewhere else and in between don't check if it's the right type and
|
// somewhere and pass it somewhere else and in between don't check if it's the right type and
|
||||||
// accidentally pass the authzid where the authcid should have gone.
|
// accidentally pass the authzid where the authcid should have gone.
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
|
||||||
/// Authentication Identity
|
|
||||||
///
|
|
||||||
/// Under the hood a string because the form depends heavily on the method
|
|
||||||
struct AuthCId(String);
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
|
||||||
/// Authorization Identity
|
|
||||||
///
|
|
||||||
/// This identity is internal to FabAccess and completely independent from the authentication
|
|
||||||
/// method or source
|
|
||||||
struct AuthZId {
|
|
||||||
/// Main User ID. Generally an user name or similar
|
|
||||||
uid: String,
|
|
||||||
/// Sub user ID.
|
|
||||||
///
|
|
||||||
/// Can change scopes for permissions, e.g. having a +admin account with more permissions than
|
|
||||||
/// the default account and +dashboard et.al. accounts that have restricted permissions for
|
|
||||||
/// their applications
|
|
||||||
subuid: String,
|
|
||||||
/// Realm this account originates.
|
|
||||||
///
|
|
||||||
/// The Realm is usually described by a domain name but local policy may dictate an unrelated
|
|
||||||
/// mapping
|
|
||||||
realm: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
// What is a man?! A miserable little pile of secrets!
|
// What is a man?! A miserable little pile of secrets!
|
||||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||||
/// Authentication/Authorization user object.
|
/// Authentication/Authorization user object.
|
||||||
///
|
///
|
||||||
/// This struct contains the user as is passed to the actual authentication/authorization
|
/// This struct describes the user as can be gathered from API authentication exchanges.
|
||||||
/// subsystems
|
/// Specifically this is the value bffh gets after a successful authentication.
|
||||||
///
|
///
|
||||||
pub struct User {
|
pub struct AuthenticationData {
|
||||||
/// Contains the Authentication ID used
|
/// Contains the Authentication ID used
|
||||||
///
|
///
|
||||||
/// The authentication ID is an identifier for the authentication exchange. This is different
|
/// The authentication ID is an identifier for the authentication exchange. This is
|
||||||
/// than the ID of the user to be authenticated; for example when using x509 the authcid is
|
/// conceptually different than the ID of the user to be authenticated; for example when using
|
||||||
/// the dn of the certificate, when using GSSAPI the authcid is of form `<userid>@<REALM>`
|
/// x509 the authcid is the dn of the certificate, when using GSSAPI the authcid is of form
|
||||||
authcid: AuthCId,
|
/// `<ID>@<REALM>`
|
||||||
|
authcid: String,
|
||||||
|
|
||||||
/// Contains the Authorization ID
|
/// Authorization ID
|
||||||
///
|
///
|
||||||
/// This is the identifier of the user to *authenticate as*. This in several cases is different
|
/// The authzid represents the identity that a client wants to act as. In our case this is
|
||||||
/// to the `authcid`:
|
/// always an user id. If unset no preference is indicated and the server will authenticate the
|
||||||
|
/// client as whatever user — if any — they associate with the authcid. Setting the authzid is
|
||||||
|
/// useful in a number if situations:
|
||||||
/// If somebody wants to authenticate as somebody else, su-style.
|
/// If somebody wants to authenticate as somebody else, su-style.
|
||||||
/// If a person wants to authenticate as a higher-permissions account, e.g. foo may set authzid foo+admin
|
/// If a person wants to authenticate as a higher-permissions account, e.g. foo may set authzid foo+admin
|
||||||
/// to split normal user and "admin" accounts.
|
/// to split normal user and "admin" accounts.
|
||||||
/// If a method requires a specific authcid that is different from the identifier of the user
|
/// If a method requires a specific authcid that is different from the identifier of the user
|
||||||
/// to authenticate as, e.g. GSSAPI, x509 client certificates, API TOKEN authentication.
|
/// to authenticate as, e.g. GSSAPI, x509 client certificates, API TOKEN authentication.
|
||||||
authzid: AuthZId,
|
authzid: String,
|
||||||
|
|
||||||
/// Contains the authentication method used
|
/// Contains the authentication method used
|
||||||
///
|
///
|
||||||
|
@ -1,25 +1,26 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::ops::Deref;
|
||||||
|
|
||||||
use capnp::capability::Promise;
|
use capnp::capability::Promise;
|
||||||
use capnp::Error;
|
use capnp::Error;
|
||||||
|
|
||||||
use crate::schema::api_capnp::State;
|
use crate::schema::api_capnp::State;
|
||||||
use crate::schema::api_capnp::machine::*;
|
use crate::schema::api_capnp::machine::*;
|
||||||
use crate::db::machine::MachineIdentifier;
|
|
||||||
use crate::connection::Session;
|
use crate::connection::Session;
|
||||||
use crate::db::Databases;
|
use crate::db::Databases;
|
||||||
use crate::db::machine::Status;
|
use crate::db::machine::Status;
|
||||||
|
use crate::machine::Machine as NwMachine;
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct Machine {
|
pub struct Machine {
|
||||||
session: Arc<Session>,
|
session: Arc<Session>,
|
||||||
id: MachineIdentifier,
|
machine: NwMachine,
|
||||||
db: Databases,
|
db: Databases,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Machine {
|
impl Machine {
|
||||||
pub fn new(session: Arc<Session>, id: MachineIdentifier, db: Databases) -> Self {
|
pub fn new(session: Arc<Session>, machine: NwMachine, db: Databases) -> Self {
|
||||||
Machine { session, id, db }
|
Machine { session, machine, db }
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn fill(self: Arc<Self>, builder: &mut Builder) {
|
pub fn fill(self: Arc<Self>, builder: &mut Builder) {
|
||||||
@ -28,32 +29,33 @@ impl Machine {
|
|||||||
// TODO set all the others
|
// TODO set all the others
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn fill_info(&self, builder: &mut m_info::Builder) {
|
pub async fn fill_info(&self, builder: &mut m_info::Builder<'_>) {
|
||||||
if let Some(desc) = self.db.machine.get_desc(&self.id) {
|
let guard = self.machine.lock().await;
|
||||||
builder.set_name(&desc.name);
|
|
||||||
if let Some(d) = desc.description.as_ref() {
|
|
||||||
builder.set_description(d);
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: Set `responsible`
|
builder.set_name(guard.desc.name.as_ref());
|
||||||
// TODO: Error Handling
|
|
||||||
if let Some(state) = self.db.machine.get_state(&self.id) {
|
if let Some(desc) = guard.desc.description.as_ref() {
|
||||||
match state.state {
|
builder.set_description(desc);
|
||||||
Status::Free => builder.set_state(State::Free),
|
}
|
||||||
Status::InUse(_u) => {
|
|
||||||
builder.set_state(State::InUse);
|
match guard.read_state().lock_ref().deref().state {
|
||||||
}
|
Status::Free => {
|
||||||
Status::ToCheck(_u) => {
|
builder.set_state(State::Free);
|
||||||
builder.set_state(State::ToCheck);
|
}
|
||||||
}
|
Status::Disabled => {
|
||||||
Status::Blocked(_u) => {
|
builder.set_state(State::Disabled);
|
||||||
builder.set_state(State::Blocked);
|
}
|
||||||
}
|
Status::Blocked(_,_) => {
|
||||||
Status::Disabled => builder.set_state(State::Disabled),
|
builder.set_state(State::Blocked);
|
||||||
Status::Reserved(_u) => {
|
}
|
||||||
builder.set_state(State::Reserved);
|
Status::InUse(_,_) => {
|
||||||
}
|
builder.set_state(State::InUse);
|
||||||
}
|
}
|
||||||
|
Status::ToCheck(_,_) => {
|
||||||
|
builder.set_state(State::ToCheck);
|
||||||
|
}
|
||||||
|
Status::Reserved(_,_) => {
|
||||||
|
builder.set_state(State::Reserved);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -8,7 +8,8 @@ use crate::connection::Session;
|
|||||||
|
|
||||||
use crate::db::Databases;
|
use crate::db::Databases;
|
||||||
use crate::db::machine::uuid_from_api;
|
use crate::db::machine::uuid_from_api;
|
||||||
use crate::db::machine::MachineDB;
|
|
||||||
|
use crate::network::Network;
|
||||||
|
|
||||||
use super::machine::Machine;
|
use super::machine::Machine;
|
||||||
|
|
||||||
@ -19,12 +20,13 @@ pub struct Machines {
|
|||||||
session: Arc<Session>,
|
session: Arc<Session>,
|
||||||
|
|
||||||
db: Databases,
|
db: Databases,
|
||||||
|
network: Arc<Network>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Machines {
|
impl Machines {
|
||||||
pub fn new(session: Arc<Session>, db: Databases) -> Self {
|
pub fn new(session: Arc<Session>, db: Databases, network: Arc<Network>) -> Self {
|
||||||
info!(session.log, "Machines created");
|
info!(session.log, "Machines created");
|
||||||
Self { session, db }
|
Self { session, db, network }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -34,6 +36,19 @@ impl machines::Server for Machines {
|
|||||||
mut results: machines::ListMachinesResults)
|
mut results: machines::ListMachinesResults)
|
||||||
-> Promise<(), Error>
|
-> Promise<(), Error>
|
||||||
{
|
{
|
||||||
|
let v: Vec<(String, crate::machine::Machine)> = self.network.machines.iter()
|
||||||
|
.map(|(n, m)| (n.clone(), m.clone()))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let mut res = results.get();
|
||||||
|
let mut machines = res.init_machines(v.len() as u32);
|
||||||
|
|
||||||
|
for (i, (name, machine)) in v.into_iter().enumerate() {
|
||||||
|
let machine = Arc::new(Machine::new(self.session.clone(), machine, self.db.clone()));
|
||||||
|
let mut builder = machines.reborrow().get(i as u32);
|
||||||
|
Machine::fill(machine, &mut builder);
|
||||||
|
}
|
||||||
|
|
||||||
Promise::ok(())
|
Promise::ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -42,26 +57,14 @@ impl machines::Server for Machines {
|
|||||||
mut results: machines::GetMachineResults)
|
mut results: machines::GetMachineResults)
|
||||||
-> Promise<(), Error>
|
-> Promise<(), Error>
|
||||||
{
|
{
|
||||||
match params.get() {
|
if let Ok(uid) = params.get().and_then(|x| x.get_uid()) {
|
||||||
Ok(reader) => {
|
if let Some(machine_inner) = self.network.machines.get(uid) {
|
||||||
if let Ok(api_id) = reader.get_uuid() {
|
let machine = Arc::new(Machine::new(self.session.clone(), machine_inner.clone(), self.db.clone()));
|
||||||
let id = uuid_from_api(api_id);
|
let mut builder = results.get().init_machine();
|
||||||
if self.db.machine.exists(id) {
|
Machine::fill(machine, &mut builder);
|
||||||
debug!(self.session.log, "Accessing machine {}", id);
|
|
||||||
// TODO check disclose permission
|
|
||||||
|
|
||||||
let mut builder = results.get().init_machine();
|
|
||||||
|
|
||||||
let m = Machine::new(self.session.clone(), id, self.db.clone());
|
|
||||||
|
|
||||||
Machine::fill(Arc::new(m), &mut builder);
|
|
||||||
} else {
|
|
||||||
debug!(self.session.log, "Client requested nonexisting machine {}", id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Promise::ok(())
|
|
||||||
}
|
}
|
||||||
Err(e) => Promise::err(e),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Promise::ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
42
src/builtin.rs
Normal file
42
src/builtin.rs
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
use lazy_static::lazy_static;
|
||||||
|
use crate::db::access::{
|
||||||
|
Permission,
|
||||||
|
PermissionBuf,
|
||||||
|
PermRule,
|
||||||
|
RoleIdentifier,
|
||||||
|
Role,
|
||||||
|
};
|
||||||
|
|
||||||
|
lazy_static! {
|
||||||
|
static ref AUTH_PERM: &'static Permission = Permission::new("bffh.auth");
|
||||||
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
// lazy_static! {
|
||||||
|
// pub static ref AUTH_ROLE: RoleIdentifier = {
|
||||||
|
// RoleIdentifier::Local {
|
||||||
|
// name: "mayauth".to_string(),
|
||||||
|
// source: "builtin".to_string(),
|
||||||
|
// }
|
||||||
|
// };
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// lazy_static! {
|
||||||
|
// pub static ref DEFAULT_ROLEIDS: [RoleIdentifier; 1] = {
|
||||||
|
// [ AUTH_ROLE.clone(), ]
|
||||||
|
// };
|
||||||
|
//
|
||||||
|
// pub static ref DEFAULT_ROLES: HashMap<RoleIdentifier, Role> = {
|
||||||
|
// let mut m = HashMap::new();
|
||||||
|
// m.insert(AUTH_ROLE.clone(),
|
||||||
|
// Role {
|
||||||
|
// parents: vec![],
|
||||||
|
// permissions: vec![
|
||||||
|
// PermRule::Base(PermissionBuf::from_perm(AUTH_PERM)),
|
||||||
|
// ]
|
||||||
|
// }
|
||||||
|
// );
|
||||||
|
// m
|
||||||
|
// };
|
||||||
|
// }
|
114
src/config.rs
114
src/config.rs
@ -1,37 +1,47 @@
|
|||||||
|
use std::default::Default;
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use serde::{Serialize, Deserialize};
|
|
||||||
use std::io::Read;
|
use std::io::Read;
|
||||||
use std::fs::File;
|
use std::fs;
|
||||||
|
|
||||||
use crate::error::Result;
|
|
||||||
|
|
||||||
use std::default::Default;
|
|
||||||
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use config::Config;
|
use serde::{Serialize, Deserialize};
|
||||||
pub use config::ConfigError;
|
|
||||||
use glob::glob;
|
|
||||||
|
|
||||||
pub fn read(path: &Path) -> Result<Settings> {
|
use crate::error::Result;
|
||||||
let mut settings = Config::default();
|
use crate::machine::MachineDescription;
|
||||||
settings
|
use crate::db::machine::MachineIdentifier;
|
||||||
.merge(config::File::from(path)).unwrap();
|
use crate::db::access::*;
|
||||||
|
|
||||||
Ok(settings.try_into()?)
|
pub fn read(path: &Path) -> Result<Config> {
|
||||||
|
serde_dhall::from_file(path)
|
||||||
|
.parse()
|
||||||
|
.map_err(Into::into)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[deprecated]
|
||||||
|
pub type Settings = Config;
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
pub struct Settings {
|
pub struct Config {
|
||||||
pub machines: PathBuf,
|
/// A list of address/port pairs to listen on.
|
||||||
|
// TODO: This should really be a variant type; that is something that can figure out itself if
|
||||||
|
// it contains enough information to open a socket (i.e. it checks if it's a valid path (=>
|
||||||
|
// Unix socket) or IPv4/v6 address)
|
||||||
pub listens: Box<[Listen]>,
|
pub listens: Box<[Listen]>,
|
||||||
pub shelly: Option<ShellyCfg>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
/// Machine descriptions to load
|
||||||
pub struct ShellyCfg {
|
pub machines: HashMap<MachineIdentifier, MachineDescription>,
|
||||||
pub mqtt_url: String
|
|
||||||
|
/// Actors to load and their configuration options
|
||||||
|
pub actors: HashMap<String, ModuleConfig>,
|
||||||
|
|
||||||
|
/// Initiators to load and their configuration options
|
||||||
|
pub initiators: HashMap<String, ModuleConfig>,
|
||||||
|
|
||||||
|
pub mqtt_url: String,
|
||||||
|
|
||||||
|
pub actor_connections: Box<[(String, String)]>,
|
||||||
|
pub init_connections: Box<[(String, String)]>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
@ -40,21 +50,55 @@ pub struct Listen {
|
|||||||
pub port: Option<u16>,
|
pub port: Option<u16>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for Settings {
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct ModuleConfig {
|
||||||
|
pub module: String,
|
||||||
|
pub params: HashMap<String, String>
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for Config {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Settings {
|
let mut actors: HashMap::<String, ModuleConfig> = HashMap::new();
|
||||||
listens: Box::new([Listen {
|
let mut initiators: HashMap::<String, ModuleConfig> = HashMap::new();
|
||||||
address: "127.0.0.1".to_string(),
|
let mut machines = HashMap::new();
|
||||||
port: Some(DEFAULT_PORT)
|
|
||||||
},
|
actors.insert("Actor".to_string(), ModuleConfig {
|
||||||
|
module: "Shelly".to_string(),
|
||||||
|
params: HashMap::new(),
|
||||||
|
});
|
||||||
|
initiators.insert("Initiator".to_string(), ModuleConfig {
|
||||||
|
module: "TCP-Listen".to_string(),
|
||||||
|
params: HashMap::new(),
|
||||||
|
});
|
||||||
|
|
||||||
|
machines.insert("Testmachine".to_string(), MachineDescription {
|
||||||
|
name: "Testmachine".to_string(),
|
||||||
|
description: Some("A test machine".to_string()),
|
||||||
|
privs: PrivilegesBuf {
|
||||||
|
disclose: PermissionBuf::from_string("lab.test.read".to_string()),
|
||||||
|
read: PermissionBuf::from_string("lab.test.read".to_string()),
|
||||||
|
write: PermissionBuf::from_string("lab.test.write".to_string()),
|
||||||
|
manage: PermissionBuf::from_string("lab.test.admin".to_string()),
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
Config {
|
||||||
|
listens: Box::new([
|
||||||
Listen {
|
Listen {
|
||||||
address: "::1".to_string(),
|
address: "localhost".to_string(),
|
||||||
port: Some(DEFAULT_PORT)
|
port: Some(DEFAULT_PORT),
|
||||||
}]),
|
}
|
||||||
shelly: Some(ShellyCfg {
|
]),
|
||||||
mqtt_url: "127.0.0.1:1883".to_string()
|
machines: machines,
|
||||||
}),
|
actors: actors,
|
||||||
machines: PathBuf::from("/etc/bffh/machines/")
|
initiators: initiators,
|
||||||
|
mqtt_url: "tcp://localhost:1883".to_string(),
|
||||||
|
actor_connections: Box::new([
|
||||||
|
("Testmachine".to_string(), "Actor".to_string()),
|
||||||
|
]),
|
||||||
|
init_connections: Box::new([
|
||||||
|
("Initiator".to_string(), "Testmachine".to_string()),
|
||||||
|
]),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,4 +1,6 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::future::Future;
|
||||||
|
use futures::FutureExt;
|
||||||
|
|
||||||
use slog::Logger;
|
use slog::Logger;
|
||||||
|
|
||||||
@ -13,63 +15,60 @@ use capnp_rpc::{twoparty, rpc_twoparty_capnp};
|
|||||||
use crate::schema::connection_capnp;
|
use crate::schema::connection_capnp;
|
||||||
|
|
||||||
use crate::db::Databases;
|
use crate::db::Databases;
|
||||||
|
use crate::db::access::{AccessControl, Permission};
|
||||||
|
use crate::db::user::User;
|
||||||
|
use crate::builtin;
|
||||||
|
use crate::network::Network;
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
/// Connection context
|
/// Connection context
|
||||||
// TODO this should track over several connections
|
// TODO this should track over several connections
|
||||||
pub struct Session {
|
pub struct Session {
|
||||||
|
// Session-spezific log
|
||||||
pub log: Logger,
|
pub log: Logger,
|
||||||
pub user: Option<auth::User>,
|
user: Option<User>,
|
||||||
|
accessdb: Arc<AccessControl>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Session {
|
impl Session {
|
||||||
pub fn new(log: Logger) -> Self {
|
pub fn new(log: Logger, accessdb: Arc<AccessControl>) -> Self {
|
||||||
let user = None;
|
let user = None;
|
||||||
|
|
||||||
Session { log, user }
|
Session { log, user, accessdb }
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
async fn handshake(log: &Logger, stream: &mut TcpStream) -> Result<()> {
|
/// Check if the current session has a certain permission
|
||||||
if let Some(m) = capnp_futures::serialize::read_message(stream.clone(), Default::default()).await? {
|
pub async fn check_permission<P: AsRef<Permission>>(&self, perm: &P) -> Result<bool> {
|
||||||
let greeting = m.get_root::<connection_capnp::greeting::Reader>()?;
|
if let Some(user) = self.user.as_ref() {
|
||||||
let major = greeting.get_major();
|
self.accessdb.check(&user.data, perm).await
|
||||||
let minor = greeting.get_minor();
|
|
||||||
|
|
||||||
if major != 0 {
|
|
||||||
Err(Error::BadVersion((major, minor)))
|
|
||||||
} else {
|
} else {
|
||||||
let program = format!("{}-{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION"));
|
Ok(false)
|
||||||
|
|
||||||
let mut answer = ::capnp::message::Builder::new_default();
|
|
||||||
let mut b = answer.init_root::<connection_capnp::greeting::Builder>();
|
|
||||||
b.set_program(&program);
|
|
||||||
b.set_host("localhost");
|
|
||||||
b.set_major(0);
|
|
||||||
b.set_minor(1);
|
|
||||||
capnp_futures::serialize::write_message(stream, answer).await?;
|
|
||||||
info!(log, "Handshake successful with peer {} running {}, API {}.{}",
|
|
||||||
greeting.get_host()?, greeting.get_program()?, major, minor);
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn handle_connection(log: Logger, mut stream: TcpStream, db: Databases) -> Result<()> {
|
pub struct ConnectionHandler {
|
||||||
handshake(&log, &mut stream).await?;
|
log: Logger,
|
||||||
|
db: Databases,
|
||||||
info!(log, "New connection from on {:?}", stream);
|
network: Arc<Network>,
|
||||||
let session = Arc::new(Session::new(log));
|
}
|
||||||
let boots = Bootstrap::new(session, db);
|
|
||||||
let rpc: connection_capnp::bootstrap::Client = capnp_rpc::new_client(boots);
|
impl ConnectionHandler {
|
||||||
|
pub fn new(log: Logger, db: Databases, network: Arc<Network>) -> Self {
|
||||||
let network = twoparty::VatNetwork::new(stream.clone(), stream,
|
Self { log, db, network }
|
||||||
rpc_twoparty_capnp::Side::Server, Default::default());
|
}
|
||||||
let rpc_system = capnp_rpc::RpcSystem::new(Box::new(network),
|
|
||||||
Some(rpc.client));
|
pub fn handle(&mut self, mut stream: TcpStream) -> impl Future<Output=Result<()>> {
|
||||||
|
info!(self.log, "New connection from on {:?}", stream);
|
||||||
rpc_system.await.unwrap();
|
let session = Arc::new(Session::new(self.log.new(o!()), self.db.access.clone()));
|
||||||
Ok(())
|
let boots = Bootstrap::new(session, self.db.clone(), self.network.clone());
|
||||||
|
let rpc: connection_capnp::bootstrap::Client = capnp_rpc::new_client(boots);
|
||||||
|
|
||||||
|
let network = twoparty::VatNetwork::new(stream.clone(), stream,
|
||||||
|
rpc_twoparty_capnp::Side::Server, Default::default());
|
||||||
|
let rpc_system = capnp_rpc::RpcSystem::new(Box::new(network), Some(rpc.client));
|
||||||
|
|
||||||
|
// Convert the error type to one of our errors
|
||||||
|
rpc_system.map(|r| r.map_err(Into::into))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
54
src/db.rs
54
src/db.rs
@ -1,14 +1,22 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use slog::Logger;
|
||||||
|
|
||||||
|
use crate::error::Result;
|
||||||
|
use crate::config::Settings;
|
||||||
|
|
||||||
|
/// (Hashed) password database
|
||||||
|
pub mod pass;
|
||||||
|
|
||||||
|
/// User storage
|
||||||
|
pub mod user;
|
||||||
|
|
||||||
/// Access control storage
|
/// Access control storage
|
||||||
///
|
///
|
||||||
/// Stores&Retrieves Permissions and Roles
|
/// Stores&Retrieves Permissions and Roles
|
||||||
pub mod access;
|
pub mod access;
|
||||||
/// User storage
|
|
||||||
///
|
|
||||||
/// Stores&Retrieves Users
|
|
||||||
pub mod user;
|
|
||||||
|
|
||||||
/// Machine storage
|
/// Machine storage
|
||||||
///
|
///
|
||||||
@ -18,5 +26,41 @@ pub mod machine;
|
|||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct Databases {
|
pub struct Databases {
|
||||||
pub access: Arc<access::AccessControl>,
|
pub access: Arc<access::AccessControl>,
|
||||||
pub machine: Arc<machine::MachineDB>,
|
pub machine: Arc<machine::internal::Internal>,
|
||||||
|
pub passdb: Arc<pass::PassDB>,
|
||||||
|
pub userdb: Arc<user::Internal>,
|
||||||
|
}
|
||||||
|
|
||||||
|
const LMDB_MAX_DB: u32 = 16;
|
||||||
|
|
||||||
|
impl Databases {
|
||||||
|
pub fn new(log: &Logger, config: &Settings) -> Result<Self> {
|
||||||
|
|
||||||
|
// Initialize the LMDB environment. This blocks until the mmap() finishes
|
||||||
|
info!(log, "LMDB env");
|
||||||
|
let env = lmdb::Environment::new()
|
||||||
|
.set_flags(lmdb::EnvironmentFlags::MAP_ASYNC | lmdb::EnvironmentFlags::NO_SUB_DIR)
|
||||||
|
.set_max_dbs(LMDB_MAX_DB as libc::c_uint)
|
||||||
|
.open(&PathBuf::from_str("/tmp/a.db").unwrap())?;
|
||||||
|
|
||||||
|
// Start loading the machine database, authentication system and permission system
|
||||||
|
// All of those get a custom logger so the source of a log message can be better traced and
|
||||||
|
// filtered
|
||||||
|
let env = Arc::new(env);
|
||||||
|
let mdb = machine::init(log.new(o!("system" => "machines")), &config, env.clone())?;
|
||||||
|
|
||||||
|
let permdb = access::init(log.new(o!("system" => "permissions")), &config, env.clone())?;
|
||||||
|
let mut ac = access::AccessControl::new(permdb);
|
||||||
|
|
||||||
|
let passdb = pass::PassDB::init(log.new(o!("system" => "passwords")), env.clone()).unwrap();
|
||||||
|
|
||||||
|
let userdb = user::init(log.new(o!("system" => "users")), &config, env.clone())?;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
access: Arc::new(ac),
|
||||||
|
passdb: Arc::new(passdb),
|
||||||
|
machine: Arc::new(mdb),
|
||||||
|
userdb: Arc::new(userdb),
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -29,16 +29,18 @@ use crate::error::Result;
|
|||||||
|
|
||||||
pub mod internal;
|
pub mod internal;
|
||||||
|
|
||||||
use crate::db::user::User;
|
use crate::db::user::UserData;
|
||||||
pub use internal::init;
|
pub use internal::{init, Internal};
|
||||||
|
|
||||||
pub struct AccessControl {
|
pub struct AccessControl {
|
||||||
|
pub internal: Internal,
|
||||||
sources: HashMap<String, Box<dyn RoleDB>>,
|
sources: HashMap<String, Box<dyn RoleDB>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AccessControl {
|
impl AccessControl {
|
||||||
pub fn new() -> Self {
|
pub fn new(internal: Internal) -> Self {
|
||||||
Self {
|
Self {
|
||||||
|
internal: internal,
|
||||||
sources: HashMap::new()
|
sources: HashMap::new()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -49,25 +51,52 @@ impl AccessControl {
|
|||||||
self.sources.insert(name, source);
|
self.sources.insert(name, source);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn check<P: AsRef<Permission>>(&self, user: &User, perm: &P) -> Result<bool> {
|
pub async fn check<P: AsRef<Permission>>(&self, user: &UserData, perm: &P) -> Result<bool> {
|
||||||
for v in self.sources.values() {
|
for v in self.sources.values() {
|
||||||
if v.check(user, perm.as_ref())? {
|
if v.check(user, perm.as_ref())? {
|
||||||
return Ok(true);
|
return Ok(true);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if self.internal.check(user, perm.as_ref())? {
|
||||||
|
return Ok(true);
|
||||||
|
}
|
||||||
|
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn check_roles<P: AsRef<Permission>>(&self, roles: &[RoleIdentifier], perm: &P)
|
||||||
|
-> Result<bool>
|
||||||
|
{
|
||||||
|
for v in self.sources.values() {
|
||||||
|
if v.check_roles(roles, perm.as_ref())? {
|
||||||
|
return Ok(true);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return Ok(false);
|
return Ok(false);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl fmt::Debug for AccessControl {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
let mut b = f.debug_struct("AccessControl");
|
||||||
|
for (name, roledb) in self.sources.iter() {
|
||||||
|
b.field(name, &roledb.get_type_name().to_string());
|
||||||
|
}
|
||||||
|
b.finish()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub trait RoleDB {
|
pub trait RoleDB {
|
||||||
|
fn get_type_name(&self) -> &'static str;
|
||||||
|
|
||||||
fn get_role(&self, roleID: &RoleIdentifier) -> Result<Option<Role>>;
|
fn get_role(&self, roleID: &RoleIdentifier) -> Result<Option<Role>>;
|
||||||
|
|
||||||
/// Check if a given user has the given permission
|
/// Check if a given user has the given permission
|
||||||
///
|
///
|
||||||
/// Default implementation which adapter may overwrite with more efficient specialized
|
/// Default implementation which adapter may overwrite with more efficient specialized
|
||||||
/// implementations.
|
/// implementations.
|
||||||
fn check(&self, user: &User, perm: &Permission) -> Result<bool> {
|
fn check(&self, user: &UserData, perm: &Permission) -> Result<bool> {
|
||||||
self.check_roles(&user.roles, perm)
|
self.check_roles(&user.roles, perm)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -130,8 +159,6 @@ pub trait RoleDB {
|
|||||||
/// assign to all users.
|
/// assign to all users.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||||
pub struct Role {
|
pub struct Role {
|
||||||
name: String,
|
|
||||||
|
|
||||||
// If a role doesn't define parents, default to an empty Vec.
|
// If a role doesn't define parents, default to an empty Vec.
|
||||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||||
/// A Role can have parents, inheriting all permissions
|
/// A Role can have parents, inheriting all permissions
|
||||||
@ -328,6 +355,10 @@ impl PermissionBuf {
|
|||||||
Self { inner }
|
Self { inner }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn from_perm(perm: &Permission) -> Self {
|
||||||
|
Self { inner: perm.inner.to_string() }
|
||||||
|
}
|
||||||
|
|
||||||
pub fn into_string(self) -> String {
|
pub fn into_string(self) -> String {
|
||||||
self.inner
|
self.inner
|
||||||
}
|
}
|
||||||
@ -501,7 +532,7 @@ impl TryFrom<String> for PermRule {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test_DISABLED)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
|
@ -17,24 +17,23 @@ use crate::config::Settings;
|
|||||||
use crate::error::Result;
|
use crate::error::Result;
|
||||||
|
|
||||||
use crate::db::access::{Permission, Role, RoleIdentifier, RoleDB};
|
use crate::db::access::{Permission, Role, RoleIdentifier, RoleDB};
|
||||||
use crate::db::user::{UserIdentifier, User};
|
use crate::db::user::{User, UserData};
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
pub struct Internal {
|
pub struct Internal {
|
||||||
log: Logger,
|
log: Logger,
|
||||||
env: Arc<Environment>,
|
env: Arc<Environment>,
|
||||||
roledb: lmdb::Database,
|
roledb: lmdb::Database,
|
||||||
userdb: lmdb::Database,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Internal {
|
impl Internal {
|
||||||
pub fn new(log: Logger, env: Arc<Environment>, roledb: lmdb::Database, userdb: lmdb::Database) -> Self {
|
pub fn new(log: Logger, env: Arc<Environment>, roledb: lmdb::Database) -> Self {
|
||||||
Self { log, env, roledb, userdb }
|
Self { log, env, roledb, }
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Check if a given user has the given permission
|
/// Check if a given user has the given permission
|
||||||
#[allow(unused)]
|
#[allow(unused)]
|
||||||
pub fn _check<T: Transaction, P: AsRef<Permission>>(&self, txn: &T, user: &User, perm: &P)
|
pub fn _check<T: Transaction, P: AsRef<Permission>>(&self, txn: &T, user: &UserData, perm: &P)
|
||||||
-> Result<bool>
|
-> Result<bool>
|
||||||
{
|
{
|
||||||
// Tally all roles. Makes dependent roles easier
|
// Tally all roles. Makes dependent roles easier
|
||||||
@ -117,40 +116,29 @@ impl Internal {
|
|||||||
unimplemented!()
|
unimplemented!()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn load_db(&mut self, txn: &mut RwTransaction, mut path: PathBuf) -> Result<()> {
|
pub fn load_roles<P: AsRef<Path>>(&self, path: P) -> Result<()> {
|
||||||
path.push("roles");
|
let mut txn = self.env.begin_rw_txn()?;
|
||||||
if !path.is_dir() {
|
self.load_roles_txn(&mut txn, path.as_ref())
|
||||||
error!(self.log, "Given load directory is malformed, no 'roles' subdir, not loading roles!");
|
|
||||||
} else {
|
|
||||||
self.load_roles(txn, path.as_path())?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
fn load_roles_txn(&self, txn: &mut RwTransaction, path: &Path) -> Result<()> {
|
||||||
|
let roles = Role::load_file(path)?;
|
||||||
|
|
||||||
fn load_roles(&mut self, txn: &mut RwTransaction, path: &Path) -> Result<()> {
|
for (k,v) in roles.iter() {
|
||||||
if path.is_file() {
|
self.put_role(txn, k, v.clone())?;
|
||||||
let roles = Role::load_file(path)?;
|
|
||||||
|
|
||||||
for (k,v) in roles.iter() {
|
|
||||||
self.put_role(txn, k, v.clone())?;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for entry in std::fs::read_dir(path)? {
|
|
||||||
let roles = Role::load_file(entry?.path())?;
|
|
||||||
|
|
||||||
for (k,v) in roles.iter() {
|
|
||||||
self.put_role(txn, k, v.clone())?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
debug!(self.log, "Loaded roles: {:?}", roles);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl RoleDB for Internal {
|
impl RoleDB for Internal {
|
||||||
fn check(&self, user: &User, perm: &Permission) -> Result<bool> {
|
fn get_type_name(&self) -> &'static str {
|
||||||
|
"Internal"
|
||||||
|
}
|
||||||
|
|
||||||
|
fn check(&self, user: &UserData, perm: &Permission) -> Result<bool> {
|
||||||
let txn = self.env.begin_ro_txn()?;
|
let txn = self.env.begin_ro_txn()?;
|
||||||
self._check(&txn, user, &perm)
|
self._check(&txn, user, &perm)
|
||||||
}
|
}
|
||||||
@ -178,9 +166,6 @@ pub fn init(log: Logger, config: &Settings, env: Arc<lmdb::Environment>)
|
|||||||
debug!(&log, "Opened access database '{}' successfully.", "role");
|
debug!(&log, "Opened access database '{}' successfully.", "role");
|
||||||
//let permdb = env.create_db(Some("perm"), flags)?;
|
//let permdb = env.create_db(Some("perm"), flags)?;
|
||||||
//debug!(&log, "Opened access database '{}' successfully.", "perm");
|
//debug!(&log, "Opened access database '{}' successfully.", "perm");
|
||||||
let userdb = env.create_db(Some("user"), flags)?;
|
|
||||||
debug!(&log, "Opened access database '{}' successfully.", "user");
|
|
||||||
info!(&log, "Opened all access databases");
|
|
||||||
|
|
||||||
Ok(Internal::new(log, env, roledb, userdb))
|
Ok(Internal::new(log, env, roledb))
|
||||||
}
|
}
|
||||||
|
@ -16,8 +16,6 @@ use crate::error::Result;
|
|||||||
use crate::config::Settings;
|
use crate::config::Settings;
|
||||||
use crate::db::access;
|
use crate::db::access;
|
||||||
|
|
||||||
use crate::db::user::UserIdentifier;
|
|
||||||
|
|
||||||
use capnp::Error;
|
use capnp::Error;
|
||||||
|
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
@ -29,15 +27,15 @@ use smol::channel::{Receiver, Sender};
|
|||||||
use futures::{Future, Stream, StreamExt};
|
use futures::{Future, Stream, StreamExt};
|
||||||
use futures_signals::signal::*;
|
use futures_signals::signal::*;
|
||||||
|
|
||||||
use crate::registries::StatusSignal;
|
|
||||||
use crate::db::user::User;
|
|
||||||
|
|
||||||
use crate::machine::MachineDescription;
|
use crate::machine::MachineDescription;
|
||||||
|
|
||||||
|
use crate::db::user::UserId;
|
||||||
|
|
||||||
pub mod internal;
|
pub mod internal;
|
||||||
use internal::Internal;
|
use internal::Internal;
|
||||||
|
|
||||||
pub type MachineIdentifier = Uuid;
|
pub type MachineIdentifier = String;
|
||||||
|
pub type Priority = u64;
|
||||||
|
|
||||||
/// Status of a Machine
|
/// Status of a Machine
|
||||||
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
|
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
|
||||||
@ -45,15 +43,15 @@ pub enum Status {
|
|||||||
/// Not currently used by anybody
|
/// Not currently used by anybody
|
||||||
Free,
|
Free,
|
||||||
/// Used by somebody
|
/// Used by somebody
|
||||||
InUse(UserIdentifier),
|
InUse(UserId, Priority),
|
||||||
/// Was used by somebody and now needs to be checked for cleanliness
|
/// Was used by somebody and now needs to be checked for cleanliness
|
||||||
ToCheck(UserIdentifier),
|
ToCheck(UserId, Priority),
|
||||||
/// Not used by anybody but also can not be used. E.g. down for maintenance
|
/// Not used by anybody but also can not be used. E.g. down for maintenance
|
||||||
Blocked(UserIdentifier),
|
Blocked(UserId, Priority),
|
||||||
/// Disabled for some other reason
|
/// Disabled for some other reason
|
||||||
Disabled,
|
Disabled,
|
||||||
/// Reserved
|
/// Reserved
|
||||||
Reserved(UserIdentifier),
|
Reserved(UserId, Priority),
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn uuid_from_api(uuid: crate::schema::api_capnp::u_u_i_d::Reader) -> Uuid {
|
pub fn uuid_from_api(uuid: crate::schema::api_capnp::u_u_i_d::Reader) -> Uuid {
|
||||||
@ -76,8 +74,37 @@ pub struct MachineState {
|
|||||||
pub state: Status,
|
pub state: Status,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl MachineState {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self { state: Status::Free }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn free() -> Self {
|
||||||
|
Self { state: Status::Free }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn used(uid: UserId, priority: Priority) -> Self {
|
||||||
|
Self { state: Status::InUse(uid, priority) }
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if the given priority is higher than one's own.
|
||||||
|
///
|
||||||
|
/// If `self` does not have a priority then this function always returns `true`
|
||||||
|
pub fn is_higher_priority(&self, priority: u64) -> bool {
|
||||||
|
match self.state {
|
||||||
|
Status::Disabled | Status::Free => { true },
|
||||||
|
Status::Blocked(_, self_prio) |
|
||||||
|
Status::InUse(_, self_prio) |
|
||||||
|
Status::ToCheck(_, self_prio) |
|
||||||
|
Status::Reserved(_, self_prio) =>
|
||||||
|
{
|
||||||
|
priority > self_prio
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub fn init(log: Logger, config: &Settings, env: Arc<lmdb::Environment>) -> Result<Internal> {
|
pub fn init(log: Logger, config: &Settings, env: Arc<lmdb::Environment>) -> Result<Internal> {
|
||||||
let mut machine_descriptions = MachineDescription::load_file(&config.machines)?;
|
|
||||||
let mut flags = lmdb::DatabaseFlags::empty();
|
let mut flags = lmdb::DatabaseFlags::empty();
|
||||||
flags.set(lmdb::DatabaseFlags::INTEGER_KEY, true);
|
flags.set(lmdb::DatabaseFlags::INTEGER_KEY, true);
|
||||||
let machdb = env.create_db(Some("machines"), flags)?;
|
let machdb = env.create_db(Some("machines"), flags)?;
|
||||||
@ -85,30 +112,3 @@ pub fn init(log: Logger, config: &Settings, env: Arc<lmdb::Environment>) -> Resu
|
|||||||
|
|
||||||
Ok(Internal::new(log, env, machdb))
|
Ok(Internal::new(log, env, machdb))
|
||||||
}
|
}
|
||||||
|
|
||||||
type MachMap = HashMap<MachineIdentifier, MachineDescription>;
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct MachineDB {
|
|
||||||
state_db: Internal,
|
|
||||||
def_db: MachMap,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl MachineDB {
|
|
||||||
pub fn new(state_db: Internal, def_db: MachMap) -> Self {
|
|
||||||
Self { state_db, def_db }
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn exists(&self, id: MachineIdentifier) -> bool {
|
|
||||||
self.def_db.get(&id).is_some()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_desc(&self, id: &MachineIdentifier) -> Option<&MachineDescription> {
|
|
||||||
self.def_db.get(&id)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_state(&self, id: &MachineIdentifier) -> Option<MachineState> {
|
|
||||||
// TODO: Error Handling
|
|
||||||
self.state_db.get(id).unwrap_or(None)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
@ -30,10 +30,10 @@ impl Internal {
|
|||||||
Self { log, env, db }
|
Self { log, env, db }
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_with_txn<T: Transaction>(&self, txn: &T, uuid: &Uuid)
|
pub fn get_with_txn<T: Transaction>(&self, txn: &T, id: &String)
|
||||||
-> Result<Option<MachineState>>
|
-> Result<Option<MachineState>>
|
||||||
{
|
{
|
||||||
match txn.get(self.db, uuid.as_bytes()) {
|
match txn.get(self.db, &id.as_bytes()) {
|
||||||
Ok(bytes) => {
|
Ok(bytes) => {
|
||||||
let mut machine: MachineState = flexbuffers::from_slice(bytes)?;
|
let mut machine: MachineState = flexbuffers::from_slice(bytes)?;
|
||||||
Ok(Some(machine))
|
Ok(Some(machine))
|
||||||
@ -48,20 +48,25 @@ impl Internal {
|
|||||||
self.get_with_txn(&txn, id)
|
self.get_with_txn(&txn, id)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn put_with_txn(&self, txn: &mut RwTransaction, uuid: &Uuid, status: MachineState)
|
pub fn put_with_txn(&self, txn: &mut RwTransaction, uuid: &String, status: &MachineState)
|
||||||
-> Result<()>
|
-> Result<()>
|
||||||
{
|
{
|
||||||
let bytes = flexbuffers::to_vec(status)?;
|
let bytes = flexbuffers::to_vec(status)?;
|
||||||
txn.put(self.db, uuid.as_bytes(), &bytes, lmdb::WriteFlags::empty())?;
|
txn.put(self.db, &uuid.as_bytes(), &bytes, lmdb::WriteFlags::empty())?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn put(&self, id: &MachineIdentifier, status: &MachineState) -> Result<()> {
|
||||||
|
let mut txn = self.env.begin_rw_txn()?;
|
||||||
|
self.put_with_txn(&mut txn, id, status)?;
|
||||||
|
txn.commit().map_err(Into::into)
|
||||||
|
}
|
||||||
|
|
||||||
pub fn iter<T: Transaction>(&self, txn: &T) -> Result<impl Iterator<Item=MachineState>> {
|
pub fn iter<T: Transaction>(&self, txn: &T) -> Result<impl Iterator<Item=MachineState>> {
|
||||||
let mut cursor = txn.open_ro_cursor(self.db)?;
|
let mut cursor = txn.open_ro_cursor(self.db)?;
|
||||||
Ok(cursor.iter_start().map(|buf| {
|
Ok(cursor.iter_start().map(|buf| {
|
||||||
let (kbuf, vbuf) = buf.unwrap();
|
let (kbuf, vbuf) = buf.unwrap();
|
||||||
let machID = uuid::Uuid::from_slice(kbuf).unwrap();
|
|
||||||
flexbuffers::from_slice(vbuf).unwrap()
|
flexbuffers::from_slice(vbuf).unwrap()
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
79
src/db/pass.rs
Normal file
79
src/db/pass.rs
Normal file
@ -0,0 +1,79 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
use std::path::Path;
|
||||||
|
use std::fs;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use argon2;
|
||||||
|
use lmdb::{Environment, Transaction, RwTransaction, Cursor};
|
||||||
|
use rand::prelude::*;
|
||||||
|
use slog::Logger;
|
||||||
|
|
||||||
|
use crate::error::Result;
|
||||||
|
|
||||||
|
pub struct PassDB {
|
||||||
|
log: Logger,
|
||||||
|
env: Arc<Environment>,
|
||||||
|
db: lmdb::Database,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PassDB {
|
||||||
|
pub fn new(log: Logger, env: Arc<Environment>, db: lmdb::Database) -> Self {
|
||||||
|
Self { log, env, db }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn init(log: Logger, env: Arc<Environment>) -> Result<Self> {
|
||||||
|
let mut flags = lmdb::DatabaseFlags::empty();
|
||||||
|
flags.set(lmdb::DatabaseFlags::INTEGER_KEY, true);
|
||||||
|
let db = env.create_db(Some("pass"), flags)?;
|
||||||
|
|
||||||
|
Ok(Self::new(log, env, db))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check a password for a given authcid.
|
||||||
|
///
|
||||||
|
/// `Ok(None)` means the given authcid is not stored in the database
|
||||||
|
pub fn check_with_txn<T: Transaction>(&self, txn: &T, authcid: &str, password: &[u8]) -> Result<Option<bool>> {
|
||||||
|
match txn.get(self.db, &authcid.as_bytes()) {
|
||||||
|
Ok(bytes) => {
|
||||||
|
let encoded = unsafe { std::str::from_utf8_unchecked(bytes) };
|
||||||
|
let res = argon2::verify_encoded(encoded, password)?;
|
||||||
|
Ok(Some(res))
|
||||||
|
},
|
||||||
|
Err(lmdb::Error::NotFound) => { Ok(None) },
|
||||||
|
Err(e) => { Err(e.into()) },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn check(&self, authcid: &str, password: &[u8]) -> Result<Option<bool>> {
|
||||||
|
let txn = self.env.begin_ro_txn()?;
|
||||||
|
self.check_with_txn(&txn, authcid, password)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Store a password for a given authcid, potentially overwriting an existing password
|
||||||
|
pub fn store_with_txn(&self, txn: &mut RwTransaction, authcid: &str, password: &[u8]) -> Result<()> {
|
||||||
|
let config = argon2::Config::default();
|
||||||
|
let salt: [u8; 16] = rand::random();
|
||||||
|
let hash = argon2::hash_encoded(password, &salt, &config)?;
|
||||||
|
txn.put(self.db, &authcid.as_bytes(), &hash.as_bytes(), lmdb::WriteFlags::empty())
|
||||||
|
.map_err(Into::into)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn insert_multiple(&self, vec: Vec<(String, String)>) -> Result<()> {
|
||||||
|
let mut txn = self.env.begin_rw_txn()?;
|
||||||
|
for (authcid, password) in vec.iter() {
|
||||||
|
self.store_with_txn(&mut txn, authcid.as_ref(), password.as_bytes())?;
|
||||||
|
}
|
||||||
|
txn.commit()?;
|
||||||
|
|
||||||
|
let v: Vec<&String> = vec.iter().map(|(a,_)| a).collect();
|
||||||
|
debug!(self.log, "Loaded passwords for: {:?}", v);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn load_file<P: AsRef<Path>>(&self, path: P) -> Result<()> {
|
||||||
|
let f = fs::read(path)?;
|
||||||
|
let mut map: HashMap<String, String> = toml::from_slice(&f)?;
|
||||||
|
|
||||||
|
self.insert_multiple(map.drain().collect())
|
||||||
|
}
|
||||||
|
}
|
150
src/db/user.rs
150
src/db/user.rs
@ -1,63 +1,137 @@
|
|||||||
|
//! UserDB does two kinds of lookups:
|
||||||
|
//! 1. "I have this here username, what user is that"
|
||||||
|
//! 2. "I have this here user, what are their roles (and other associated data)"
|
||||||
use serde::{Serialize, Deserialize};
|
use serde::{Serialize, Deserialize};
|
||||||
use std::fmt;
|
use std::fmt;
|
||||||
|
use std::fs;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::iter::FromIterator;
|
||||||
|
use std::path::Path;
|
||||||
use crate::db::access::RoleIdentifier;
|
use crate::db::access::RoleIdentifier;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
/// A Person, from the Authorization perspective
|
use slog::Logger;
|
||||||
#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)]
|
|
||||||
|
use crate::error::Result;
|
||||||
|
use crate::config::Config;
|
||||||
|
|
||||||
|
mod internal;
|
||||||
|
pub use internal::Internal;
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
|
/// An user
|
||||||
pub struct User {
|
pub struct User {
|
||||||
/// The identification of this user.
|
/// The precise (and unique) identifier of this user
|
||||||
pub id: UserIdentifier,
|
pub id: UserId,
|
||||||
|
/// Data BFFH stores on this user to base decisions on
|
||||||
/// A Person has N ≥ 0 roles.
|
pub data: UserData,
|
||||||
/// Persons are only ever given roles, not permissions directly
|
|
||||||
pub roles: Vec<RoleIdentifier>,
|
|
||||||
|
|
||||||
/// Additional data storage
|
|
||||||
#[serde(flatten)]
|
|
||||||
kv: HashMap<Box<[u8]>, Box<[u8]>>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl User {
|
||||||
/// Locally unique identifier for an user
|
pub fn new(id: UserId, data: UserData) -> Self {
|
||||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
Self { id, data }
|
||||||
pub struct UserIdentifier {
|
|
||||||
/// Main UID. Must be unique in this instance so that the tuple (uid, location) is globally
|
|
||||||
/// unique.
|
|
||||||
uid: String,
|
|
||||||
/// Subordinate ID. Must be unique for this user, i.e. the tuple (uid, subuid) must be unique
|
|
||||||
/// but two different uids can have the same subuid. `None` means no subuid is set and the ID
|
|
||||||
/// refers to the main users
|
|
||||||
subuid: Option<String>,
|
|
||||||
/// Location of the instance the user comes from. `None` means the local instance.
|
|
||||||
location: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl UserIdentifier {
|
|
||||||
pub fn new(uid: String, subuid: Option<String>, location: Option<String>) -> Self {
|
|
||||||
Self { uid, subuid, location }
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl fmt::Display for UserIdentifier {
|
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||||
|
/// Authorization Identity
|
||||||
|
///
|
||||||
|
/// This identity is internal to FabAccess and completely independent from the authentication
|
||||||
|
/// method or source
|
||||||
|
pub struct UserId {
|
||||||
|
/// Main User ID. Generally an user name or similar. Locally unique
|
||||||
|
uid: String,
|
||||||
|
/// Sub user ID.
|
||||||
|
///
|
||||||
|
/// Can change scopes for permissions, e.g. having a +admin account with more permissions than
|
||||||
|
/// the default account and +dashboard et.al. accounts that have restricted permissions for
|
||||||
|
/// their applications
|
||||||
|
subuid: Option<String>,
|
||||||
|
/// Realm this account originates.
|
||||||
|
///
|
||||||
|
/// The Realm is usually described by a domain name but local policy may dictate an unrelated
|
||||||
|
/// mapping
|
||||||
|
realm: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl UserId {
|
||||||
|
pub fn new(uid: String, subuid: Option<String>, realm: Option<String>) -> Self {
|
||||||
|
Self { uid, subuid, realm }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for UserId {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
let r = write!(f, "{}", self.uid);
|
let r = write!(f, "{}", self.uid);
|
||||||
if let Some(ref s) = self.subuid {
|
if let Some(ref s) = self.subuid {
|
||||||
write!(f, "+{}", s)?;
|
write!(f, "+{}", s)?;
|
||||||
}
|
}
|
||||||
if let Some(ref l) = self.location {
|
if let Some(ref l) = self.realm {
|
||||||
write!(f, "@{}", l)?;
|
write!(f, "@{}", l)?;
|
||||||
}
|
}
|
||||||
r
|
r
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// User Database Trait
|
#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)]
|
||||||
pub trait UserDB {
|
/// Data on an user to base decisions on
|
||||||
fn get_user(&self, uid: UserIdentifier) -> Option<User>;
|
///
|
||||||
|
/// This of course includes authorization data, i.e. that users set roles
|
||||||
|
pub struct UserData {
|
||||||
|
/// A Person has N ≥ 0 roles.
|
||||||
|
/// Persons are only ever given roles, not permissions directly
|
||||||
|
pub roles: Vec<RoleIdentifier>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "is_zero")]
|
||||||
|
#[serde(default = "default_priority")]
|
||||||
|
/// A priority number, defaulting to 0.
|
||||||
|
///
|
||||||
|
/// The higher, the higher the priority. Higher priority users overwrite lower priority ones.
|
||||||
|
pub priority: u64,
|
||||||
|
|
||||||
|
/// Additional data storage
|
||||||
|
#[serde(flatten, skip_serializing_if = "HashMap::is_empty")]
|
||||||
|
kv: HashMap<String, String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
impl UserData {
|
||||||
|
pub fn new(roles: Vec<RoleIdentifier>, priority: u64) -> Self {
|
||||||
|
Self {
|
||||||
|
roles: roles,
|
||||||
|
priority: priority,
|
||||||
|
kv: HashMap::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn is_zero(i: &u64) -> bool {
|
||||||
|
*i == 0
|
||||||
|
}
|
||||||
|
const fn default_priority() -> u64 {
|
||||||
|
0
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn load_file<P: AsRef<Path>>(path: P) -> Result<HashMap<String, User>> {
|
||||||
|
let f = fs::read(path)?;
|
||||||
|
let mut map: HashMap<String, UserData> = toml::from_slice(&f)?;
|
||||||
|
|
||||||
|
Ok(HashMap::from_iter(map.drain().map(|(uid, user_data)|
|
||||||
|
( uid.clone()
|
||||||
|
, User::new(UserId::new(uid, None, None), user_data)
|
||||||
|
)
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn init(log: Logger, config: &Config, env: Arc<lmdb::Environment>) -> Result<Internal> {
|
||||||
|
let mut flags = lmdb::DatabaseFlags::empty();
|
||||||
|
flags.set(lmdb::DatabaseFlags::INTEGER_KEY, true);
|
||||||
|
let db = env.create_db(Some("users"), flags)?;
|
||||||
|
debug!(&log, "Opened user db successfully.");
|
||||||
|
|
||||||
|
Ok(Internal::new(log, env, db))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test_DISABLED)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
@ -65,7 +139,7 @@ mod tests {
|
|||||||
fn format_uid_test() {
|
fn format_uid_test() {
|
||||||
let uid = "testuser".to_string();
|
let uid = "testuser".to_string();
|
||||||
let suid = "testsuid".to_string();
|
let suid = "testsuid".to_string();
|
||||||
let location = "testloc".to_string();
|
let realm = "testloc".to_string();
|
||||||
|
|
||||||
assert_eq!("testuser",
|
assert_eq!("testuser",
|
||||||
format!("{}", UserIdentifier::new(uid.clone(), None, None)));
|
format!("{}", UserIdentifier::new(uid.clone(), None, None)));
|
||||||
@ -74,6 +148,6 @@ mod tests {
|
|||||||
assert_eq!("testuser+testsuid",
|
assert_eq!("testuser+testsuid",
|
||||||
format!("{}", UserIdentifier::new(uid.clone(), Some(suid.clone()), None)));
|
format!("{}", UserIdentifier::new(uid.clone(), Some(suid.clone()), None)));
|
||||||
assert_eq!("testuser+testsuid@testloc",
|
assert_eq!("testuser+testsuid@testloc",
|
||||||
format!("{}", UserIdentifier::new(uid, Some(suid), Some(location))));
|
format!("{}", UserIdentifier::new(uid, Some(suid), Some(realm))));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
49
src/db/user/internal.rs
Normal file
49
src/db/user/internal.rs
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use slog::Logger;
|
||||||
|
use lmdb::{Environment, Transaction, RwTransaction, Cursor};
|
||||||
|
|
||||||
|
use crate::error::Result;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct Internal {
|
||||||
|
log: Logger,
|
||||||
|
env: Arc<Environment>,
|
||||||
|
db: lmdb::Database,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Internal {
|
||||||
|
pub fn new(log: Logger, env: Arc<Environment>, db: lmdb::Database) -> Self {
|
||||||
|
Self { log, env, db }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_user_txn<T: Transaction>(&self, txn: &T, uid: &str) -> Result<Option<User>> {
|
||||||
|
match txn.get(self.db, &uid.as_bytes()) {
|
||||||
|
Ok(bytes) => {
|
||||||
|
Ok(Some(flexbuffers::from_slice(bytes)?))
|
||||||
|
},
|
||||||
|
Err(lmdb::Error::NotFound) => Ok(None),
|
||||||
|
Err(e) => Err(e.into()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn get_user(&self, uid: &str) -> Result<Option<User>> {
|
||||||
|
let txn = self.env.begin_ro_txn()?;
|
||||||
|
self.get_user_txn(&txn, uid)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn put_user_txn(&self, txn: &mut RwTransaction, uid: &str, user: &User) -> Result<()> {
|
||||||
|
let bytes = flexbuffers::to_vec(user)?;
|
||||||
|
txn.put(self.db, &uid.as_bytes(), &bytes, lmdb::WriteFlags::empty())?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
pub fn put_user(&self, uid: &str, user: &User) -> Result<()> {
|
||||||
|
let mut txn = self.env.begin_rw_txn()?;
|
||||||
|
self.put_user_txn(&mut txn, uid, user)?;
|
||||||
|
txn.commit()?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
47
src/error.rs
47
src/error.rs
@ -1,18 +1,22 @@
|
|||||||
use std::io;
|
use std::io;
|
||||||
use std::fmt;
|
use std::fmt;
|
||||||
use toml;
|
use toml;
|
||||||
|
use serde_dhall;
|
||||||
|
|
||||||
use rsasl::SaslError;
|
use rsasl::SaslError;
|
||||||
|
|
||||||
// SpawnError is a somewhat ambigous name, `use as` to make it futures::SpawnError instead.
|
// SpawnError is a somewhat ambigous name, `use as` to make it futures::SpawnError instead.
|
||||||
use futures::task as futures;
|
use futures::task as futures_task;
|
||||||
|
|
||||||
use paho_mqtt::errors as mqtt;
|
use paho_mqtt::errors as mqtt;
|
||||||
|
|
||||||
|
use crate::network;
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub enum Error {
|
pub enum Error {
|
||||||
TomlDe(toml::de::Error),
|
TomlDe(toml::de::Error),
|
||||||
TomlSer(toml::ser::Error),
|
TomlSer(toml::ser::Error),
|
||||||
|
Dhall(serde_dhall::Error),
|
||||||
SASL(SaslError),
|
SASL(SaslError),
|
||||||
IO(io::Error),
|
IO(io::Error),
|
||||||
Boxed(Box<dyn std::error::Error>),
|
Boxed(Box<dyn std::error::Error>),
|
||||||
@ -20,10 +24,12 @@ pub enum Error {
|
|||||||
LMDB(lmdb::Error),
|
LMDB(lmdb::Error),
|
||||||
FlexbuffersDe(flexbuffers::DeserializationError),
|
FlexbuffersDe(flexbuffers::DeserializationError),
|
||||||
FlexbuffersSer(flexbuffers::SerializationError),
|
FlexbuffersSer(flexbuffers::SerializationError),
|
||||||
FuturesSpawn(futures::SpawnError),
|
FuturesSpawn(futures_task::SpawnError),
|
||||||
MQTT(mqtt::Error),
|
MQTT(mqtt::Error),
|
||||||
Config(config::ConfigError),
|
|
||||||
BadVersion((u32,u32)),
|
BadVersion((u32,u32)),
|
||||||
|
Argon2(argon2::Error),
|
||||||
|
EventNetwork(network::Error),
|
||||||
|
Denied,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl fmt::Display for Error {
|
impl fmt::Display for Error {
|
||||||
@ -35,6 +41,9 @@ impl fmt::Display for Error {
|
|||||||
Error::TomlSer(e) => {
|
Error::TomlSer(e) => {
|
||||||
write!(f, "TOML Serialization error: {}", e)
|
write!(f, "TOML Serialization error: {}", e)
|
||||||
},
|
},
|
||||||
|
Error::Dhall(e) => {
|
||||||
|
write!(f, "Dhall coding error: {}", e)
|
||||||
|
},
|
||||||
Error::SASL(e) => {
|
Error::SASL(e) => {
|
||||||
write!(f, "SASL Error: {}", e)
|
write!(f, "SASL Error: {}", e)
|
||||||
},
|
},
|
||||||
@ -62,12 +71,18 @@ impl fmt::Display for Error {
|
|||||||
Error::MQTT(e) => {
|
Error::MQTT(e) => {
|
||||||
write!(f, "Paho MQTT encountered an error: {}", e)
|
write!(f, "Paho MQTT encountered an error: {}", e)
|
||||||
},
|
},
|
||||||
Error::Config(e) => {
|
Error::Argon2(e) => {
|
||||||
write!(f, "Failed to parse config: {}", e)
|
write!(f, "Argon2 en/decoding failure: {}", e)
|
||||||
}
|
}
|
||||||
Error::BadVersion((major,minor)) => {
|
Error::BadVersion((major,minor)) => {
|
||||||
write!(f, "Peer uses API version {}.{} which is incompatible!", major, minor)
|
write!(f, "Peer uses API version {}.{} which is incompatible!", major, minor)
|
||||||
}
|
}
|
||||||
|
Error::Denied => {
|
||||||
|
write!(f, "You do not have the permission required to do that.")
|
||||||
|
}
|
||||||
|
Error::EventNetwork(e) => {
|
||||||
|
e.fmt(f)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -96,6 +111,12 @@ impl From<toml::ser::Error> for Error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl From<serde_dhall::Error> for Error {
|
||||||
|
fn from(e: serde_dhall::Error) -> Error {
|
||||||
|
Error::Dhall(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl From<Box<dyn std::error::Error>> for Error {
|
impl From<Box<dyn std::error::Error>> for Error {
|
||||||
fn from(e: Box<dyn std::error::Error>) -> Error {
|
fn from(e: Box<dyn std::error::Error>) -> Error {
|
||||||
Error::Boxed(e)
|
Error::Boxed(e)
|
||||||
@ -126,8 +147,8 @@ impl From<flexbuffers::SerializationError> for Error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<futures::SpawnError> for Error {
|
impl From<futures_task::SpawnError> for Error {
|
||||||
fn from(e: futures::SpawnError) -> Error {
|
fn from(e: futures_task::SpawnError) -> Error {
|
||||||
Error::FuturesSpawn(e)
|
Error::FuturesSpawn(e)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -138,9 +159,15 @@ impl From<mqtt::Error> for Error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<config::ConfigError> for Error {
|
impl From<network::Error> for Error {
|
||||||
fn from(e: config::ConfigError) -> Error {
|
fn from(e: network::Error) -> Error {
|
||||||
Error::Config(e)
|
Error::EventNetwork(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<argon2::Error> for Error {
|
||||||
|
fn from(e: argon2::Error) -> Error {
|
||||||
|
Error::Argon2(e)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
169
src/initiator.rs
Normal file
169
src/initiator.rs
Normal file
@ -0,0 +1,169 @@
|
|||||||
|
use std::pin::Pin;
|
||||||
|
use std::task::{Poll, Context};
|
||||||
|
use std::future::Future;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use smol::{Task, Timer};
|
||||||
|
|
||||||
|
use slog::Logger;
|
||||||
|
|
||||||
|
use paho_mqtt::AsyncClient;
|
||||||
|
|
||||||
|
use futures::FutureExt;
|
||||||
|
use futures::future::BoxFuture;
|
||||||
|
|
||||||
|
use genawaiter::{sync::{Gen, GenBoxed, Co}, GeneratorState};
|
||||||
|
|
||||||
|
use futures_signals::signal::{Signal, Mutable, MutableSignalCloned};
|
||||||
|
use crate::machine::{Machine, ReturnToken};
|
||||||
|
use crate::db::machine::MachineState;
|
||||||
|
use crate::db::user::{User, UserId, UserData};
|
||||||
|
|
||||||
|
use crate::network::InitMap;
|
||||||
|
|
||||||
|
use crate::error::Result;
|
||||||
|
use crate::config::Config;
|
||||||
|
|
||||||
|
pub trait Sensor {
|
||||||
|
fn run_sensor(&mut self) -> BoxFuture<'static, (Option<User>, MachineState)>;
|
||||||
|
}
|
||||||
|
|
||||||
|
type BoxSensor = Box<dyn Sensor + Send>;
|
||||||
|
|
||||||
|
pub struct Initiator {
|
||||||
|
signal: MutableSignalCloned<Option<Machine>>,
|
||||||
|
machine: Option<Machine>,
|
||||||
|
future: Option<BoxFuture<'static, (Option<User>, MachineState)>>,
|
||||||
|
token: Option<ReturnToken>,
|
||||||
|
sensor: BoxSensor,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Initiator {
|
||||||
|
pub fn new(sensor: BoxSensor, signal: MutableSignalCloned<Option<Machine>>) -> Self {
|
||||||
|
Self {
|
||||||
|
signal: signal,
|
||||||
|
machine: None,
|
||||||
|
future: None,
|
||||||
|
token: None,
|
||||||
|
sensor: sensor,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn wrap(sensor: BoxSensor) -> (Mutable<Option<Machine>>, Self) {
|
||||||
|
let m = Mutable::new(None);
|
||||||
|
let s = m.signal_cloned();
|
||||||
|
|
||||||
|
(m, Self::new(sensor, s))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Future for Initiator {
|
||||||
|
type Output = ();
|
||||||
|
|
||||||
|
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
|
||||||
|
let mut this = &mut *self;
|
||||||
|
|
||||||
|
// First of course, see what machine we should work with.
|
||||||
|
match Signal::poll_change(Pin::new(&mut this.signal), cx) {
|
||||||
|
Poll::Pending => { }
|
||||||
|
Poll::Ready(None) => return Poll::Ready(()),
|
||||||
|
// Keep in mind this is actually an Option<Machine>
|
||||||
|
Poll::Ready(Some(machine)) => this.machine = machine,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do as much work as we can:
|
||||||
|
loop {
|
||||||
|
// If there is a future, poll it
|
||||||
|
match this.future.as_mut().map(|future| Future::poll(Pin::new(future), cx)) {
|
||||||
|
None => {
|
||||||
|
this.future = Some(this.sensor.run_sensor());
|
||||||
|
},
|
||||||
|
Some(Poll::Ready((user, state))) => {
|
||||||
|
this.future.take();
|
||||||
|
this.machine.as_mut().map(|machine| machine.request_state_change(user.as_ref(), state).unwrap());
|
||||||
|
}
|
||||||
|
Some(Poll::Pending) => return Poll::Pending,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn load(log: &Logger, client: &AsyncClient, config: &Config) -> Result<(InitMap, Vec<Initiator>)> {
|
||||||
|
let mut map = HashMap::new();
|
||||||
|
|
||||||
|
let initiators = config.initiators.iter()
|
||||||
|
.map(|(k,v)| (k, load_single(log, client, k, &v.module, &v.params)))
|
||||||
|
.filter_map(|(k,n)| match n {
|
||||||
|
None => None,
|
||||||
|
Some(i) => Some((k, i)),
|
||||||
|
});
|
||||||
|
|
||||||
|
let mut v = Vec::new();
|
||||||
|
for (name, initiator) in initiators {
|
||||||
|
let (m, i) = Initiator::wrap(initiator);
|
||||||
|
map.insert(name.clone(), m);
|
||||||
|
v.push(i);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok((map, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn load_single(
|
||||||
|
log: &Logger,
|
||||||
|
client: &AsyncClient,
|
||||||
|
name: &String,
|
||||||
|
module_name: &String,
|
||||||
|
params: &HashMap<String, String>
|
||||||
|
) -> Option<BoxSensor>
|
||||||
|
{
|
||||||
|
match module_name.as_ref() {
|
||||||
|
"Dummy" => {
|
||||||
|
Some(Box::new(Dummy::new(log)))
|
||||||
|
},
|
||||||
|
_ => {
|
||||||
|
error!(log, "No initiator found with name \"{}\", configured as \"{}\"",
|
||||||
|
module_name, name);
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct Dummy {
|
||||||
|
log: Logger,
|
||||||
|
step: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Dummy {
|
||||||
|
pub fn new(log: &Logger) -> Self {
|
||||||
|
Self { log: log.new(o!("module" => "Dummy Initiator")), step: false }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Sensor for Dummy {
|
||||||
|
fn run_sensor(&mut self)
|
||||||
|
-> BoxFuture<'static, (Option<User>, MachineState)>
|
||||||
|
{
|
||||||
|
let step = self.step;
|
||||||
|
self.step = !step;
|
||||||
|
|
||||||
|
info!(self.log, "Kicking off new dummy initiator state change: {}", step);
|
||||||
|
|
||||||
|
let f = async move {
|
||||||
|
Timer::after(std::time::Duration::from_secs(1)).await;
|
||||||
|
if step {
|
||||||
|
return (None, MachineState::free());
|
||||||
|
} else {
|
||||||
|
let user = User::new(
|
||||||
|
UserId::new("test".to_string(), None, None),
|
||||||
|
UserData::new(vec![], 0),
|
||||||
|
);
|
||||||
|
let p = user.data.priority;
|
||||||
|
let id = user.id.clone();
|
||||||
|
return (Some(user), MachineState::used(id, p));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Box::pin(f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -3,7 +3,7 @@ use slog_async;
|
|||||||
use slog_term::{TermDecorator, FullFormat};
|
use slog_term::{TermDecorator, FullFormat};
|
||||||
use crate::config::Settings;
|
use crate::config::Settings;
|
||||||
|
|
||||||
pub fn init(_config: &Settings) -> Logger {
|
pub fn init() -> Logger {
|
||||||
let decorator = TermDecorator::new().build();
|
let decorator = TermDecorator::new().build();
|
||||||
let drain = FullFormat::new(decorator).build().fuse();
|
let drain = FullFormat::new(decorator).build().fuse();
|
||||||
let drain = slog_async::Async::new(drain).build().fuse();
|
let drain = slog_async::Async::new(drain).build().fuse();
|
||||||
|
189
src/machine.rs
189
src/machine.rs
@ -1,4 +1,12 @@
|
|||||||
|
use std::ops::{Deref, DerefMut};
|
||||||
|
use std::iter::FromIterator;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use futures_util::lock::Mutex;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
|
use std::task::{Poll, Context};
|
||||||
|
use std::pin::Pin;
|
||||||
|
use std::future::Future;
|
||||||
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::fs;
|
use std::fs;
|
||||||
|
|
||||||
@ -6,15 +14,85 @@ use serde::{Serialize, Deserialize};
|
|||||||
|
|
||||||
use futures_signals::signal::Signal;
|
use futures_signals::signal::Signal;
|
||||||
use futures_signals::signal::SignalExt;
|
use futures_signals::signal::SignalExt;
|
||||||
use futures_signals::signal::Mutable;
|
use futures_signals::signal::{Mutable, ReadOnlyMutable};
|
||||||
|
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
|
|
||||||
use crate::error::Result;
|
use crate::error::{Result, Error};
|
||||||
|
|
||||||
use crate::db::user::User;
|
|
||||||
use crate::db::access;
|
use crate::db::access;
|
||||||
use crate::db::machine::{MachineIdentifier, Status, MachineState};
|
use crate::db::machine::{MachineIdentifier, Status, MachineState};
|
||||||
|
use crate::db::user::User;
|
||||||
|
|
||||||
|
use crate::network::MachineMap;
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct Index {
|
||||||
|
inner: HashMap<String, Machine>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Index {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
inner: HashMap::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn insert(&mut self, key: String, value: Machine) -> Option<Machine> {
|
||||||
|
self.inner.insert(key, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get(&mut self, key: &String) -> Option<Machine> {
|
||||||
|
self.inner.get(key).map(|m| m.clone())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct Machine {
|
||||||
|
inner: Arc<Mutex<Inner>>
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Machine {
|
||||||
|
pub fn new(inner: Inner) -> Self {
|
||||||
|
Self { inner: Arc::new(Mutex::new(inner)) }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn construct
|
||||||
|
( id: MachineIdentifier
|
||||||
|
, desc: MachineDescription
|
||||||
|
, state: MachineState
|
||||||
|
) -> Machine
|
||||||
|
{
|
||||||
|
Self::new(Inner::new(id, desc, state))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Vec<Machine>> {
|
||||||
|
let mut map: HashMap<MachineIdentifier, MachineDescription> = MachineDescription::load_file(path)?;
|
||||||
|
Ok(map.drain().map(|(id, desc)| {
|
||||||
|
Self::construct(id, desc, MachineState::new())
|
||||||
|
}).collect())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn request_state_change(&self, who: Option<&User>, new_state: MachineState)
|
||||||
|
-> Result<ReturnToken>
|
||||||
|
{
|
||||||
|
let mut guard = self.inner.try_lock().unwrap();
|
||||||
|
guard.request_state_change(who, new_state)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn signal(&self) -> impl Signal<Item=MachineState> {
|
||||||
|
let mut guard = self.inner.try_lock().unwrap();
|
||||||
|
guard.signal()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Deref for Machine {
|
||||||
|
type Target = Mutex<Inner>;
|
||||||
|
|
||||||
|
fn deref(&self) -> &Self::Target {
|
||||||
|
&self.inner
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
/// Internal machine representation
|
/// Internal machine representation
|
||||||
@ -22,7 +100,7 @@ use crate::db::machine::{MachineIdentifier, Status, MachineState};
|
|||||||
/// A machine connects an event from a sensor to an actor activating/deactivating a real-world
|
/// A machine connects an event from a sensor to an actor activating/deactivating a real-world
|
||||||
/// machine, checking that the user who wants the machine (de)activated has the required
|
/// machine, checking that the user who wants the machine (de)activated has the required
|
||||||
/// permissions.
|
/// permissions.
|
||||||
pub struct Machine {
|
pub struct Inner {
|
||||||
/// Globally unique machine readable identifier
|
/// Globally unique machine readable identifier
|
||||||
pub id: MachineIdentifier,
|
pub id: MachineIdentifier,
|
||||||
|
|
||||||
@ -34,14 +112,18 @@ pub struct Machine {
|
|||||||
/// This is a Signal generator. Subscribers to this signal will be notified of changes. In the
|
/// This is a Signal generator. Subscribers to this signal will be notified of changes. In the
|
||||||
/// case of an actor it should then make sure that the real world matches up with the set state
|
/// case of an actor it should then make sure that the real world matches up with the set state
|
||||||
state: Mutable<MachineState>,
|
state: Mutable<MachineState>,
|
||||||
|
reset: Option<MachineState>,
|
||||||
|
rx: Option<futures::channel::oneshot::Receiver<()>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Machine {
|
impl Inner {
|
||||||
pub fn new(id: MachineIdentifier, desc: MachineDescription, perm: access::PermIdentifier) -> Machine {
|
pub fn new(id: MachineIdentifier, desc: MachineDescription, state: MachineState) -> Inner {
|
||||||
Machine {
|
Inner {
|
||||||
id: id,
|
id: id,
|
||||||
desc: desc,
|
desc: desc,
|
||||||
state: Mutable::new(MachineState { state: Status::Free}),
|
state: Mutable::new(state),
|
||||||
|
reset: None,
|
||||||
|
rx: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -57,26 +139,76 @@ impl Machine {
|
|||||||
Box::pin(self.state.signal_cloned().dedupe_cloned())
|
Box::pin(self.state.signal_cloned().dedupe_cloned())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Requests to use a machine. Returns `true` if successful.
|
/// Requests to use a machine. Returns a return token if successful.
|
||||||
///
|
///
|
||||||
/// This will update the internal state of the machine, notifying connected actors, if any.
|
/// This will update the internal state of the machine, notifying connected actors, if any.
|
||||||
pub async fn request_use
|
/// The return token is a channel that considers the machine 'returned' if anything is sent
|
||||||
( &mut self
|
/// along it or if the sending end gets dropped. Anybody who holds this token needs to check if
|
||||||
, access: access::AccessControl
|
/// the receiving end was canceled which indicates that the machine has been taken off their
|
||||||
, who: &User
|
/// hands.
|
||||||
) -> Result<bool>
|
pub fn request_state_change(&mut self, who: Option<&User>, new_state: MachineState)
|
||||||
|
-> Result<ReturnToken>
|
||||||
{
|
{
|
||||||
// TODO: Check different levels
|
if who.is_none() {
|
||||||
if access.check(who, &self.desc.privs.write).await? {
|
if new_state.state == Status::Free {
|
||||||
self.state.set(MachineState { state: Status::InUse(who.id.clone()) });
|
return self.do_state_change(new_state);
|
||||||
return Ok(true);
|
}
|
||||||
} else {
|
} else {
|
||||||
return Ok(false);
|
if self.state.lock_ref().is_higher_priority(who.unwrap().data.priority) {
|
||||||
|
return self.do_state_change(new_state);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return Err(Error::Denied);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn set_state(&mut self, state: Status) {
|
fn do_state_change(&mut self, new_state: MachineState) -> Result<ReturnToken> {
|
||||||
self.state.set(MachineState { state })
|
let (tx, rx) = futures::channel::oneshot::channel();
|
||||||
|
let old_state = self.state.replace(new_state);
|
||||||
|
self.reset.replace(old_state);
|
||||||
|
// Also this drops the old receiver, which will signal to the initiator that the
|
||||||
|
// machine has been taken off their hands.
|
||||||
|
self.rx.replace(rx);
|
||||||
|
return Ok(tx);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn read_state(&self) -> ReadOnlyMutable<MachineState> {
|
||||||
|
self.state.read_only()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_signal(&self) -> impl Signal {
|
||||||
|
self.state.signal_cloned()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn reset_state(&mut self) {
|
||||||
|
if let Some(state) = self.reset.take() {
|
||||||
|
self.state.replace(state);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub type ReturnToken = futures::channel::oneshot::Sender<()>;
|
||||||
|
|
||||||
|
impl Future for Inner {
|
||||||
|
type Output = MachineState;
|
||||||
|
|
||||||
|
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
|
||||||
|
let mut this = &mut *self;
|
||||||
|
// TODO Return this on exit
|
||||||
|
if false {
|
||||||
|
return Poll::Ready(self.state.get_cloned());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the return token was sent/dropped
|
||||||
|
if let Some(mut rx) = this.rx.take() {
|
||||||
|
match Future::poll(Pin::new(&mut rx), cx) {
|
||||||
|
// Regardless if we were canceled or properly returned, reset.
|
||||||
|
Poll::Ready(_) => self.reset_state(),
|
||||||
|
Poll::Pending => { this.rx.replace(rx); },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Poll::Pending
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -93,7 +225,7 @@ pub struct MachineDescription {
|
|||||||
|
|
||||||
/// The permission required
|
/// The permission required
|
||||||
#[serde(flatten)]
|
#[serde(flatten)]
|
||||||
privs: access::PrivilegesBuf,
|
pub privs: access::PrivilegesBuf,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl MachineDescription {
|
impl MachineDescription {
|
||||||
@ -103,7 +235,18 @@ impl MachineDescription {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
pub fn load(config: &crate::config::Settings) -> Result<MachineMap> {
|
||||||
|
let mut map = config.machines.clone();
|
||||||
|
|
||||||
|
let it = map.drain()
|
||||||
|
.map(|(k,v)| {
|
||||||
|
// TODO: Read state from the state db
|
||||||
|
(v.name.clone(), Machine::construct(k, v, MachineState::new()))
|
||||||
|
});
|
||||||
|
Ok(HashMap::from_iter(it))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test_DISABLED)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
use std::iter::FromIterator;
|
use std::iter::FromIterator;
|
||||||
|
364
src/main.rs
364
src/main.rs
@ -13,10 +13,14 @@ mod api;
|
|||||||
mod config;
|
mod config;
|
||||||
mod error;
|
mod error;
|
||||||
mod connection;
|
mod connection;
|
||||||
mod registries;
|
|
||||||
mod schema;
|
mod schema;
|
||||||
mod db;
|
mod db;
|
||||||
mod machine;
|
mod machine;
|
||||||
|
mod builtin;
|
||||||
|
mod server;
|
||||||
|
mod network;
|
||||||
|
mod actor;
|
||||||
|
mod initiator;
|
||||||
|
|
||||||
use clap::{App, Arg};
|
use clap::{App, Arg};
|
||||||
|
|
||||||
@ -26,9 +30,6 @@ use futures::compat::Stream01CompatExt;
|
|||||||
use futures::join;
|
use futures::join;
|
||||||
use futures::task::LocalSpawn;
|
use futures::task::LocalSpawn;
|
||||||
|
|
||||||
use smol::net::TcpListener;
|
|
||||||
use smol::net::unix::UnixStream;
|
|
||||||
|
|
||||||
use std::io;
|
use std::io;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
@ -37,32 +38,17 @@ use std::str::FromStr;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use lmdb::Transaction;
|
use lmdb::Transaction;
|
||||||
|
use smol::net::TcpListener;
|
||||||
|
|
||||||
|
use smol::Executor;
|
||||||
|
|
||||||
use error::Error;
|
use error::Error;
|
||||||
|
|
||||||
use registries::Registries;
|
use slog::Logger;
|
||||||
|
|
||||||
const LMDB_MAX_DB: u32 = 16;
|
use paho_mqtt::AsyncClient;
|
||||||
|
|
||||||
// Returning a `Result` from `main` allows us to use the `?` shorthand.
|
|
||||||
// In the case of an Err it will be printed using `fmt::Debug`
|
|
||||||
fn main() -> Result<(), Error> {
|
|
||||||
let signal = Box::pin(async {
|
|
||||||
let (tx, mut rx) = UnixStream::pair()?;
|
|
||||||
// Initialize signal handler.
|
|
||||||
// We currently only care about Ctrl-C so SIGINT it is.
|
|
||||||
// TODO: Make this do SIGHUP and a few others too. (By cloning the tx end of the pipe)
|
|
||||||
signal_hook::pipe::register(signal_hook::SIGINT, tx)?;
|
|
||||||
// When a signal is received this future can complete and read a byte from the underlying
|
|
||||||
// socket — the actual data is discarded but the act of being able to receive data tells us
|
|
||||||
// that we received a SIGINT.
|
|
||||||
|
|
||||||
// FIXME: What errors are possible and how to handle them properly?
|
|
||||||
rx.read_exact(&mut [0u8]).await?;
|
|
||||||
|
|
||||||
io::Result::Ok(LoopResult::Stop)
|
|
||||||
});
|
|
||||||
|
|
||||||
|
fn main() {
|
||||||
use clap::{crate_version, crate_description, crate_name};
|
use clap::{crate_version, crate_description, crate_name};
|
||||||
|
|
||||||
// Argument parsing
|
// Argument parsing
|
||||||
@ -97,255 +83,123 @@ fn main() -> Result<(), Error> {
|
|||||||
// Check for the --print-default option first because we don't need to do anything else in that
|
// Check for the --print-default option first because we don't need to do anything else in that
|
||||||
// case.
|
// case.
|
||||||
if matches.is_present("print default") {
|
if matches.is_present("print default") {
|
||||||
let config = config::Settings::default();
|
let config = config::Config::default();
|
||||||
let encoded = toml::to_vec(&config)?;
|
let encoded = serde_dhall::serialize(&config).to_string().unwrap();
|
||||||
|
|
||||||
// Direct writing to fd 1 is faster but also prevents any print-formatting that could
|
// Direct writing to fd 1 is faster but also prevents any print-formatting that could
|
||||||
// invalidate the generated TOML
|
// invalidate the generated TOML
|
||||||
let stdout = io::stdout();
|
let stdout = io::stdout();
|
||||||
let mut handle = stdout.lock();
|
let mut handle = stdout.lock();
|
||||||
handle.write_all(&encoded)?;
|
handle.write_all(&encoded.as_bytes()).unwrap();
|
||||||
|
|
||||||
// Early return to exit.
|
// Early return to exit.
|
||||||
return Ok(())
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let retval;
|
||||||
|
|
||||||
|
// Scope to drop everything before exiting.
|
||||||
|
{
|
||||||
|
// Initialize the logging subsystem first to be able to better document the progress from now
|
||||||
|
// on.
|
||||||
|
// TODO: Now would be a really good time to close stdin/out and move logging to syslog
|
||||||
|
// Log is in an Arc so we can do very cheap clones in closures.
|
||||||
|
let log = Arc::new(log::init());
|
||||||
|
info!(log, "Starting");
|
||||||
|
|
||||||
|
match maybe(matches, log.clone()) {
|
||||||
|
Ok(_) => retval = 0,
|
||||||
|
Err(e) => {
|
||||||
|
error!(log, "{}", e);
|
||||||
|
retval = -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
std::process::exit(retval);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returning a `Result` from `main` allows us to use the `?` shorthand.
|
||||||
|
// In the case of an Err it will be printed using `fmt::Debug`
|
||||||
|
fn maybe(matches: clap::ArgMatches, log: Arc<Logger>) -> Result<(), Error> {
|
||||||
// If no `config` option is given use a preset default.
|
// If no `config` option is given use a preset default.
|
||||||
let configpath = matches.value_of("config").unwrap_or("/etc/bffh/config.toml");
|
let configpath = matches.value_of("config").unwrap_or("/etc/bffh/config.toml");
|
||||||
let config = config::read(&PathBuf::from_str(configpath).unwrap())?;
|
let config = config::read(&PathBuf::from_str(configpath).unwrap())?;
|
||||||
|
debug!(log, "Loaded Config: {:?}", config);
|
||||||
|
|
||||||
// Initialize the logging subsystem first to be able to better document the progress from now
|
if matches.is_present("dump") {
|
||||||
// on.
|
error!(log, "Dumping is currently not implemented");
|
||||||
// TODO: Now would be a really good time to close stdin/out and move logging to syslog
|
Ok(())
|
||||||
// Log is in an Arc so we can do very cheap clones in closures.
|
} else if matches.is_present("load") {
|
||||||
let log = Arc::new(log::init(&config));
|
let db = db::Databases::new(&log, &config)?;
|
||||||
info!(log, "Starting");
|
let mut dir = PathBuf::from(matches.value_of_os("load").unwrap());
|
||||||
|
|
||||||
// Initialize the LMDB environment. Since this would usually block untill the mmap() finishes
|
dir.push("users.toml");
|
||||||
// we wrap it in smol::unblock which runs this as future in a different thread.
|
let map = db::user::load_file(&dir)?;
|
||||||
let e_config = config.clone();
|
for (uid,user) in map.iter() {
|
||||||
info!(log, "LMDB env");
|
db.userdb.put_user(uid, user)?;
|
||||||
let env = lmdb::Environment::new()
|
|
||||||
.set_flags(lmdb::EnvironmentFlags::MAP_ASYNC | lmdb::EnvironmentFlags::NO_SUB_DIR)
|
|
||||||
.set_max_dbs(LMDB_MAX_DB as libc::c_uint)
|
|
||||||
.open(&PathBuf::from_str("/tmp/a.db").unwrap())?;
|
|
||||||
|
|
||||||
// Kick up an executor
|
|
||||||
// Most initializations from now on do some amount of IO and are much better done in an
|
|
||||||
// asyncronous fashion.
|
|
||||||
let mut exec = LocalPool::new();
|
|
||||||
|
|
||||||
|
|
||||||
// Start loading the machine database, authentication system and permission system
|
|
||||||
// All of those get a custom logger so the source of a log message can be better traced and
|
|
||||||
// filtered
|
|
||||||
let env = Arc::new(env);
|
|
||||||
let mdb = db::machine::init(log.new(o!("system" => "machines")), &config, env.clone());
|
|
||||||
let pdb = db::access::init(log.new(o!("system" => "permissions")), &config, env.clone());
|
|
||||||
|
|
||||||
// If --load or --dump is given we can stop at this point and load/dump the database and then
|
|
||||||
// exit.
|
|
||||||
if matches.is_present("load") {
|
|
||||||
if let Some(pathstr) = matches.value_of("load") {
|
|
||||||
let path = std::path::Path::new(pathstr);
|
|
||||||
|
|
||||||
let mut txn = env.begin_rw_txn()?;
|
|
||||||
let path = path.to_path_buf();
|
|
||||||
pdb?.load_db(&mut txn, path.clone())?;
|
|
||||||
//mdb?.load_db(&mut txn, path)?;
|
|
||||||
txn.commit()?;
|
|
||||||
} else {
|
|
||||||
error!(log, "You must provide a directory path to load from");
|
|
||||||
}
|
}
|
||||||
|
debug!(log, "Loaded users: {:?}", map);
|
||||||
|
dir.pop();
|
||||||
|
|
||||||
return Ok(())
|
dir.push("roles.toml");
|
||||||
} else if matches.is_present("dump") {
|
db.access.internal.load_roles(&dir)?;
|
||||||
if let Some(pathstr) = matches.value_of("dump") {
|
dir.pop();
|
||||||
let path = std::path::Path::new(pathstr);
|
|
||||||
if let Err(e) = std::fs::create_dir_all(path) {
|
dir.push("pass.toml");
|
||||||
error!(log, "The provided path could not be created: {}", e);
|
db.passdb.load_file(&dir);
|
||||||
return Ok(())
|
dir.pop();
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
} else {
|
||||||
|
let ex = Executor::new();
|
||||||
|
|
||||||
|
let mqtt = AsyncClient::new(config.mqtt_url.clone())?;
|
||||||
|
let tok = mqtt.connect(paho_mqtt::ConnectOptions::new());
|
||||||
|
|
||||||
|
smol::block_on(tok)?;
|
||||||
|
|
||||||
|
let machines = machine::load(&config)?;
|
||||||
|
let (mut actor_map, actors) = actor::load(&log, &mqtt, &config)?;
|
||||||
|
let (mut init_map, initiators) = initiator::load(&log, &mqtt, &config)?;
|
||||||
|
|
||||||
|
// TODO: restore connections between initiators, machines, actors
|
||||||
|
let mut network = network::Network::new(machines, actor_map, init_map);
|
||||||
|
|
||||||
|
for (a,b) in config.actor_connections.iter() {
|
||||||
|
if let Err(e) = network.connect_actor(a,b) {
|
||||||
|
error!(log, "{}", e);
|
||||||
}
|
}
|
||||||
|
|
||||||
let txn = env.begin_ro_txn()?;
|
|
||||||
let path = path.to_path_buf();
|
|
||||||
pdb?.dump_db(&txn, path.clone())?;
|
|
||||||
//mdb?.dump_db(&txn, path)?;
|
|
||||||
} else {
|
|
||||||
error!(log, "You must provide a directory path to dump into");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return Ok(())
|
for (a,b) in config.init_connections.iter() {
|
||||||
|
if let Err(e) = network.connect_init(a,b) {
|
||||||
|
error!(log, "{}", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for actor in actors.into_iter() {
|
||||||
|
ex.spawn(actor).detach();
|
||||||
|
}
|
||||||
|
for init in initiators.into_iter() {
|
||||||
|
ex.spawn(init).detach();
|
||||||
|
}
|
||||||
|
|
||||||
|
let (signal, shutdown) = async_channel::bounded::<()>(1);
|
||||||
|
let (_, r) = easy_parallel::Parallel::new()
|
||||||
|
.each(0..4, |_| smol::block_on(ex.run(shutdown.recv())))
|
||||||
|
.finish(|| {
|
||||||
|
let db = db::Databases::new(&log, &config)?;
|
||||||
|
// TODO: Spawn api connections on their own (non-main) thread, use the main thread to
|
||||||
|
// handle signals (a cli if stdin is not closed?) and make it stop and clean up all threads
|
||||||
|
// when bffh should exit
|
||||||
|
let r = server::serve_api_connections(log.clone(), config, db, network);
|
||||||
|
|
||||||
|
signal.try_send(());
|
||||||
|
std::mem::drop(signal);
|
||||||
|
return r;
|
||||||
|
});
|
||||||
|
|
||||||
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// Bind to each address in config.listen.
|
|
||||||
// This is a Stream over Futures so it will do absolutely nothing unless polled to completion
|
|
||||||
let listeners_s: futures::stream::Collect<_, Vec<TcpListener>>
|
|
||||||
= stream::iter((&config).listens.iter())
|
|
||||||
.map(|l| {
|
|
||||||
let addr = l.address.clone();
|
|
||||||
let port = l.port.unwrap_or(config::DEFAULT_PORT);
|
|
||||||
TcpListener::bind((l.address.as_str(), port))
|
|
||||||
// If the bind errors, include the address so we can log it
|
|
||||||
// Since this closure is lazy we need to have a cloned addr
|
|
||||||
.map_err(move |e| { (addr, port, e) })
|
|
||||||
})
|
|
||||||
.filter_map(|f| async {
|
|
||||||
match f.await {
|
|
||||||
Ok(l) => Some(l),
|
|
||||||
Err((addr, port, e)) => {
|
|
||||||
error!(&log, "Could not setup socket on {} port {}: {}", addr, port, e);
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}).collect();
|
|
||||||
|
|
||||||
//let (mach, auth) = exec.run_until(async {
|
|
||||||
// // Rull all futures to completion in parallel.
|
|
||||||
// // This will block until all three are done starting up.
|
|
||||||
// join!(machinedb_f, authentication_f)
|
|
||||||
//});
|
|
||||||
|
|
||||||
// Error out if any of the subsystems failed to start.
|
|
||||||
let mdb = mdb?;
|
|
||||||
let defs = machine::MachineDescription::load_file(&config.machines)?;
|
|
||||||
let machdb = db::machine::MachineDB::new(mdb, defs);
|
|
||||||
info!(log, "{:?}", machdb);
|
|
||||||
let pdb = pdb?;
|
|
||||||
let mut ac = db::access::AccessControl::new();
|
|
||||||
ac.add_source_unchecked("Internal".to_string(), Box::new(pdb));
|
|
||||||
let db = db::Databases {
|
|
||||||
access: Arc::new(db::access::AccessControl::new()),
|
|
||||||
machine: Arc::new(machdb),
|
|
||||||
};
|
|
||||||
|
|
||||||
// Since the below closures will happen at a much later time we need to make sure all pointers
|
|
||||||
// are still valid. Thus, Arc.
|
|
||||||
let start_log = log.clone();
|
|
||||||
let stop_log = log.clone();
|
|
||||||
|
|
||||||
// Create a thread pool to run tasks on
|
|
||||||
let pool = ThreadPool::builder()
|
|
||||||
.after_start(move |i| {
|
|
||||||
info!(start_log.new(o!("system" => "threadpool")), "Starting Thread <{}>", i)
|
|
||||||
})
|
|
||||||
.before_stop(move |i| {
|
|
||||||
info!(stop_log.new(o!("system" => "threadpool")), "Stopping Thread <{}>", i)
|
|
||||||
})
|
|
||||||
.create()?;
|
|
||||||
let local_spawn = exec.spawner();
|
|
||||||
|
|
||||||
// Start all modules on the threadpool. The pool will run the modules until it is dropped.
|
|
||||||
// FIXME: implement notification so the modules can shut down cleanly instead of being killed
|
|
||||||
// without warning.
|
|
||||||
let modlog = log.clone();
|
|
||||||
let mut regs = Registries::new();
|
|
||||||
match exec.run_until(modules::init(modlog.new(o!("system" => "modules")), config.clone(), pool.clone(), regs.clone())) {
|
|
||||||
Ok(()) => {}
|
|
||||||
Err(e) => {
|
|
||||||
error!(modlog, "Module startup failed: {}", e);
|
|
||||||
return Err(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Closure inefficiencies. Lucky cloning an Arc is pretty cheap.
|
|
||||||
let inner_log = log.clone();
|
|
||||||
let loop_log = log.clone();
|
|
||||||
|
|
||||||
exec.run_until(async move {
|
|
||||||
// Generate a stream of TcpStreams appearing on any of the interfaces we listen to
|
|
||||||
let listeners = listeners_s.await;
|
|
||||||
let incoming = stream::select_all(listeners.iter().map(|l| l.incoming()));
|
|
||||||
|
|
||||||
// For each incoming connection start a new task to handle it
|
|
||||||
let handle_sockets = incoming.map(|socket| {
|
|
||||||
// incoming.next() returns an error when the underlying `accept` call yielded an error
|
|
||||||
// In POSIX those are protocol errors we can't really handle, so we just log the error
|
|
||||||
// and the move on
|
|
||||||
match socket {
|
|
||||||
Ok(socket) => {
|
|
||||||
// If we have it available add the peer's address to all log messages
|
|
||||||
let log =
|
|
||||||
if let Ok(addr) = socket.peer_addr() {
|
|
||||||
inner_log.new(o!("address" => addr))
|
|
||||||
} else {
|
|
||||||
inner_log.new(o!())
|
|
||||||
};
|
|
||||||
|
|
||||||
// Clone a log for potential error handling
|
|
||||||
let elog = log.clone();
|
|
||||||
|
|
||||||
// We handle the error using map_err
|
|
||||||
let f = connection::handle_connection(log.clone(), socket, db.clone())
|
|
||||||
.map_err(move |e| {
|
|
||||||
error!(log, "Error occured during protocol handling: {}", e);
|
|
||||||
})
|
|
||||||
// Void any and all results since pool.spawn allows no return value.
|
|
||||||
.map(|_| ());
|
|
||||||
|
|
||||||
// In this case only the error is relevant since the Value is always ()
|
|
||||||
// The future is Boxed to make it the `LocalFutureObj` that LocalSpawn expects
|
|
||||||
if let Err(e) = local_spawn.spawn_local_obj(Box::new(f).into()) {
|
|
||||||
error!(elog, "Failed to spawn connection handler: {}", e);
|
|
||||||
// Failing to spawn a handler means we are most likely overloaded
|
|
||||||
return LoopResult::Overloaded;
|
|
||||||
}
|
|
||||||
},
|
|
||||||
Err(e) => {
|
|
||||||
error!(inner_log, "Socket `accept` error: {}", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unless we are overloaded we just want to keep going.
|
|
||||||
return LoopResult::Continue;
|
|
||||||
});
|
|
||||||
|
|
||||||
// Check each signal as it arrives
|
|
||||||
let handle_signals = signal.map(|r| { r.unwrap() }).into_stream();
|
|
||||||
|
|
||||||
let mut combined = stream::select(handle_signals, handle_sockets);
|
|
||||||
|
|
||||||
// This is the basic main loop that drives execution
|
|
||||||
loop {
|
|
||||||
match combined.next().await {
|
|
||||||
// When the result says to continue, do exactly that
|
|
||||||
Some(LoopResult::Continue) => {}
|
|
||||||
Some(LoopResult::Overloaded) => {
|
|
||||||
// In case over server overload we should install a replacement handler that
|
|
||||||
// would instead just return `overloaded` for all connections until the
|
|
||||||
// situation is remedied.
|
|
||||||
//
|
|
||||||
// For now, just log the overload and keep going.
|
|
||||||
error!(loop_log, "Server overloaded");
|
|
||||||
}
|
|
||||||
// When the result says to stop the server, do exactly that.
|
|
||||||
// Also catches a `None` from the stream; None should never be returned because it
|
|
||||||
// would mean all sockets were closed and we can not receive any further signals.
|
|
||||||
// Still, in that case shut down cleanly anyway, the only reason this could happen
|
|
||||||
// are some heavy bugs in the runtime
|
|
||||||
Some(LoopResult::Stop) | None => {
|
|
||||||
warn!(loop_log, "Stopping server");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
// TODO: Run actual shut down code here
|
|
||||||
info!(log, "Shutting down...");
|
|
||||||
|
|
||||||
// Returning () is an implicit success so this will properly set the exit code as well
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The result of one iteration of the core loop
|
|
||||||
enum LoopResult {
|
|
||||||
/// Everything was fine, keep going
|
|
||||||
Continue,
|
|
||||||
/// Something happened that means we should shut down
|
|
||||||
Stop,
|
|
||||||
/// The Server is currently overloaded
|
|
||||||
Overloaded,
|
|
||||||
}
|
}
|
||||||
|
@ -8,17 +8,10 @@
|
|||||||
use slog::Logger;
|
use slog::Logger;
|
||||||
|
|
||||||
mod shelly;
|
mod shelly;
|
||||||
|
pub use shelly::Shelly;
|
||||||
|
|
||||||
use futures::prelude::*;
|
use futures::prelude::*;
|
||||||
use futures::task::Spawn;
|
use futures::task::Spawn;
|
||||||
|
|
||||||
use crate::config::Settings;
|
use crate::config::Settings;
|
||||||
use crate::error::Result;
|
use crate::error::Result;
|
||||||
use crate::registries::Registries;
|
|
||||||
|
|
||||||
// spawner is a type that allows 'tasks' to be spawned on it, running them to completion.
|
|
||||||
pub async fn init<S: Spawn + Clone + Send>(log: Logger, config: Settings, spawner: S, registries: Registries) -> Result<()> {
|
|
||||||
shelly::run(log.clone(), config.clone(), registries.clone(), spawner.clone()).await;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
@ -1,120 +1,61 @@
|
|||||||
use slog::Logger;
|
use slog::Logger;
|
||||||
|
|
||||||
use crate::config::Settings;
|
use crate::config::Settings;
|
||||||
use crate::registries::{Registries, Actuator, ActBox, StatusSignal};
|
|
||||||
use crate::error::Result;
|
use crate::error::Result;
|
||||||
use crate::db::machine::Status;
|
use crate::db::machine::Status;
|
||||||
|
|
||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
use futures::prelude::*;
|
use futures::prelude::*;
|
||||||
use futures::channel::mpsc;
|
use futures::channel::mpsc;
|
||||||
|
use futures::future::BoxFuture;
|
||||||
use futures::ready;
|
use futures::ready;
|
||||||
use futures::task::{Poll, Context, Waker, Spawn, FutureObj};
|
use futures::task::{Poll, Context, Waker, Spawn, FutureObj};
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use futures_signals::signal::Signal;
|
use futures_signals::signal::Signal;
|
||||||
|
|
||||||
|
use crate::actor::Actuator;
|
||||||
|
use crate::db::machine::MachineState;
|
||||||
|
|
||||||
use paho_mqtt as mqtt;
|
use paho_mqtt as mqtt;
|
||||||
|
|
||||||
// TODO: Late config parsing. Right now the config is validated at the very startup in its
|
/// An actuator for a Shellie connected listening on one MQTT broker
|
||||||
// entirety. This works reasonably enough for this static modules here but if we do dynamic loading
|
|
||||||
// via dlopen(), lua API, python API etc it will not.
|
|
||||||
pub async fn run<S: Spawn>(log: Logger, config: Settings, registries: Registries, spawner: S) {
|
|
||||||
let (tx, rx) = mpsc::channel(1);
|
|
||||||
let mut shelly = Shelly::new(log, config, rx).await;
|
|
||||||
|
|
||||||
let r = registries.actuators.register("shelly".to_string(), tx).await;
|
|
||||||
|
|
||||||
let f = shelly.for_each(|f| f);
|
|
||||||
spawner.spawn_obj(FutureObj::from(Box::pin(f)));
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/// An actuator for all Shellies connected listening on one MQTT broker
|
|
||||||
///
|
///
|
||||||
/// This actuator can power toggle an arbitrariy named shelly on the broker it is connected to. If
|
/// This actuator will toggle the shellie with the given `name`.
|
||||||
/// you need to toggle shellies on multiple brokers you need multiple instanced of this actuator.
|
/// If you need to toggle shellies on multiple brokers you need multiple instanced of this
|
||||||
struct Shelly {
|
/// actuator with different clients.
|
||||||
|
pub struct Shelly {
|
||||||
log: Logger,
|
log: Logger,
|
||||||
sigchan: mpsc::Receiver<StatusSignal>,
|
|
||||||
signal: Option<StatusSignal>,
|
|
||||||
waker: Option<Waker>,
|
|
||||||
name: String,
|
name: String,
|
||||||
client: mqtt::AsyncClient,
|
client: mqtt::AsyncClient,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Shelly {
|
impl Shelly {
|
||||||
// Can't use Error, it's not Send. fabinfra/fabaccess/bffh#7
|
pub fn new(log_view: &Logger, name: String, client: mqtt::AsyncClient) -> Self {
|
||||||
pub async fn new(log: Logger, config: Settings, sigchan: mpsc::Receiver<StatusSignal>) -> Self {
|
let log = log_view.new(o!("shelly_name" => name.clone()));
|
||||||
let client = mqtt::AsyncClient::new(config.shelly.unwrap().mqtt_url).unwrap();
|
debug!(log, "Starting shelly module for {}", &name);
|
||||||
|
Shelly { log, name, client, }
|
||||||
|
}
|
||||||
|
|
||||||
let o = client.connect(mqtt::ConnectOptions::new()).await.unwrap();
|
/// Set the name to a new one. This changes the shelly that will be activated
|
||||||
println!("{:?}", o);
|
pub fn set_name(&mut self, new_name: String) {
|
||||||
|
let log = self.log.new(o!("shelly_name" => new_name.clone()));
|
||||||
let name = "test".to_string();
|
self.name = new_name;
|
||||||
let signal: Option<StatusSignal> = None;
|
self.log = log;
|
||||||
let waker = None;
|
|
||||||
|
|
||||||
Shelly { log, sigchan, signal, waker, name, client }
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
impl Actuator for Shelly {
|
impl Actuator for Shelly {
|
||||||
fn subscribe(&mut self, signal: StatusSignal) {
|
fn apply(&mut self, state: MachineState) -> BoxFuture<'static, ()> {
|
||||||
self.signal.replace(signal);
|
info!(self.log, "Machine Status changed: {:?}", state);
|
||||||
if let Some(waker) = self.waker.take() {
|
let topic = format!("shellies/{}/relay/0/command", self.name);
|
||||||
waker.wake();
|
let pl = match state.state {
|
||||||
}
|
Status::InUse(_, _) => "on",
|
||||||
}
|
_ => "off",
|
||||||
}
|
};
|
||||||
|
let msg = mqtt::Message::new(topic, pl, 0);
|
||||||
impl Stream for Shelly {
|
let f = self.client.publish(msg).map(|_| ());
|
||||||
type Item = future::BoxFuture<'static, ()>;
|
|
||||||
|
return Box::pin(f);
|
||||||
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
|
|
||||||
let unpin = Pin::into_inner(self);
|
|
||||||
|
|
||||||
info!(unpin.log, "tick {}", unpin.signal.is_some());
|
|
||||||
|
|
||||||
if let Poll::Ready(v) = Stream::poll_next(Pin::new(&mut unpin.sigchan), cx) {
|
|
||||||
if let Some(s) = v {
|
|
||||||
// We have received a new signal to use
|
|
||||||
unpin.signal.replace(s);
|
|
||||||
// We use `if let` instead of .and_then because we want the waker to be dropped
|
|
||||||
// afterwards. It's only there to ensure the future is called when a signal is
|
|
||||||
// installed the first time
|
|
||||||
// TODO probably don't need that here because we're polling it either way directly
|
|
||||||
// afterwards, eh?
|
|
||||||
if let Some(waker) = unpin.waker.take() {
|
|
||||||
waker.wake();
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
info!(unpin.log, "bye");
|
|
||||||
// This means that the sending end was dropped, so we shut down
|
|
||||||
unpin.signal.take();
|
|
||||||
unpin.waker.take();
|
|
||||||
return Poll::Ready(None);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(ref mut s) = unpin.signal {
|
|
||||||
if let Some(status) = ready!(Signal::poll_change(Pin::new(s), cx)) {
|
|
||||||
info!(unpin.log, "Machine Status changed: {:?}", status);
|
|
||||||
let topic = format!("shellies/{}/relay/0/command", unpin.name);
|
|
||||||
let pl = match status {
|
|
||||||
Status::InUse(_) => "on",
|
|
||||||
_ => "off",
|
|
||||||
};
|
|
||||||
let msg = mqtt::Message::new(topic, pl, 0);
|
|
||||||
let f = unpin.client.publish(msg).map(|_| ());
|
|
||||||
|
|
||||||
return Poll::Ready(Some(Box::pin(f)));
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
info!(unpin.log, "I ain't got no signal son");
|
|
||||||
unpin.waker.replace(cx.waker().clone());
|
|
||||||
}
|
|
||||||
|
|
||||||
Poll::Pending
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
85
src/network.rs
Normal file
85
src/network.rs
Normal file
@ -0,0 +1,85 @@
|
|||||||
|
use std::fmt;
|
||||||
|
|
||||||
|
use std::sync::{Arc, Mutex, MutexGuard, TryLockResult};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use smol::Executor;
|
||||||
|
|
||||||
|
use futures::channel::mpsc;
|
||||||
|
use futures_signals::signal::{Signal, MutableSignalCloned, Mutable};
|
||||||
|
|
||||||
|
use crate::machine::Machine;
|
||||||
|
use crate::actor::{Actor, ActorSignal};
|
||||||
|
use crate::initiator::Initiator;
|
||||||
|
use crate::db::machine::MachineState;
|
||||||
|
|
||||||
|
use crate::error::Result;
|
||||||
|
|
||||||
|
pub type MachineMap = HashMap<String, Machine>;
|
||||||
|
pub type ActorMap = HashMap<String, Mutex<mpsc::Sender<Option<ActorSignal>>>>;
|
||||||
|
pub type InitMap = HashMap<String, Mutable<Option<Machine>>>;
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq, Eq)]
|
||||||
|
pub enum Error {
|
||||||
|
NoSuchInitiator,
|
||||||
|
NoSuchMachine,
|
||||||
|
NoSuchActor,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for Error {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
Error::NoSuchInitiator => write!(f, "No initiator found with that name"),
|
||||||
|
Error::NoSuchActor => write!(f, "No actor found with that name"),
|
||||||
|
Error::NoSuchMachine => write!(f, "No machine found with that name"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Main signal network
|
||||||
|
///
|
||||||
|
/// Network as per FRP, not the one with packages and frames
|
||||||
|
// TODO De/Serialize established connection on startup/shutdown.
|
||||||
|
pub struct Network {
|
||||||
|
inits: InitMap,
|
||||||
|
|
||||||
|
// Store connections
|
||||||
|
//miconn: Vec<(String, String)>,
|
||||||
|
|
||||||
|
pub machines: MachineMap,
|
||||||
|
|
||||||
|
// Store connections
|
||||||
|
//maconn: Vec<(String, String)>,
|
||||||
|
|
||||||
|
actors: ActorMap,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Network {
|
||||||
|
pub fn new(machines: MachineMap, actors: ActorMap, inits: InitMap) -> Self {
|
||||||
|
Self { machines, actors, inits }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn connect_init(&self, init_key: &String, machine_key: &String) -> Result<()> {
|
||||||
|
let init = self.inits.get(init_key)
|
||||||
|
.ok_or(Error::NoSuchInitiator)?;
|
||||||
|
let machine = self.machines.get(machine_key)
|
||||||
|
.ok_or(Error::NoSuchMachine)?;
|
||||||
|
|
||||||
|
init.set(Some(machine.clone()));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn connect_actor(&mut self, machine_key: &String, actor_key: &String)
|
||||||
|
-> Result<()>
|
||||||
|
{
|
||||||
|
let machine = self.machines.get(machine_key)
|
||||||
|
.ok_or(Error::NoSuchMachine)?;
|
||||||
|
let actor = self.actors.get(actor_key)
|
||||||
|
.ok_or(Error::NoSuchActor)?;
|
||||||
|
|
||||||
|
// FIXME Yeah this should not unwrap. Really, really shoudln't.
|
||||||
|
let mut guard = actor.try_lock().unwrap();
|
||||||
|
|
||||||
|
guard.try_send(Some(Box::new(machine.signal()))).map_err(|_| Error::NoSuchActor.into())
|
||||||
|
}
|
||||||
|
}
|
@ -1,24 +0,0 @@
|
|||||||
mod actuators;
|
|
||||||
mod sensors;
|
|
||||||
|
|
||||||
pub use actuators::{Actuator, ActBox, StatusSignal};
|
|
||||||
pub use sensors::{Sensor, SensBox};
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
/// BFFH registries
|
|
||||||
///
|
|
||||||
/// This struct is only a reference to the underlying registries - cloning it will generate a new
|
|
||||||
/// reference, not clone the registries
|
|
||||||
pub struct Registries {
|
|
||||||
pub actuators: actuators::Actuators,
|
|
||||||
pub sensors: sensors::Sensors,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Registries {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Registries {
|
|
||||||
actuators: actuators::Actuators::new(),
|
|
||||||
sensors: sensors::Sensors::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,82 +0,0 @@
|
|||||||
use slog::Logger;
|
|
||||||
|
|
||||||
use std::sync::Arc;
|
|
||||||
use smol::lock::RwLock;
|
|
||||||
|
|
||||||
use std::pin::Pin;
|
|
||||||
use futures::ready;
|
|
||||||
use futures::prelude::*;
|
|
||||||
use futures::channel::mpsc;
|
|
||||||
use futures::task::{Context, Poll, Spawn};
|
|
||||||
use futures_signals::signal::Signal;
|
|
||||||
|
|
||||||
use crate::db::machine::Status;
|
|
||||||
|
|
||||||
use std::collections::HashMap;
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct Actuators {
|
|
||||||
inner: Arc<RwLock<Inner>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub type ActBox = Box<dyn Actuator + Sync + Send + Unpin>;
|
|
||||||
|
|
||||||
type Inner = HashMap<String, mpsc::Sender<StatusSignal>>;
|
|
||||||
|
|
||||||
impl Actuators {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Actuators {
|
|
||||||
inner: Arc::new(RwLock::new(Inner::new()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn register(&self, name: String, tx: mpsc::Sender<StatusSignal>) {
|
|
||||||
let mut wlock = self.inner.write().await;
|
|
||||||
// TODO: Log an error or something if that name was already taken
|
|
||||||
wlock.insert(name, tx);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn subscribe(&mut self, name: String, signal: StatusSignal) {
|
|
||||||
let mut wlock = self.inner.write().await;
|
|
||||||
if let Some(tx) = wlock.get_mut(&name) {
|
|
||||||
tx.send(signal).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub type StatusSignal = Pin<Box<dyn Signal<Item = Status> + Send + Sync>>;
|
|
||||||
|
|
||||||
pub trait Actuator: Stream<Item = future::BoxFuture<'static, ()>> {
|
|
||||||
fn subscribe(&mut self, signal: StatusSignal);
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is merely a proof that Actuator *can* be implemented on a finite, known type. Yay for type
|
|
||||||
// systems with halting problems.
|
|
||||||
struct Dummy {
|
|
||||||
log: Logger,
|
|
||||||
sigchan: mpsc::Receiver<StatusSignal>,
|
|
||||||
signal: Option<StatusSignal>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Actuator for Dummy {
|
|
||||||
fn subscribe(&mut self, signal: StatusSignal) {
|
|
||||||
self.signal.replace(signal);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Stream for Dummy {
|
|
||||||
type Item = future::BoxFuture<'static, ()>;
|
|
||||||
|
|
||||||
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
|
|
||||||
let unpin = Pin::into_inner(self);
|
|
||||||
if let Some(ref mut s) = unpin.signal {
|
|
||||||
let status = ready!(Signal::poll_change(Pin::new(s), cx));
|
|
||||||
|
|
||||||
info!(unpin.log, "Dummy actuator would set status to {:?}, but is a Dummy", status);
|
|
||||||
|
|
||||||
Poll::Ready(Some(Box::pin(futures::future::ready(()))))
|
|
||||||
} else {
|
|
||||||
Poll::Pending
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,73 +0,0 @@
|
|||||||
use std::pin::Pin;
|
|
||||||
use futures::task::{Context, Poll};
|
|
||||||
use futures::{Future, Stream};
|
|
||||||
use futures::future::BoxFuture;
|
|
||||||
|
|
||||||
use std::sync::Arc;
|
|
||||||
use smol::lock::RwLock;
|
|
||||||
use std::collections::HashMap;
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct Sensors {
|
|
||||||
inner: Arc<RwLock<Inner>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Sensors {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Sensors {
|
|
||||||
inner: Arc::new(RwLock::new(Inner::new())),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub type SensBox = Box<dyn Sensor + Send + Sync>;
|
|
||||||
type Inner = HashMap<String, SensBox>;
|
|
||||||
|
|
||||||
|
|
||||||
// Implementing Sensors.
//
// Given the coroutine/task split stays as it is - Sensor input to machine update being one,
// machine update signal to actor doing thing being another, a Sensor implementation would send a
// Stream of futures - each future being an atomic Machine update.
#[async_trait]
/// BFFH Sensor
///
/// A sensor is anything that can forward an intent of an user to do something to bffh.
/// This may be a card reader connected to a machine, a website allowing users to select a machine
/// they want to use or something like QRHello
pub trait Sensor: Stream<Item = BoxFuture<'static, ()>> {
    /// Setup the Sensor.
    ///
    /// After this async function completes the Stream implementation should be able to generate
    /// futures when polled.
    /// Implementations can rely on this function being polled to completion before the stream
    /// is polled.
    // TODO Is this sensible vs just having module-specific setup fns?
    async fn setup(&mut self);

    /// Shutdown the sensor gracefully.
    ///
    /// Implementations can rely on that the stream will not be polled after this function has been
    /// called.
    async fn shutdown(&mut self);
}
|
|
||||||
|
|
||||||
struct Dummy;
|
|
||||||
#[async_trait]
|
|
||||||
impl Sensor for Dummy {
|
|
||||||
async fn setup(&mut self) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn shutdown(&mut self) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Stream for Dummy {
    // Each item is a boxed future representing one (here: no-op) user intent.
    type Item = BoxFuture<'static, ()>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        // NOTE(review): this is unconditionally ready with a completed no-op future —
        // anything polling it in a loop will spin hot since Pending is never returned.
        // Presumably acceptable for a test dummy; confirm it is never used in production.
        Poll::Ready(Some(Box::pin(futures::future::ready(()))))
    }
}
|
|
176
src/server.rs
Normal file
176
src/server.rs
Normal file
@ -0,0 +1,176 @@
|
|||||||
|
use slog::Logger;
|
||||||
|
|
||||||
|
use crate::config;
|
||||||
|
use crate::config::Settings;
|
||||||
|
use crate::error::Error;
|
||||||
|
use crate::connection;
|
||||||
|
|
||||||
|
use smol::net::TcpListener;
|
||||||
|
use smol::net::unix::UnixStream;
|
||||||
|
use smol::LocalExecutor;
|
||||||
|
|
||||||
|
use clap::{App, Arg};
|
||||||
|
|
||||||
|
use futures::prelude::*;
|
||||||
|
use futures::executor::{LocalPool, ThreadPool};
|
||||||
|
use futures::compat::Stream01CompatExt;
|
||||||
|
use futures::join;
|
||||||
|
use futures::task::LocalSpawn;
|
||||||
|
|
||||||
|
use std::io;
|
||||||
|
use std::io::Write;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use crate::db::Databases;
|
||||||
|
use crate::network::Network;
|
||||||
|
|
||||||
|
/// Handle all API connections and run the RPC tasks spawned from that on the local thread.
///
/// Binds a TCP listener for every entry in `config.listens`, accepts connections and
/// dispatches each one to a `ConnectionHandler` task on a thread-local executor.
/// Blocks the calling thread until a SIGINT arrives (or the combined socket/signal
/// stream ends), then returns `Ok(())`.
pub fn serve_api_connections(log: Arc<Logger>, config: Settings, db: Databases, nw: Network)
    -> Result<(), Error>
{
    // Future that completes (with LoopResult::Stop) once a SIGINT has been delivered.
    let signal = Box::pin(async {
        let (tx, mut rx) = UnixStream::pair()?;
        // Initialize signal handler.
        // We currently only care about Ctrl-C so SIGINT it is.
        // TODO: Make this do SIGHUP and a few others too. (By cloning the tx end of the pipe)
        signal_hook::pipe::register(signal_hook::SIGINT, tx)?;
        // When a signal is received this future can complete and read a byte from the underlying
        // socket — the actual data is discarded but the act of being able to receive data tells us
        // that we received a SIGINT.

        // FIXME: What errors are possible and how to handle them properly?
        rx.read_exact(&mut [0u8]).await?;

        io::Result::Ok(LoopResult::Stop)
    });

    // Bind to each address in config.listens.
    // This is a Stream over Futures so it will do absolutely nothing unless polled to completion
    let listeners_s: futures::stream::Collect<_, Vec<TcpListener>>
        = stream::iter((&config).listens.iter())
        .map(|l| {
            let addr = l.address.clone();
            let port = l.port.unwrap_or(config::DEFAULT_PORT);
            info!(&log, "Binding to {} port {}.", l.address.as_str(), &port);
            TcpListener::bind((l.address.as_str(), port))
                // If the bind errors, include the address so we can log it
                // Since this closure is lazy we need to have a cloned addr
                .map_err(move |e| { (addr, port, e) })
        })
        // Filter out the sockets we couldn't open and log those
        .filter_map(|f| async {
            match f.await {
                Ok(l) => Some(l),
                Err((addr, port, e)) => {
                    error!(&log, "Could not setup socket on {} port {}: {}", addr, port, e);
                    None
                }
            }
        }).collect();

    // Connection handling tasks aren't Send, so they run on a thread-local executor.
    let local_ex = LocalExecutor::new();

    let network = Arc::new(nw);

    let inner_log = log.clone();
    let loop_log = log.clone();

    smol::block_on(local_ex.run(async {
        // Generate a stream of TcpStreams appearing on any of the interfaces we listen to
        let listeners = listeners_s.await;
        let incoming = stream::select_all(listeners.iter().map(|l| l.incoming()));

        let mut handler = connection::ConnectionHandler::new(inner_log.new(o!()), db, network.clone());

        // For each incoming connection start a new task to handle it
        let handle_sockets = incoming.map(|socket| {
            // incoming.next() returns an error when the underlying `accept` call yielded an error
            // In POSIX those are protocol errors we can't really handle, so we just log the error
            // and then move on
            match socket {
                Ok(socket) => {
                    // If we have it available add the peer's address to all log messages
                    let log =
                        if let Ok(addr) = socket.peer_addr() {
                            inner_log.new(o!("address" => addr))
                        } else {
                            inner_log.new(o!())
                        };

                    // Clone a log for potential error handling
                    // NOTE(review): `elog` is never used — the map_err closure below moves
                    // `log` itself. Confirm whether this clone can be removed.
                    let elog = log.clone();

                    // We handle the error using map_err
                    // NOTE(review): "occured" in the message below is a typo ("occurred");
                    // left untouched here since it is a runtime string.
                    let f = handler.handle(socket)
                        .map_err(move |e| {
                            error!(log, "Error occured during protocol handling: {}", e);
                        })
                        // Void any and all results since pool.spawn allows no return value.
                        .map(|_| ());

                    // Spawn the connection context onto the local executor since it isn't Send
                    // Also `detach` it so the task isn't canceled as soon as it's dropped.
                    // TODO: Store all those tasks to have a easier way of managing them?
                    local_ex.spawn(f).detach();
                },
                Err(e) => {
                    error!(inner_log, "Socket `accept` error: {}", e);
                }
            }

            // Unless we are overloaded we just want to keep going.
            return LoopResult::Continue;
        });

        info!(&log, "Started");

        // Check each signal as it arrives
        let handle_signals = signal.map(|r| { r.unwrap() }).into_stream();

        let mut combined = stream::select(handle_signals, handle_sockets);

        // This is the basic main loop that drives execution
        loop {
            match combined.next().await {
                // When the result says to continue, do exactly that
                Some(LoopResult::Continue) => {}
                Some(LoopResult::Overloaded) => {
                    // In case over server overload we should install a replacement handler that
                    // would instead just return `overloaded` for all connections until the
                    // situation is remedied.
                    //
                    // For now, just log the overload and keep going.
                    error!(loop_log, "Server overloaded");
                }
                // When the result says to stop the server, do exactly that.
                // Also catches a `None` from the stream; None should never be returned because it
                // would mean all sockets were closed and we can not receive any further signals.
                // Still, in that case shut down cleanly anyway, the only reason this could happen
                // are some heavy bugs in the runtime
                Some(LoopResult::Stop) | None => {
                    warn!(loop_log, "Stopping server");
                    break;
                }
            }
        }
    }));

    // TODO: Run actual shut down code here
    info!(log, "Shutting down...");

    // Returning () is an implicit success so this will properly set the exit code as well
    Ok(())
}
|
||||||
|
|
||||||
|
/// The result of one iteration of the core loop
///
/// Derives the standard traits so results can be logged (`Debug`), copied freely
/// (`Copy`/`Clone` — the enum is a plain discriminant), and compared in tests
/// (`PartialEq`/`Eq`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LoopResult {
    /// Everything was fine, keep going
    Continue,
    /// Something happened that means we should shut down
    Stop,
    /// The Server is currently overloaded
    Overloaded,
}
|
Loading…
Reference in New Issue
Block a user