Compare commits

...

384 Commits

Author SHA1 Message Date
314145084e update schema to latest 2025-03-05 23:51:27 +01:00
5e0d615ada make schema (API) submodule static to allow easy project-forking 2025-03-05 23:09:37 +01:00
e4b7aa8766 remove dirty examples/ dir. proper and up to date examples can be found at https://gitlab.com/fabinfra/fabaccess/demos-environments for instance 2025-03-04 00:35:25 +01:00
bbf0e77571 remove obsolete test/ directory 2025-02-24 13:17:33 +01:00
40ca2acdb7 cleanup/update contributing infos 2025-02-20 16:44:37 +01:00
3e29df8b2c Remove obsolete installation docs, href the user to docs.fab-access.org instead 2025-02-20 16:18:42 +01:00
fdde8a933c Cleanup: remove /docs dir because we put it to https://docs.fab-access.org 2025-02-20 15:35:19 +01:00
Jonathan Krebs
c8f0337c1e set release date for 0.4.3 2025-02-11 16:56:25 +01:00
Jonathan Krebs
151c04d9df record changes since 0.4.2 2025-01-24 17:39:52 +01:00
Jonathan Krebs
1d2ba9eddc implement dumping and loading the full database 2025-01-24 04:45:03 +01:00
Jonathan Krebs
ee21f74d2d minimal dependency update to make it compatible with current rust 2025-01-24 04:30:37 +01:00
=
06083a63e3 Fix compilation error caused by overlooked renames: diflouroborane -> difluoroborane 2024-12-15 22:29:14 +01:00
57a0436ca1 fix typo; fixes https://gitlab.com/fabinfra/fabaccess/bffh/-/issues/68 2024-12-14 21:54:01 +01:00
Jonathan Krebs
8b15acf983 remove warnings around initiator loading. cleaner error handling remains todo. 2024-12-13 15:32:21 +01:00
Jonathan Krebs
40ba114e61 fix warnings: at the moment configuration by environment variables is not implemented 2024-12-13 15:32:21 +01:00
Jonathan Krebs
c2c34ede67 fix warnings: remove unused muts and variables 2024-12-13 15:32:21 +01:00
Jonathan Krebs
2b0fe0e868 add some error handling, mostly to quiet warnings 2024-12-13 15:32:21 +01:00
Jonathan Krebs
fbfb76c34e fix warnings: some more easy cases 2024-12-13 15:32:21 +01:00
Jonathan Krebs
971dee36fd fix warnings: replace some mem::replace with assignments 2024-12-13 15:32:21 +01:00
Jonathan Krebs
41983e6039 remove unused imports from bffhd 2024-12-13 15:32:21 +01:00
Falko Richter
ca25cd83d0 Update INSTALL.md 2024-11-20 12:29:45 +00:00
Jonathan Krebs
9805f4ee04 lightproc: drop span guard before deallocating the process
This should fix #77
2024-11-09 11:44:43 +01:00
Falko Richter
66877159f0 fixed links to the other repos 2024-07-22 06:53:45 +00:00
Nadja Reitzenstein
a9143b0cdd pin toolchain to a known good version while we get that 'fun' segfault 2023-02-23 17:00:19 +01:00
Nadja Reitzenstein
165b269d4f Fix example config 2023-02-23 14:25:44 +01:00
Nadja Reitzenstein
e35e2b7334 cargo fmt 2023-02-09 17:07:31 +01:00
Nadja Reitzenstein
98c2e3fd01 rsasl update 2023-02-09 17:07:22 +01:00
Kai Jan Kriegel
27f5413e3d i guess i forgot how to format string? 2023-02-06 18:07:42 +01:00
Kai Jan Kriegel
55e9bf6e2b check the configured space urn and not a hardcoded one 2023-02-03 21:35:53 +01:00
Kai Jan Kriegel
4cdbfd8925 fix bind / unbind card 2023-02-03 08:20:39 +01:00
Nadja Reitzenstein
7a85667a44 Whoops that was a premature push 2023-01-31 16:18:18 +01:00
Nadja Reitzenstein
cf3853263a Make spacename/instanceurl required and enable card interface 2023-01-31 16:16:00 +01:00
Nadja Reitzenstein
946a08c19c I should really read my own documentation sometimes. 2023-01-11 14:51:38 +01:00
Nadja Reitzenstein
e42a32934a Implement remaining card management 2023-01-09 17:05:48 +01:00
Nadja Reitzenstein
24c02fccff Implement partial card mgmnt 2023-01-09 17:05:48 +01:00
Nadja Reitzenstein
beecb54d38 Move miette towards edges of BFFH for more structured error reporting 2023-01-09 17:05:46 +01:00
Kai Kriegel
0716a75ee6 Add support for binary FabReader Mechanism 2023-01-02 05:00:29 +00:00
Kai Kriegel
0380e02f3f reworked CI 2023-01-02 03:59:09 +00:00
Nadja Reitzenstein
0d2cd6f376 No path deps for good reasons™ 2022-11-02 15:01:44 +01:00
TheJoKlLa
410ed8cb33 Revert binarayfabfire 2022-11-01 15:00:52 +01:00
Kai Jan Kriegel
7a941c3338 actually return the value 2022-11-01 13:24:04 +00:00
Kai Jan Kriegel
8776fa3ca2 return correct length from step 2022-11-01 13:24:04 +00:00
Kai Jan Kriegel
5c4cb32d1a initial support for binary version of FabFire 2022-11-01 13:24:04 +00:00
Nadja Reitzenstein
1971515601 Merge branch 'feature/rsasl-update' into development
* feature/rsasl-update:
  Update to latest rsasl
  Port rsasl
2022-11-01 10:48:14 +01:00
Nadja Reitzenstein
0ed53f5cc9 Update to latest rsasl 2022-11-01 10:47:51 +01:00
Nadja Reitzenstein
47524ef038 Merge remote-tracking branch 'origin/development' into development
* origin/development:
  Added paho-mqtt
2022-10-07 13:43:50 +02:00
Nadja Reitzenstein
a8cc1be87d Merge tag 'v0.4.2' into development
v0.4.2

* tag 'v0.4.2':
2022-10-07 13:43:12 +02:00
Nadja Reitzenstein
95ee4228bd Merge branch 'release/0.4.2'
* release/0.4.2: (51 commits)
  Update process initiator to make shelly-timeout doable
  Improve error messages on missing config
  Refactor Config into dhall module
  Add dumping the user db
  Fix log format settings
  Implement password change functionality
  Better error wrapper type
  Start taking control over exit on argument parsing failure
  Return a better error when --load is given a directory
  Output plentiful trace info for API calls
  log all api calls with `TRACE` level
  Add a connection-specific span to each API handler
  Runtime things furthermore
  Allow tracking cgroups with futures
  Oh whoops handle that
  Get started on supervision trees
  Attach a GroupID to all LightProcs
  Noting down improvement ideas for procs
  More ideas about how to record data
  A number of small updates batched into one commit
  ...
2022-10-07 13:43:03 +02:00
Nadja Reitzenstein
3cf152a164 Port rsasl 2022-10-05 17:28:47 +02:00
TheJoKlLa
29bfe61a2c Added paho-mqtt 2022-08-24 12:42:28 +00:00
Nadja Reitzenstein
ec1cac9443 Update process initiator to make shelly-timeout doable 2022-08-22 19:05:57 +02:00
Nadja Reitzenstein
f3278fcf05 Merge branch 'feature/env_config' into development
* feature/env_config:
  Improve error messages on missing config
  Refactor Config into dhall module
2022-07-24 17:52:53 +02:00
Nadja Reitzenstein
e3423c7786 Improve error messages on missing config 2022-07-24 17:51:17 +02:00
Nadja Reitzenstein
9346e433e1 Refactor Config into dhall module 2022-07-24 17:11:28 +02:00
Nadja Reitzenstein
aeaae4cd7b Add dumping the user db 2022-07-24 16:39:33 +02:00
Nadja Reitzenstein
218a316571 Fix log format settings 2022-07-24 16:07:49 +02:00
Nadja Reitzenstein
70c94feced Implement password change functionality 2022-07-11 12:30:26 +02:00
Nadja Reitzenstein
1fc13405e8 Merge branch 'feature/better-errors' into development
* feature/better-errors:
  Better error wrapper type
  Start taking control over exit on argument parsing failure
  Return a better error when --load is given a directory
2022-07-11 12:15:33 +02:00
Nadja Reitzenstein
a79293add1 Better error wrapper type 2022-07-11 12:14:56 +02:00
Nadja Reitzenstein
7a0a50dc3f Start taking control over exit on argument parsing failure 2022-06-24 15:17:05 +02:00
Nadja Reitzenstein
c9a8ef7db4 Return a better error when --load is given a directory
Closes: #55
2022-06-24 15:16:46 +02:00
Nadja Reitzenstein
e1377d0f79 Merge branch 'feature/spanned-api-handling' into development
* feature/spanned-api-handling:
  Output plentiful trace info for API calls
  log all api calls with `TRACE` level
  Add a connection-specific span to each API handler
2022-06-24 14:35:41 +02:00
Nadja Reitzenstein
257fbf5506 Output plentiful trace info for API calls 2022-06-24 14:35:31 +02:00
Nadja Reitzenstein
1ff3f2afb7 log all api calls with TRACE level 2022-06-24 13:57:51 +02:00
Nadja Reitzenstein
13bbe2bee9 Add a connection-specific span to each API handler 2022-06-24 12:25:52 +02:00
Nadja Reitzenstein
fac0a9ba94 Merge branch 'feature/runtime-improvements' into development
* feature/runtime-improvements:
  Runtime things furthermore
  Allow tracking cgroups with futures
  Oh whoops handle that
  Get started on supervision trees
  Attach a GroupID to all LightProcs
  Noting down improvement ideas for procs
  More ideas about how to record data
  A number of small updates batched into one commit
  Improve Drop guards
  Even more console shenanigans
  tracing more data
  Some bits work \o/
  Console is attached and compiles
  More console features
  Use `ManuallyDrop` instead of `mem::forget` where appropriate
  More console implementation stuff
  Start on the runtime console subscriber
2022-06-24 12:25:11 +02:00
Nadja Reitzenstein
57fb279092 Runtime things furthermore 2022-06-24 12:24:29 +02:00
Nadja Reitzenstein
77e0935945 Allow tracking cgroups with futures 2022-06-23 21:19:38 +02:00
Nadja Reitzenstein
e7358838d5 Oh whoops handle that 2022-06-23 17:31:57 +02:00
Nadja Reitzenstein
3075e1c027 Get started on supervision trees 2022-06-23 17:28:41 +02:00
Nadja Reitzenstein
7e113bab47 Attach a GroupID to all LightProcs 2022-06-23 17:28:18 +02:00
Nadja Reitzenstein
ff727b6d97 Noting down improvement ideas for procs 2022-06-23 16:00:21 +02:00
Nadja Reitzenstein
2f5f7cb0d7 More ideas about how to record data 2022-06-23 14:37:30 +02:00
Nadja Reitzenstein
567df800f3 A number of small updates batched into one commit 2022-06-23 13:33:29 +02:00
Nadja Reitzenstein
9a86bae45a Improve Drop guards 2022-06-23 13:22:51 +02:00
Nadja Reitzenstein
18d69063fd Even more console shenanigans 2022-06-22 19:01:58 +02:00
Nadja Reitzenstein
2d8d6f9938 tracing more data 2022-06-22 14:43:14 +02:00
Nadja Reitzenstein
aef36fa3d4 Some bits work \o/ 2022-06-21 22:48:12 +02:00
Nadja Reitzenstein
287ca9806d Console is attached and compiles 2022-06-21 22:24:40 +02:00
Nadja Reitzenstein
35c9f45f6d More console features 2022-06-21 19:12:02 +02:00
Nadja Reitzenstein
ee0593dc6f Use ManuallyDrop instead of mem::forget where appropriate 2022-06-21 16:21:13 +02:00
Nadja Reitzenstein
8a35818b4f More console implementation stuff 2022-06-21 16:20:52 +02:00
Nadja Reitzenstein
df7bd80d06 Start on the runtime console subscriber 2022-06-21 13:06:21 +02:00
Nadja Reitzenstein
58f40d98ed Implement PermissionSystem::getRoleList()
Closes: #62
2022-06-20 15:20:00 +02:00
Nadja Reitzenstein
728c33f444 Merge branch 'feature/new_initiator' into development
* feature/new_initiator:
  Process initiator working
  Reimplement the dummy initiator
2022-06-18 16:52:59 +02:00
Nadja Reitzenstein
a66303566a Process initiator working 2022-06-18 16:52:30 +02:00
Nadja Reitzenstein
6d8d1384d9 Reimplement the dummy initiator 2022-06-07 14:05:57 +02:00
Nadja Reitzenstein
9100811c50 Merge branch 'feature/bettererrors' into development
* feature/bettererrors:
  Switch out anyhow for miette
  More trace output of role checking
  Better errors when the db directory is missing
2022-06-02 17:48:54 +02:00
Nadja Reitzenstein
5f2214abe9 Switch out anyhow for miette 2022-06-02 17:46:36 +02:00
Nadja Reitzenstein
17fd08b7e5 More trace output of role checking 2022-05-31 13:45:51 +02:00
Nadja Reitzenstein
a43c38c118 Better errors when the db directory is missing 2022-05-31 13:11:48 +02:00
Nadja Reitzenstein
84a4e9791e INSTALL docs on running the bin 2022-05-31 13:08:14 +02:00
Nadja Reitzenstein
e7828cd7f5 Add note to CONTRIBUTING about user cargo config 2022-05-31 12:19:58 +02:00
Nadja Reitzenstein
7861568ca1 Cross-compilation docs 2022-05-31 12:13:12 +02:00
Nadja Reitzenstein
2cb7a28967 Merge branch 'feature/schema-updates' into development
* feature/schema-updates:
  Update api dependency in Cargo.lock
  Update api version
  Update build.rs to not output to tracked dir
  Drop git tracked pregenerated code
  Drop pregenerated modules
  Update API submodule
2022-05-31 12:11:02 +02:00
Nadja Reitzenstein
a436b93e56 Merge branch 'feature/appid' into development
* feature/appid:
  update fabfire desfire appid
2022-05-31 12:06:28 +02:00
Kai Jan Kriegel
50b4394cfd update fabfire desfire appid 2022-05-24 23:41:42 +02:00
Nadja Reitzenstein
360d6bfced Update api dependency in Cargo.lock 2022-05-20 19:34:48 +02:00
Nadja Reitzenstein
0f264bed0e Update api version 2022-05-20 19:29:21 +02:00
Nadja Reitzenstein
cd052fcaf0 Update build.rs to not output to tracked dir 2022-05-20 19:29:03 +02:00
Nadja Reitzenstein
78bd75ae05 Drop git tracked pregenerated code 2022-05-20 19:26:01 +02:00
Nadja Reitzenstein
523c091284 Drop pregenerated modules 2022-05-20 19:25:30 +02:00
Nadja Reitzenstein
7784313a95 Update API submodule 2022-05-20 18:35:13 +02:00
Nadja Reitzenstein
66c8ed4a8c Merge branch 'feature/duplicate-users' into development
* feature/duplicate-users:
  Add a duplicate user example table
  Include source error message when failing --load
2022-05-20 18:28:01 +02:00
Nadja Reitzenstein
689c2b5353 Add a duplicate user example table 2022-05-20 18:27:54 +02:00
Nadja Reitzenstein
aa71c6bf4a Include source error message when failing --load
Fixes #59
2022-05-20 18:27:33 +02:00
Nadja Reitzenstein
d35477c806 Merge branch 'feature/version-improvements' into development
* feature/version-improvements:
  Slightly more logging during authentication
  Switch to shadow-rs
  --version talks about API version too
  Start on improving a few version thingies
2022-05-20 18:19:50 +02:00
Nadja Reitzenstein
f905b1f375 Slightly more logging during authentication 2022-05-20 18:19:05 +02:00
Nadja Reitzenstein
25df5bf5b2 Switch to shadow-rs
Closes #58
2022-05-18 17:01:24 +02:00
Nadja Reitzenstein
c435f76d08 --version talks about API version too 2022-05-14 15:36:32 +02:00
Nadja Reitzenstein
d591daa884 Start on improving a few version thingies 2022-05-14 15:09:35 +02:00
Nadja Reitzenstein
bfde6c03dc Merge branch 'release/v0.4.2'
* release/v0.4.2: (31 commits)
  Bump version to 0.4.2
  Archive Cargo.lock
  Absolute path to cargo2junit
  Install cargo2junit in test build
  whoops
  Ah yes, why bother with correct documentation anyway?
  Move rustup/cargo install to only the jobs that need them
  Allow rustfmt failure until we fix capnp gen being fmt'ed
  ...
  okay I guess?
  rustup
  actually we don't need clippy for non-MR things
  okay gitlab, be that way
  and use stable goddamit
  okay make builds work better for merges
  Actually, only build if linting checks out. And make Gitlab CI work.
  Try to get the Gitlab CI to cooperate.
  Build test harness as part of the `build` step
  Make docker containers only be built when necessary
  Correct gitlab-ci.yml
  ...
2022-05-13 18:32:28 +02:00
Nadja Reitzenstein
67ff33ba79 Bump version to 0.4.2 2022-05-13 18:31:16 +02:00
Nadja Reitzenstein
b30dc0033b Archive Cargo.lock 2022-05-13 18:30:57 +02:00
Nadja Reitzenstein
3c9777e3cf Absolute path to cargo2junit 2022-05-05 22:15:58 +02:00
Nadja Reitzenstein
d132b8f172 Install cargo2junit in test build 2022-05-05 22:11:54 +02:00
Nadja Reitzenstein
938e1ade28 whoops 2022-05-05 22:07:21 +02:00
Nadja Reitzenstein
2a1e4c59bc Ah yes, why bother with correct documentation anyway? 2022-05-05 21:56:49 +02:00
Nadja Reitzenstein
2479a6972d Move rustup/cargo install to only the jobs that need them 2022-05-05 21:27:27 +02:00
Nadja Reitzenstein
59736c088d Allow rustfmt failure until we fix capnp gen being fmt'ed 2022-05-05 21:24:30 +02:00
Nadja Reitzenstein
ae94ba0be6 ... 2022-05-05 21:19:45 +02:00
Nadja Reitzenstein
67b46a85bb okay I guess? 2022-05-05 21:18:01 +02:00
Nadja Reitzenstein
eb8aa5a352 rustup 2022-05-05 21:12:52 +02:00
Nadja Reitzenstein
26608bdf21 actually we don't need clippy for non-MR things 2022-05-05 21:10:28 +02:00
Nadja Reitzenstein
96bed54c29 okay gitlab, be that way 2022-05-05 21:09:12 +02:00
Nadja Reitzenstein
38869c6623 and use stable goddamit 2022-05-05 21:08:15 +02:00
Nadja Reitzenstein
a5f5209371 okay make builds work better for merges 2022-05-05 21:06:13 +02:00
Nadja Reitzenstein
cb63d3fef8 Actually, only build if linting checks out. And make Gitlab CI work. 2022-05-05 21:02:06 +02:00
Nadja Reitzenstein
c4d74115f8 Try to get the Gitlab CI to cooperate. 2022-05-05 19:37:35 +02:00
Nadja Reitzenstein
57b98cf15e Build test harness as part of the build step 2022-05-05 19:34:51 +02:00
Nadja Reitzenstein
4265393c30 Make docker containers only be built when necessary 2022-05-05 19:28:40 +02:00
Nadja Reitzenstein
08d7512d01 Correct gitlab-ci.yml 2022-05-05 19:25:06 +02:00
Nadja Reitzenstein
d6858ab5a5 rustfmt 2022-05-05 19:22:02 +02:00
Nadja Reitzenstein
3078d5dab8 add pre-push hook to check formatting 2022-05-05 19:21:51 +02:00
Nadja Reitzenstein
481649c8d2 Update gitlab-ci.yml 2022-05-05 19:13:23 +02:00
Nadja Reitzenstein
f1c726f672 Make cargo test --tests only run integration tests 2022-05-05 18:02:56 +02:00
Nadja Reitzenstein
ce204b9bc1 Remove sincerely obsolete code 2022-05-05 17:51:51 +02:00
Nadja Reitzenstein
be5a600abf Move api/schema submodule to relative path 2022-05-05 17:36:53 +02:00
Nadja Reitzenstein
9e2be12fbd Make unit tests compile 2022-05-05 17:22:54 +02:00
Nadja Reitzenstein
76f59d7196 Remove things that should be in your global .gitignore 2022-05-05 15:57:29 +02:00
Nadja Reitzenstein
5f7397588a libgsasl is not required anymore 2022-05-05 15:53:11 +02:00
Nadja Reitzenstein
212f657289 add a version controlled git pre-commit hook 2022-05-05 15:52:03 +02:00
Nadja Reitzenstein
2d9f30b55b Run rustfmt 2022-05-05 15:50:44 +02:00
Nadja Reitzenstein
475cb9b9b4 Prepare for pushing 0.4.1 to main for public release
* main:
  stay on v0.2 compatible api version
  Don't default to MQTT 3.1 either
  Sets a 20 second MQTT keepalive interval
  Make MQTT client try to reconnect on connection lost or disconnect
  Disclose machines that are used by yourself
  replace master with main in ci
  Update dependencies and move rsasl to ARM/AArch64-compatible version
  Stable release v0.2.1
2022-05-03 17:19:37 +02:00
Nadja Reitzenstein
3b0b4710f6 Correct CHANGELOG 2022-05-03 17:16:25 +02:00
Nadja Reitzenstein
cae3b3a83e Correctly dump and recreate user db on --load 2022-04-30 20:52:32 +02:00
Nadja Reitzenstein
cfaf4d509e Better error reporting for auth
Fixes: #49
2022-04-30 20:22:51 +02:00
Nadja Reitzenstein
c35d3bc6b1 User mgmnt api improvements 2022-04-28 21:00:03 +02:00
Nadja Reitzenstein
505afccbf8 Implement addUserFallible instead of addUser 2022-04-28 20:38:44 +02:00
Nadja Reitzenstein
ed10b15a10 Only set search cap for manage for now 2022-04-28 20:35:06 +02:00
Nadja Reitzenstein
869f3d5e5f Add user search impl 2022-04-28 20:33:46 +02:00
Nadja Reitzenstein
b6c9b61511 impl relevant proto changes 2022-04-27 20:27:14 +02:00
Nadja Reitzenstein
1070d9e6eb update schema submodule 2022-04-27 20:25:09 +02:00
Nadja Reitzenstein
46e3552e04 Implements a first bit of User management. 2022-04-27 20:19:04 +02:00
Nadja Reitzenstein
fe992a9446 Oh also update CHANGELOG 2022-04-27 19:22:07 +02:00
Nadja Reitzenstein
f4d793621c Remove doc warnings for now and fix others 2022-04-27 17:30:04 +02:00
Nadja Reitzenstein
4e10a981b2 Return full version string for --version
Fixes #53
2022-04-27 17:23:05 +02:00
Nadja Reitzenstein
4ca4dc124b Pinning rsasl version to commit 2022-04-27 17:21:07 +02:00
Kai Jan Kriegel
df64d43e03 ran cargo fix 2022-04-26 23:21:43 +02:00
Kai Jan Kriegel
ba99f6f131 fixed Dockerfile 2022-04-26 23:18:50 +02:00
Kai Jan Kriegel
2c4b3c9cd0 also build machine if user has read perm 2022-04-24 20:10:06 +02:00
Nadja Reitzenstein
846fb09433 Merge branch 'feature/api-0.3' into feature/cleanup
* feature/api-0.3: (37 commits)
  Rerun on changed BFFHD_BUILD_TAGGED_RELEASE env var
  Log version on start
  Make build step only use git info when not building a tagged release
  Reverse visibility check to properly disclose machines
  Only return `use` interface if machine is currently free or reserved by me
  allow bffh to build outside of a git repo
  enable multistage auth
  Fix auth
  Update API
  update desfire crate to version on crates.io
  working Desfire auth in the api!
  fix stupid logic error
  initial integration of the X-FABFIRE mechanism
  Improve TLS support
  Fixes warnings
  Port to rsasl2
  More cleanup.
  Makes rumqttc futures run on the tokio runtime as required.
  Use our own MQTT URL dissector so existing configs don't break
  Fixing more warnings
  ...
2022-04-22 20:14:59 +02:00
Nadja Reitzenstein
3300105082 Implement changes from fabaccess-api#24 2022-04-22 20:09:34 +02:00
Nadja Reitzenstein
047f7bc1de sensible search return values for non-existent/non-visible resources 2022-04-22 19:57:58 +02:00
Nadja Reitzenstein
15c878e1d2 comp 2022-04-21 23:04:07 +02:00
Nadja Reitzenstein
5538dd6751 User admin methods 2022-04-21 23:04:07 +02:00
Nadja Reitzenstein
3eab5b8702 Make auditlog log 2022-04-21 23:04:07 +02:00
Nadja Reitzenstein
c402c71abc Keep old config system 2022-04-21 23:04:07 +02:00
Nadja Reitzenstein
e6682ca8a8 Remove copyright for now 2022-04-21 23:04:07 +02:00
Nadja Reitzenstein
b02afe5575 Splitting config 2022-04-21 23:04:07 +02:00
Kai Jan Kriegel
28d90f1078 bump base image 2022-03-29 19:55:09 +02:00
Kai Jan Kriegel
831b18128d fix fabfire mechanism integration and improve logging 2022-03-19 06:00:21 +01:00
Kai Jan Kriegel
41f8b83cd5 log outcome of loading user db 2022-03-19 05:54:18 +01:00
Kai Jan Kriegel
9bd1b917a2 modified dockerfile 2022-03-17 16:37:49 +01:00
Kai Jan Kriegel
8c6b53d050 reduce rsasl to minimal required features 2022-03-17 02:13:09 +01:00
Nadja Reitzenstein
33e8a62d2a Implement more API to make Borepin happier 2022-03-16 20:17:59 +01:00
Nadja Reitzenstein
b88c6f69cd Updating rsasl version 2022-03-16 19:32:19 +01:00
Nadja Reitzenstein
538b0b28f1 Importing X-FABFIRE auth mechanism 2022-03-16 19:32:03 +01:00
Nadja Reitzenstein
cb8cda39cd Rerun on changed BFFHD_BUILD_TAGGED_RELEASE env var 2022-03-16 19:14:24 +01:00
Nadja Reitzenstein
bd8c2d2173 Make build step only use git info when not building a tagged release 2022-03-16 19:14:24 +01:00
Kai Jan Kriegel
ba005b3f9f allow bffh to build outside of a git repo 2022-03-16 19:10:55 +01:00
Nadja Reitzenstein
29a44bdb6a Make capnp machines api work again 2022-03-16 19:01:09 +01:00
Nadja Reitzenstein
2b7044d498 Compile with new DB system 2022-03-16 18:10:59 +01:00
Nadja Reitzenstein
7f362c7ab4 Rerun on changed BFFHD_BUILD_TAGGED_RELEASE env var 2022-03-16 15:17:09 +01:00
Nadja Reitzenstein
80ceb4af34 Log version on start 2022-03-16 15:16:51 +01:00
Nadja Reitzenstein
dae9d0c93b Make build step only use git info when not building a tagged release 2022-03-16 15:13:30 +01:00
Nadja Reitzenstein
cc2b43a9f2 Reverse visibility check to properly disclose machines
Machines that you have disclose on are always shown.
Machines you *don't* have `disclose` on are *also* shown *iff* you are using them.
2022-03-16 15:10:13 +01:00
Nadja Reitzenstein
069819bb9a Only return use interface if machine is currently free or reserved by me 2022-03-16 15:10:13 +01:00
Kai Jan Kriegel
4feb21e7fc allow bffh to build outside of a git repo 2022-03-16 05:42:56 +01:00
Kai Jan Kriegel
27539429f7 enable multistage auth 2022-03-16 05:42:19 +01:00
Nadja Reitzenstein
1156174d7a Remove DB code that assumes alignment 2022-03-15 21:53:21 +01:00
Nadja Reitzenstein
a145efc948 LMDB does not guarantee alignment. 2022-03-15 21:24:21 +01:00
Nadja Reitzenstein
648026574f Commit on useradd 2022-03-15 20:00:52 +01:00
Nadja Reitzenstein
c0b311e14c Cargo fix 2022-03-15 20:00:43 +01:00
Nadja Reitzenstein
2e5f343d52 Tracing and dumping 2022-03-15 19:56:41 +01:00
Nadja Reitzenstein
48003ef51c Cleanup authentication 2022-03-15 19:16:33 +01:00
Nadja Reitzenstein
a111a86266 Impl roles 2022-03-15 19:14:04 +01:00
Nadja Reitzenstein
d7467989ef Splitting bffhd setup routines 2022-03-15 19:13:55 +01:00
Nadja Reitzenstein
c317101b93 Merge remote-tracking branch 'origin/feature/desfire-auth' into feature/api-0.3
* origin/feature/desfire-auth:
  update desfire crate to version on crates.io
  working Desfire auth in the api!
  fix stupid logic error
  initial integration of the X-FABFIRE mechnism
2022-03-15 18:58:51 +01:00
Nadja Reitzenstein
5c9b72c37d Session initialization 2022-03-15 17:52:47 +01:00
Nadja Reitzenstein
2e9c7fbc19 Make Users DB a global resource 2022-03-15 16:28:11 +01:00
Nadja Reitzenstein
4ff0abd161 DB indexing code 2022-03-15 16:27:52 +01:00
Nadja Reitzenstein
75c449c83a Fix auth 2022-03-13 23:58:03 +01:00
Nadja Reitzenstein
15f31ffd7c Update API 2022-03-13 23:31:00 +01:00
Nadja Reitzenstein
ddd8add270 User db & loading 2022-03-13 22:50:37 +01:00
Nadja Reitzenstein
c4dac55b23 Copy actor types 2022-03-13 21:54:48 +01:00
Nadja Reitzenstein
613e62c7e6 Actor loading & configuring 2022-03-13 21:34:05 +01:00
Nadja Reitzenstein
bd98f13f67 Simplify Actor impl to have a static source 2022-03-13 20:38:11 +01:00
Nadja Reitzenstein
07a7cbe42b Resources lookup impl 2022-03-13 20:33:26 +01:00
Nadja Reitzenstein
d5833f30c4 Adding machines back to config 2022-03-13 20:14:50 +01:00
Nadja Reitzenstein
cc48dcca17 DB up/down/sidegrade 2022-03-13 20:11:37 +01:00
Kai Jan Kriegel
926d200c93 update desfire crate to version on crates.io 2022-03-13 18:05:36 +01:00
Nadja Reitzenstein
df5ee9a0a1 Api framework impl 2022-03-13 17:29:21 +01:00
Nadja Reitzenstein
999463e0e9 Merge branch 'development' into feature/cleanup
* development: (21 commits)
  Update INSTALL.md
  Update INSTALL.md
  Better tls connection handling with smol::io::split
  Add development cert/key files
  Implement TLS handling
  Add rustls dependencies
  Fix #29
  Set previous user on all state changes if required
  Fix URN
  Config improvements and make shellies have a topic parameter
  Fix Machines using their name instead of their id
  Revert "added first start logic to seed db and keep state"
  Lock!
  Make machine correctly load state and set previous/current use in API
  Also catch SIGQUIT and SIGTERM and clean up properly
  Returns current user
  added first start logic to seed db and keep state
  Implement getting a machine by URN
  Also set URN and wiki links on machine lists
  Implement wiki and URN links
  ...
2022-03-12 17:31:58 +01:00
Nadja Reitzenstein
87af5fde94 Implement more API 2022-03-12 17:31:53 +01:00
Nadja Reitzenstein
ee57c2b275 Update schema 2022-03-12 15:02:15 +01:00
Nadja Reitzenstein
c5852d50fb Cleaning up code 2022-03-12 14:23:07 +01:00
Kai Jan Kriegel
5c5c9710c5 working Desfire auth in the api! 2022-03-12 10:45:09 +01:00
Nadja Reitzenstein
7015fd755a move things for the new API schema 2022-03-12 02:00:55 +01:00
Nadja Reitzenstein
b419cd5472 update schema 2022-03-12 01:56:05 +01:00
Nadja Reitzenstein
b78971a21d Update schema to new main 2022-03-12 01:28:27 +01:00
Nadja Reitzenstein
20a47d9444 Improve examples 2022-03-12 01:28:07 +01:00
Nadja Reitzenstein
f367207d01 Pull more things from 0.3.2 2022-03-12 01:27:58 +01:00
Nadja Reitzenstein
495f9cb36a Make auditlog work 2022-03-12 01:27:41 +01:00
Kai Jan Kriegel
37db05a557 fix stupid logic error 2022-03-12 00:51:42 +01:00
Kai Jan Kriegel
4611ed5b48 initial integration of the X-FABFIRE mechanism
Integrates the fabfire mechanism for use with the jorisdevice and desfire cards
2022-03-12 00:45:59 +01:00
Nadja Reitzenstein
3e4350d2cf Import auditlog from v0.3.2 2022-03-11 23:00:02 +01:00
Nadja Reitzenstein
c54b44e720 Run until signal 2022-03-11 22:43:50 +01:00
Nadja Reitzenstein
4489f710d8 Fix a segfault caused by moving into an uninitialized pointer 2022-03-11 22:43:34 +01:00
Nadja Reitzenstein
f79e73d669 update config parsing to new format 2022-03-11 22:17:51 +01:00
Nadja Reitzenstein
28340a4ad3 Improve documentation around example setup 2022-03-11 22:15:10 +01:00
Nadja Reitzenstein
13bfb2fbee Moving towards implementing the 0.3.2 featureset 2022-03-11 22:13:54 +01:00
Nadja Reitzenstein
4f36eedf6a Let's try to get this as the next v0.3 2022-03-10 20:52:34 +01:00
Nadja Reitzenstein
487dc2270d Move API back to v0.3 2022-03-10 20:52:03 +01:00
Nadja Reitzenstein
0531156b9e Improve TLS support 2022-03-09 02:40:38 +01:00
Nadja Reitzenstein
520a33f0aa Module refactor part 2 2022-03-08 18:56:03 +01:00
Nadja Reitzenstein
e643a50f4d Module refactor part 2 2022-03-08 18:52:49 +01:00
Nadja Reitzenstein
10e4ff080c Modules renaming 2022-03-08 16:41:38 +01:00
Nadja Reitzenstein
f932ff8e7a reduce warnings and do more things 2022-03-07 18:27:54 +01:00
Nadja Reitzenstein
c8623fd62b Fixes warnings 2022-03-02 17:28:41 +01:00
Nadja Reitzenstein
e130e59651 Port to rsasl2
Closes #45
2022-03-02 17:21:22 +01:00
Nadja Reitzenstein
4306b5b691 More cleanup.
Also, this MR closes #48
2022-02-26 14:45:17 +01:00
Nadja Reitzenstein
ea863e71af Makes rumqttc futures run on the tokio runtime as required. 2022-02-26 14:30:06 +01:00
Nadja Reitzenstein
e9b1ba1f50 Use our own MQTT URL dissector so existing configs don't break 2022-02-26 14:16:46 +01:00
Nadja Reitzenstein
e6cb1a958d Fixing more warnings 2022-02-26 14:02:47 +01:00
Nadja Reitzenstein
68418161d7 Replaces paho_mqtt with rumqttc 2022-02-26 14:01:06 +01:00
Nadja Reitzenstein
53cdfeda5d Fix warnings 2022-02-26 14:00:00 +01:00
Nadja Reitzenstein
07e181b107 Update dependencies 2022-02-26 11:38:53 +01:00
Nadja Reitzenstein
a7d30930ba Don't default to MQTT 3.1 either 2022-02-26 11:31:46 +01:00
Nadja Reitzenstein
337e8aa563 Sets a 20 second MQTT keepalive interval 2022-02-26 11:31:46 +01:00
Nadja Reitzenstein
2777645205 Make MQTT client try to reconnect on connection lost or disconnect 2022-02-26 11:31:45 +01:00
Nadja Reitzenstein
89b292a8ac Disclose machines that are used by yourself 2022-02-26 11:31:24 +01:00
Kai Jan Kriegel
a67d7b4331 stay on v0.2 compatible api version 2022-02-17 15:03:56 +01:00
Nadja Reitzenstein
bdcf3bae67 Don't default to MQTT 3.1 either 2022-02-15 18:28:58 +01:00
Nadja Reitzenstein
53f871ac49 Sets a 20 second MQTT keepalive interval 2022-02-15 17:43:50 +01:00
Nadja Reitzenstein
0d2f0a49da Make MQTT client try to reconnect on connection lost or disconnect 2022-02-14 20:00:16 +01:00
Nadja Reitzenstein
7019f2d065 Disclose machines that are used by yourself 2022-02-14 20:00:16 +01:00
Kai Kriegel
62ac767839 replace master with main in ci 2022-02-14 18:58:50 +00:00
Nadja Reitzenstein
b543b3b80d State commit 2022-02-14 17:38:48 +01:00
Nadja Reitzenstein
73162d278f Adds an example usecase for args 2022-01-17 20:00:40 +01:00
Nadja Reitzenstein
9fbacc171b Adds an example python process actor 2022-01-17 19:54:53 +01:00
Nadja Reitzenstein
4858a6a6fb Implement a simple audit log 2022-01-17 18:36:22 +01:00
Nadja Reitzenstein
19abba371e Delete outdated pass.toml 2022-01-17 18:36:22 +01:00
Nadja Reitzenstein
bf9fadbf74 Implement getAPIVersion and getServerRelease methods 2022-01-17 18:36:22 +01:00
Nadja Reitzenstein
0da3213395 Improve documentation around example setup
Fixes: #38
2022-01-17 18:36:22 +01:00
Nadja Reitzenstein
f524079914 Implement categories support 2022-01-17 18:36:22 +01:00
Nadja Reitzenstein
17005c0536 Properly parse optional config elements 2022-01-17 18:36:22 +01:00
Nadja Reitzenstein
70bfdbbf4e Update API files 2022-01-17 18:36:22 +01:00
Nadja Reitzenstein
a0c280eae4 Export some more version metadata 2022-01-17 18:36:22 +01:00
Nadja Reitzenstein
cd42130c2f Update dependencies and move rsasl to ARM/AArch64-compatible version 2022-01-12 16:18:55 +01:00
Joris
1020e21e24 Update INSTALL.md 2022-01-11 17:52:41 +00:00
Joris
ac6dbefa6f Update INSTALL.md 2022-01-11 17:50:20 +00:00
Nadja Reitzenstein
03ff3fcf86 Modules state commit 2022-01-05 21:56:32 +01:00
Nadja Reitzenstein
2c1b522021 Whoops, lost a commit or two there 2022-01-05 21:50:04 +01:00
Nadja Reitzenstein
4778c7a8d3 Commit current state 2021-12-17 16:43:31 +01:00
Nadja Reitzenstein
d7a66e2149 Actors, Initiators, and Sensors first draft 2021-12-15 23:42:16 +01:00
Nadja Reitzenstein
ac723170c9 Stable release v0.2.1 2021-12-11 03:40:31 +01:00
Nadja Reitzenstein
f397e1e636 Better tls connection handling with smol::io::split 2021-12-09 21:54:57 +01:00
Nadja Reitzenstein
9571afbcc7 Add development cert/key files 2021-12-09 21:01:41 +01:00
Nadja Reitzenstein
83f5fe8265 Implement TLS handling 2021-12-09 20:54:54 +01:00
Nadja Reitzenstein
eb2e24a48c Add rustls dependencies 2021-12-09 20:54:43 +01:00
Nadja Reitzenstein
a09c3d3880 Fix #29 2021-12-07 23:02:26 +01:00
Nadja Reitzenstein
8db5580c90 Stuff 2021-12-06 21:53:42 +01:00
Nadja Reitzenstein
6b88191dc5 Set previous user on all state changes if required
Fixes: #30
2021-12-05 23:43:36 +01:00
Nadja Reitzenstein
6e91295cc0 Fix URN 2021-12-05 23:38:05 +01:00
Nadja Reitzenstein
6d3e08955a Config improvements and make shellies have a topic parameter 2021-12-05 19:23:35 +01:00
Nadja Reitzenstein
eeb0ff306b Fix Machines using their name instead of their id 2021-12-05 18:53:02 +01:00
Kai Kriegel
fd7fec2cbe Revert "added first start logic to seed db and keep state"
This reverts commit ab3ac8f730f836fdec9130133477bfef35bcd8d4.
2021-12-02 00:42:00 +01:00
Nadja Reitzenstein
bedde0e19f Allow static generation of schema code 2021-12-01 17:12:57 +01:00
Nadja Reitzenstein
2a57ce2c28 Lock! 2021-12-01 15:47:28 +01:00
Nadja Reitzenstein
47781b445e Make machine correctly load state and set previous/current use in API 2021-12-01 15:46:52 +01:00
Nadja Reitzenstein
8c28e50cac Start with API implementation 2021-11-26 22:11:24 +01:00
Nadja Reitzenstein
26b2888a09 DB Refactor 2021-11-26 21:01:43 +01:00
Nadja Reitzenstein
9fcb7664aa Also catch SIGQUIT and SIGTERM and clean up properly 2021-11-26 03:42:30 +01:00
Nadja Reitzenstein
200179f621 Returns current user 2021-11-26 02:21:35 +00:00
Nadja Reitzenstein
b16c660058 Clean up structure a bit 2021-11-26 02:25:48 +01:00
Kai Kriegel
ab3ac8f730 added first start logic to seed db and keep state 2021-11-25 23:53:04 +00:00
Nadja Reitzenstein
76a1def456 Implement getting a machine by URN 2021-11-26 00:08:24 +01:00
Nadja Reitzenstein
80df913089 Also set URN and wiki links on machine lists 2021-11-25 23:45:00 +01:00
Nadja Reitzenstein
32894300f4 Kick out asyncio into an external crate for later 2021-11-25 23:36:17 +01:00
Nadja Reitzenstein
ad5c4061de more asyncio foo 2021-11-24 21:39:11 +01:00
Nadja Reitzenstein
ec78aa6fc9 . 2021-11-24 02:44:34 +01:00
Nadja Reitzenstein
55d6609e33 Executor compiles 2021-11-14 17:51:48 +01:00
Nadja Reitzenstein
24be65b3d9 LightProc fork working 2021-11-14 17:50:59 +01:00
Nadja Reitzenstein
3231b51f89 Make more things burn less 2021-10-28 01:10:35 +02:00
Nadja Reitzenstein
150b2e68d9 More fragmentation 2021-10-28 00:32:25 +02:00
Nadja Reitzenstein
4e60b5d767 Move API into it's own crate 2021-10-27 23:42:50 +02:00
Nadja Reitzenstein
00077a7510 Also, don't publish 2021-10-27 23:40:29 +02:00
Nadja Reitzenstein
0cca818cc1 Restructure 2021-10-27 23:20:46 +02:00
Nadja Reitzenstein
a336f83e75 Make clion complain less 2021-10-27 21:37:54 +02:00
Nadja Reitzenstein
b95d21a092 Burn more CPUs! 2021-10-27 21:32:50 +02:00
Nadja Reitzenstein
4844fcc0c9 Remove old code 2021-10-27 18:01:15 +02:00
Nadja Reitzenstein
a14d3fa072 stuff 2021-10-27 17:53:00 +02:00
Nadja Reitzenstein
082b4cc28a init bastion 2021-10-27 17:03:50 +02:00
Nadja Reitzenstein
4ff6263db7 Build cleanup 2021-10-27 17:03:34 +02:00
Nadja Reitzenstein
48978326af Clean up dependencies 2021-10-27 14:49:45 +02:00
Nadja Reitzenstein
937d271b20 It's evening 2021-10-20 20:56:47 +02:00
Nadja Reitzenstein
9e244aab7e All the things. 2021-10-20 18:37:50 +02:00
Nadja Reitzenstein
a7754f057b Cleanup of permissions.rs 2021-10-20 14:11:56 +02:00
Nadja Reitzenstein
efe2da87d3 Further cleanup 2021-10-20 13:47:44 +02:00
Nadja Reitzenstein
eec6c3b60c Add Permissions back 2021-10-20 13:47:32 +02:00
Nadja Reitzenstein
541f8585c0 Update config.rs to compile 2021-10-20 12:58:05 +02:00
Nadja Reitzenstein
7bcb0712ae Implement wiki and URN links 2021-10-20 09:43:39 +02:00
Nadja Reitzenstein
80b6807f21 Current state commit 2021-10-19 11:16:24 +02:00
Nadja Reitzenstein
4d2b0ea29c Update schema 2021-10-18 11:41:10 +02:00
Nadja Reitzenstein
41142ecb4c Schema update™ 2021-10-18 11:29:25 +02:00
Nadja Reitzenstein
d837e1c364 Make tests compile 2021-10-18 11:28:50 +02:00
Nadja Reitzenstein
fb8cbfc864 Working new State encoding 2021-10-18 10:39:31 +02:00
Nadja Reitzenstein
f2679a3408 Serde part works now 2021-10-13 14:15:52 +02:00
Nadja Reitzenstein
8d7a4ac5be the worst part of all of this is that it works :D 2021-10-13 04:57:40 +02:00
Nadja Reitzenstein
33131f38c4 Whoo boy this is a big one 2021-10-07 16:44:01 +02:00
Nadja Reitzenstein
6a6bc4e452 Tear it down so you can rebuild it again 2021-10-06 13:53:14 +02:00
Nadja Reitzenstein
65830af01d Stuff 2021-09-30 10:26:04 +02:00
Nadja Reitzenstein
0a9ae09984 Move to new schema 2021-09-30 10:26:04 +02:00
Nadja Reitzenstein
2cc7a800f5 split cfg 2021-09-30 10:26:04 +02:00
Nadja Reitzenstein
04d3c5d267 Batch module draft 2021-09-30 10:26:04 +02:00
Nadja Reitzenstein
14f7ad32f3 Alpha Release 0.3.0 2021-09-30 10:10:33 +02:00
Nadja Reitzenstein
14402d627c Be consistent in the api 2021-09-21 22:57:50 +02:00
Nadja Reitzenstein
75f8911c1f cleanup 2021-09-21 22:57:50 +02:00
Kai Kriegel
c5f72d9508 added volume for adapter scripts 2021-09-21 19:17:48 +02:00
Nadja Reitzenstein
ba73fe80c5 Re-enable MQTT 2021-09-21 18:45:35 +02:00
Nadja Reitzenstein
73f63238a8 state 2021-09-21 17:49:21 +02:00
Nadja Reitzenstein
04052b6193 better output and examples 2021-09-21 09:22:54 +02:00
Nadja Reitzenstein
aa58657122 Let's not break, shall we? ^^' 2021-09-21 09:06:49 +02:00
Nadja Reitzenstein
bd635d97ac Make work 2021-09-21 07:54:18 +02:00
Nadja Reitzenstein
143416a308 Update example config 2021-09-21 07:54:18 +02:00
Nadja Reitzenstein
f4fead00e6 Only return caps the user can have 2021-09-21 07:54:18 +02:00
Nadja Reitzenstein
27791ed19b Move around Ownership until it compiles 2021-09-21 07:54:18 +02:00
Nadja Reitzenstein
e678e67d32 Pointerwise moving is even easier 2021-09-21 07:54:18 +02:00
Nadja Reitzenstein
d29b0c207d Unused lifetime parameter are unused. 2021-09-21 07:54:18 +02:00
Nadja Reitzenstein
4e3bb44040 Start the user API 2021-09-21 07:54:18 +02:00
Nadja Reitzenstein
006ae0af68 Better user 2021-09-21 07:54:18 +02:00
Nadja Reitzenstein
2fe6aa41c1 Dump RoleIdentifier the right way around 2021-09-21 07:54:18 +02:00
Kai Kriegel
ce7f678afa added a docker volume for bffh db 2021-09-20 20:21:34 +00:00
Nadja Reitzenstein
c2fa76de42 update schema submodule 2021-09-19 21:55:06 +02:00
Kai Kriegel
3709c13af5 fix config file for docker 2021-09-19 19:45:52 +00:00
Nadja Reitzenstein
bf840a2c94 GetMachine impl 2021-09-19 20:34:03 +02:00
Nadja Reitzenstein
ba7a59c3de Stuff to make work 2021-09-19 19:48:16 +02:00
Nadja Reitzenstein
abc9126137 Fix clippy lints 2021-09-19 19:48:16 +02:00
Kai Kriegel
b033b6dc07 Update docker build system 2021-09-19 16:30:14 +00:00
Kai Kriegel
7d2213f239 Update .gitlab-ci.yml 2021-09-19 16:27:33 +00:00
Kai Kriegel
1cab3ab264 Update .gitlab-ci.yml 2021-09-19 16:24:58 +00:00
Nadja Reitzenstein
660fe5ed9e Most of Machine implemented 2021-09-19 15:58:37 +02:00
Nadja Reitzenstein
e5903961d1 More API implementation 2021-09-19 15:58:37 +02:00
Nadja Reitzenstein
b8a9b64953 Remove removed schemata include 2021-09-19 15:58:37 +02:00
Gregor Reitzenstein
a1355aaa6a Fix all tests 2021-09-19 15:58:37 +02:00
Gregor Reitzenstein
8ceee0bd94 Better database abstraction 2021-09-19 15:58:37 +02:00
Gregor Reitzenstein
d0b73c9b49 make compile 2021-09-19 15:58:37 +02:00
Gregor Reitzenstein
05cbfc7199 Update schema to latest main 2021-09-19 15:58:37 +02:00
Gregor Reitzenstein
4362eaf36d Commit updated Cargo.lock 2021-09-19 15:58:37 +02:00
Gregor Reitzenstein
8e1c0ea0a3 Setup new schema modules 2021-09-19 15:58:37 +02:00
Gregor Reitzenstein
320521d28b Update crate dependencies 2021-09-19 15:58:37 +02:00
Gregor Reitzenstein
678a274544 Make the build script smarter 2021-09-19 15:58:37 +02:00
Gregor Reitzenstein
076675370a Update capnp crate version 2021-09-19 15:58:37 +02:00
Gregor Reitzenstein
38dd0e7055 Switch schema to new structure 2021-09-19 15:58:37 +02:00
Nadja Reitzenstein
913c4ea746 Add feature requests text to CONTRIBUTING.md 2021-09-19 15:58:14 +02:00
Nadja Reitzenstein
000ffbc2dc Update CONTRIBUTING.md 2021-09-19 15:44:13 +02:00
Gregor Reitzenstein
16c94ae473 Merge branch 'feature/configchecker' into 'development'
do a basic config check

See merge request fabinfra/fabaccess/bffh!8
2021-08-29 07:09:14 +00:00
Kai Kriegel
8d337248a8 only do a basic config check
Printing of normalized dhall will have to wait until i figure out how to supply the needed type info to the serializer
2021-08-28 23:20:41 +02:00
Kai Kriegel
4349a6fee6 Updated submodule schema 2021-08-28 23:20:31 +02:00
Kai Kriegel
fc614d7ce2 added config check 2021-08-27 21:51:44 +02:00
Gregor Reitzenstein
8ce5c2f6ff Minimal draft implementation of getGiveBack
It's untested but it should(TM) work.
2021-03-23 15:24:58 +01:00
Gregor Reitzenstein
dcdbc42274 Ignore IDEA setting files 2021-03-23 14:24:48 +01:00
Gregor Reitzenstein
19a17f80ba Update lock and API 2021-03-23 14:20:58 +01:00
179 changed files with 20277 additions and 4900 deletions

2
.gitignore vendored
View File

@ -1,3 +1 @@
/target /target
**/*.rs.bk
tags

View File

@ -1,121 +1,350 @@
# Official language image. Look for the different tagged releases at: # Define slightly different stages.
# https://hub.docker.com/r/library/rust/tags/ # Additionally, lint the code before anything else to fail more quickly
image: "rust:latest" stages:
- lint
- check
- build
- test
- release
- dockerify
# Optional: Pick zero or more services to be used on all builds. default:
# Only needed when using a docker container to run your tests in. image: "registry.gitlab.com/fabinfra/rust-builder:latest"
# Check out: http://docs.gitlab.com/ce/ci/docker/using_docker_images.html#what-is-a-service tags:
# services: - linux
# - mysql:latest - docker
# - redis:latest - fabinfra
# - postgres:latest
variables: variables:
GIT_SUBMODULE_STRATEGY: recursive GIT_SUBMODULE_STRATEGY: recursive
# CARGO_HOME: $CI_PROJECT_DIR/cargo CARGO_HOME: $CI_PROJECT_DIR/cargo
APT_CACHE_DIR: $CI_PROJECT_DIR/apt APT_CACHE_DIR: $CI_PROJECT_DIR/apt
FF_USE_FASTZIP: "true" # enable fastzip - a faster zip implementation that also supports level configuration.
ARTIFACT_COMPRESSION_LEVEL: fast # can also be set to fastest, fast, slow and slowest. If just enabling fastzip is not enough try setting this to fastest or fast.
CACHE_COMPRESSION_LEVEL: fastest # same as above, but for caches
TRANSFER_METER_FREQUENCY: 5s # will display transfer progress every 5 seconds for artifacts and remote caches.
# install build dependencies
before_script:
- apt-get update -yqq
- apt-get install -o dir::cache::archives="$APT_CACHE_DIR" -yqq --no-install-recommends capnproto build-essential cmake clang libclang-dev libgsasl7-dev
- rustup update
- rustup component add rustfmt
- rustup component add clippy
# Use clippy to lint the project
lint:clippy:
allow_failure: true
script:
- rustc --version && cargo --version # Print version info for debugging
- cargo clippy -- -D warnings
only:
- master
- development
- merge_requests
tags:
- linux
- docker
# Use rustfmt to check formating of the project
lint:fmt:
allow_failure: true
script:
- rustc --version && cargo --version # Print version info for debugging
- cargo fmt -- --check # TODO: Do we want to enforce formatting?
only:
- master
- development
- merge_requests
tags:
- linux
- docker
# Use cargo to test the project
test:cargo:
script:
- rustc --version && cargo --version # Print version info for debugging
- cargo test --workspace --verbose
only:
- master
- development
- merge_requests
tags:
- linux
- docker
build:docker-master:
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
before_script:
- ''
script:
- mkdir -p /kaniko/.docker
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --context $CI_PROJECT_DIR --dockerfile $CI_PROJECT_DIR/Dockerfile --destination $CI_REGISTRY_IMAGE:latest
only:
- master
tags:
- linux
- docker
build:docker-releases:
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
before_script:
- ''
script:
- mkdir -p /kaniko/.docker
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --context $CI_PROJECT_DIR --dockerfile $CI_PROJECT_DIR/Dockerfile --destination $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG
only:
- tags
tags:
- linux
- docker
build:docker-development:
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
before_script:
- ''
script:
- mkdir -p /kaniko/.docker
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --context $CI_PROJECT_DIR --dockerfile $CI_PROJECT_DIR/Dockerfile --destination $CI_REGISTRY_IMAGE:dev-latest
only:
- development
tags:
- linux
- docker
# cache dependencies and build environment to speed up setup # cache dependencies and build environment to speed up setup
cache: cache:
key: "$CI_COMMIT_REF_SLUG" key: "$CI_COMMIT_REF_SLUG"
paths: paths:
- apt/ - apt/
# - cargo/ - cargo/
- target/ - target/
.lints:
stage: lint
allow_failure: true
only:
- merge_requests
# Use clippy lints
lint:clippy:
extends: .lints
script:
- cargo clippy -V
- echo -e "\e[0Ksection_start:`date +%s`:clippy_output\r\e[0Kcargo clippy output"
- cargo clippy -- --no-deps
- echo -e "\e[0Ksection_end:`date +%s`:clippy_output\r\e[0K"
# Use rustfmt to check formatting
lint:fmt:
extends: .lints
script:
- cargo fmt --version
- echo -e "\e[0Ksection_start:`date +%s`:rustfmt_output\r\e[0KChanges suggested by rustfmt"
- cargo fmt --check -- -v
- echo -e "\e[0Ksection_end:`date +%s`:rustfmt_output\r\e[0K"
# Check if the code builds on rust stable
stable:check:
stage: check
only:
- main
- development
- merge_requests
script:
- rustc +stable --version && cargo --version
- echo -e "\e[0Ksection_start:`date +%s`:build_output\r\e[0KOutput of cargo check"
- cargo check --verbose
- echo -e "\e[0Ksection_end:`date +%s`:build_output\r\e[0K"
# Check if the code builds on rust stable on armv7
stable:check:armhf:
stage: check
only:
- main
- development
- merge_requests
before_script:
- mkdir -p $CARGO_HOME
- cp cargo-cross-config $CARGO_HOME/config.toml
script:
- rustc +stable --version && cargo --version
- echo -e "\e[0Ksection_start:`date +%s`:build_output\r\e[0KOutput of cargo check with target armv7-unknown-linux-gnueabihf"
- cargo check --verbose --target armv7-unknown-linux-gnueabihf
- echo -e "\e[0Ksection_end:`date +%s`:build_output\r\e[0K"
# Check if the code builds on rust stable on arm64
stable:check:arm64:
stage: check
only:
- main
- development
- merge_requests
before_script:
- mkdir -p $CARGO_HOME
- cp cargo-cross-config $CARGO_HOME/config.toml
script:
- rustc +stable --version && cargo --version
- echo -e "\e[0Ksection_start:`date +%s`:build_output\r\e[0KOutput of cargo check with target aarch64-unknown-linux-gnu"
- cargo check --verbose --target aarch64-unknown-linux-gnu
- echo -e "\e[0Ksection_end:`date +%s`:build_output\r\e[0K"
# Check if the code builds on rust stable
stable:build:amd64:
stage: build
only:
- main
- development
- merge_requests
script:
- rustc +stable --version && cargo --version
- echo -e "\e[0Ksection_start:`date +%s`:build_output\r\e[0KOutput of cargo build with target x86_64-unknown-linux-gnu"
- cargo build --release --target x86_64-unknown-linux-gnu
- echo -e "\e[0Ksection_end:`date +%s`:build_output\r\e[0K"
artifacts:
paths:
- target/x86_64-unknown-linux-gnu/release/bffhd
# Check if the code builds on rust stable on armv7
stable:build:armhf:
stage: build
only:
- main
- development
before_script:
- mkdir -p $CARGO_HOME
- cp cargo-cross-config $CARGO_HOME/config.toml
script:
- rustc +stable --version && cargo --version
- echo -e "\e[0Ksection_start:`date +%s`:build_output\r\e[0KOutput of cargo build with target armv7-unknown-linux-gnueabihf"
- cargo build --release --target armv7-unknown-linux-gnueabihf
- echo -e "\e[0Ksection_end:`date +%s`:build_output\r\e[0K"
artifacts:
paths:
- target/armv7-unknown-linux-gnueabihf/release/bffhd
# Check if the code builds on rust stable on arm64
stable:build:arm64:
stage: build
only:
- main
- development
before_script:
- mkdir -p $CARGO_HOME
- cp cargo-cross-config $CARGO_HOME/config.toml
script:
- rustc +stable --version && cargo --version
- echo -e "\e[0Ksection_start:`date +%s`:build_output\r\e[0KOutput of cargo build with target aarch64-unknown-linux-gnu"
- cargo build --release --target aarch64-unknown-linux-gnu
- echo -e "\e[0Ksection_end:`date +%s`:build_output\r\e[0K"
artifacts:
paths:
- target/aarch64-unknown-linux-gnu/release/bffhd
stable:test:
stage: build
needs: ["stable:check"]
only:
- main
- development
- merge_requests
script:
- echo -e "\e[0Ksection_start:`date +%s`:build_output\r\e[0KOutput of cargo test --no-run"
- cargo test --verbose --no-run --workspace
.tests:
stage: test
needs: ["stable:test"]
script:
- cargo test --workspace $TEST_TARGET -- -Z unstable-options --format json --report-time | cargo2junit > report.xml
artifacts:
when: always
reports:
junit:
- report.xml
only:
- main
- development
- merge_requests
# Run unit tests
unit test 1:3:
variables:
TEST_TARGET: "--lib"
extends: .tests
unit test 2:3:
variables:
TEST_TARGET: "--bins"
extends: .tests
unit test 3:3:
variables:
TEST_TARGET: "--examples"
extends: .tests
upload_binaries:
stage: release
image: curlimages/curl:latest
before_script: []
cache: []
dependencies:
- stable:build:amd64
- stable:build:armhf
- stable:build:arm64
script:
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file target/aarch64-unknown-linux-gnu/release/bffhd "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/bffhd/${CI_COMMIT_TAG}/bffhd_${VERSION}_linux_arm64"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file target/x86_64-unknown-linux-gnu/release/bffhd "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/bffhd/${CI_COMMIT_TAG}/bffhd_${VERSION}_linux_amd64"'
- 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file target/armv7-unknown-linux-gnueabihf/release/bffhd "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/bffhd/${VERSION}/bffhd_${VERSION}_linux_arm"'
rules:
- if: $CI_COMMIT_TAG =~ "release/.*"
when: never
- if: $CI_COMMIT_BRANCH == "main"
release_prepare:
stage: release
rules:
- if: $CI_COMMIT_TAG =~ "release/.*"
when: never
- if: $CI_COMMIT_BRANCH == "main"
script:
- VERSION="cargo metadata --format-version 1 | jq -C '.packages | .[] | select(.name == "diflouroborane") | .version' -r"
- echo $VERSION > release.env
artifacts:
reports:
dotenv: release.env
release_job:
stage: release
needs:
- job: release_prepare
artifacts: true
image: registry.gitlab.com/gitlab-org/release-cli:latest
rules:
- if: $CI_COMMIT_TAG =~ "release/.*"
when: never
- if: $CI_COMMIT_BRANCH == "main"
script:
- echo "Creating GitLab release…"
release:
name: "BFFH $VERSION"
description: "GitLab CI auto-created release"
tag_name: "release/$VERSION"
assets:
links:
- name: 'bffhd AMD64'
url: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/bffhd/${VERSION}/bffhd_${VERSION}_linux_amd64"
- name: 'bffhd ARMv7'
url: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/bffhd/${VERSION}/bffhd_${VERSION}_linux_arm"
- name: 'bffhd ARM64'
url: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/bffhd/${VERSION}/bffhd_${VERSION}_linux_arm64"
build:docker-releases:
stage: dockerify
image: jdrouet/docker-with-buildx:latest
dependencies:
- stable:build:amd64
- stable:build:armhf
- stable:build:arm64
tags:
- linux
- docker
- fabinfra
variables:
DOCKER_HOST: tcp://docker:2375/
DOCKER_DRIVER: overlay2
DOCKER_TLS_CERTDIR: ""
TRIVY_NO_PROGRESS: "true"
TRIVY_CACHE_DIR: ".trivycache/"
services:
- docker:dind
before_script:
- export TRIVY_VERSION=$(wget -qO - "https://api.github.com/repos/aquasecurity/trivy/releases/latest" | grep '"tag_name":' | sed -E 's/.*"v([^"]+)".*/\1/')
- echo $TRIVY_VERSION
- wget --no-verbose https://github.com/aquasecurity/trivy/releases/download/v${TRIVY_VERSION}/trivy_${TRIVY_VERSION}_Linux-64bit.tar.gz -O - | tar -zxvf -
script:
- docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
- docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
- docker buildx create --name cibuilder --driver docker-container --use
- docker buildx ls
- docker buildx inspect --bootstrap
- docker buildx build --platform linux/arm/v7,linux/arm64,linux/amd64 -t $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG .
- docker buildx build --load --platform linux/amd64 -t $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG .
# Build report
- ./trivy image --exit-code 0 --format template --template "@contrib/gitlab.tpl" -o gl-container-scanning-report.json $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG
# Print report
- ./trivy image --exit-code 0 --severity HIGH $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG
# Fail on severe vulnerabilities
- ./trivy image --exit-code 1 --severity CRITICAL $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG
- docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG
cache:
paths:
- .trivycache/
artifacts:
reports:
container_scanning: gl-container-scanning-report.json
rules:
- if: $CI_COMMIT_TAG =~ "release/.*"
when: never
build:docker-development:
stage: dockerify
image: jdrouet/docker-with-buildx:latest
dependencies:
- stable:build:amd64
- stable:build:armhf
- stable:build:arm64
tags:
- linux
- docker
- fabinfra
variables:
DOCKER_HOST: tcp://docker:2375/
DOCKER_DRIVER: overlay2
DOCKER_TLS_CERTDIR: ""
TRIVY_NO_PROGRESS: "true"
TRIVY_CACHE_DIR: ".trivycache/"
services:
- docker:dind
before_script:
- export TRIVY_VERSION=$(wget -qO - "https://api.github.com/repos/aquasecurity/trivy/releases/latest" | grep '"tag_name":' | sed -E 's/.*"v([^"]+)".*/\1/')
- echo $TRIVY_VERSION
- wget --no-verbose https://github.com/aquasecurity/trivy/releases/download/v${TRIVY_VERSION}/trivy_${TRIVY_VERSION}_Linux-64bit.tar.gz -O - | tar -zxvf -
script:
- docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
- docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
- docker buildx create --name cibuilder --driver docker-container --use
- docker buildx ls
- docker buildx inspect --bootstrap
- docker buildx build --platform linux/arm/v7,linux/arm64,linux/amd64 -t $CI_REGISTRY_IMAGE:development .
- docker buildx build --load --platform linux/amd64 -t $CI_REGISTRY_IMAGE:development .
# Build report
- ./trivy image --exit-code 0 --format template --template "@contrib/gitlab.tpl" -o gl-container-scanning-report.json $CI_REGISTRY_IMAGE:development
# Print report
- ./trivy image --exit-code 0 --severity HIGH $CI_REGISTRY_IMAGE:development
# Fail on severe vulnerabilities
- ./trivy image --exit-code 1 --severity CRITICAL $CI_REGISTRY_IMAGE:development
- docker push $CI_REGISTRY_IMAGE:development
cache:
paths:
- .trivycache/
artifacts:
reports:
container_scanning: gl-container-scanning-report.json
only:
- development

5
.gitmodules vendored
View File

@ -1,3 +1,4 @@
[submodule "schema"] [submodule "schema"]
path = schema path = api/schema
url = https://gitlab.com/fabinfra/fabaccess/fabaccess-api.git url = https://gitlab.com/fabinfra/fabaccess/fabaccess-api
branch = main

View File

@ -1,5 +1,25 @@
# Revision history for Difluoroborane # Revision history for Difluoroborane
## 0.4.3 -- 2025-02-11
* Adds binary version of FabFire authentication protocol
* Adds commands to dump and restore the full database as a TOML text file (`--dump-db` and `--load-db`)
* allows compilation with current stable Rust (1.84)
- Attention: The database format still relies on Rust data layout, so when updating the compiler, the database must be transferred as a TOML dump.
Therefore, the `rust-toolchain.toml` file pinning `rustc` to version `1.66` is still in place.
* resolves a crash (use after free) when disconnecting a client.
* resolves some compiler warnings
## 0.4.2 -- TODO
## 0.4.1 -- 2022-04-24
* Initial full implementation of the FabAccess 0.3 API, "Spigots of Berlin".
## 0.3.0 -- 2021-10-01
* A version seen by enough people that the version number needs to be skipped but never a formally released version
## 0.2.0 -- 2021-02-23 ## 0.2.0 -- 2021-02-23
* Dammit, missed by four days. * Dammit, missed by four days.

View File

@ -2,27 +2,38 @@
Thank you for your interest in helping out the FabAccess system! Thank you for your interest in helping out the FabAccess system!
To help develop Diflouroborane you will need a Rust toolchain. I heavily recommend installing You found a bug, an exploit or a feature that doesn't work like it's documented? Please tell us about it, see [Issues](#issues)
[rustup](https://rustup.rs) even if your distribution provides a recent enough rustc, simply because
it allows to easily switch compilers between several versions of both stable and nightly. It also
allows you to download the respective stdlib crate, giving you the option of an offline reference.
## Git Workflow / Branching You have a feature request? Great, check out the paragraph on [Feature Requests](#feature-requests)
We use a stable master / moving development workflow. This means that all /new/ development should ## Issues
happen on the `development` branch which is regularly merged into `master` as releases. The
exception of course are bug- and hotfixes that can target whichever branch.
If you want to add a new feature please work off the development branch. We suggest you create While we try to not have any bugs or exploits or documentation bugs we're not perfect either. Thanks for helping us out!
yourself a feature branch, e.g. using `git switch development; git checkout -b
feature/my-cool-feature`.
Using a feature branch keeps your local `development` branch clean, making it easier to later rebase
your feature branch onto it before you open a pull/merge request.
When you want feedback on your current progress or are ready to have it merged upstream open a merge We have labels that help us sort issues better, so if you know what would be the correct ones, please tag your issue with one or multiple keywords. See [Labels](https://gitlab.com/fabinfra/fabaccess/bffh/-/labels) to get an overview of all keywords and their use case.
request. Don't worry we don't bite! ^^
Especially for **bugs** and **exploits**, please mark your issue as "confidential" if you think it impacts the `stable` branch. If you're not sure, mark it as confidential anyway. It's easier to publish information than it is to un-publish information. You may also contact us by [mail](https://fab-access.org/impressum).
## Tests ## Feature Requests
Sadly, still very much `// TODO:`. We're working on it! :/ We also like new feature requests of course!
But before you open an issue in this repo for a feature request, please first check a few things:
1. Is it a feature that needs to be implemented in more than just the backend server? For example, is it something also having a GUI-component or something that you want to be able to do via the API? If so it's better suited over at the
[Lastenheft](https://gitlab.com/fabinfra/fabaccess_lastenheft) because that's where the required coordination for that will end up happening
2. Who else needs that feature? Is this something super specific to your environment/application or something that others will want too? If it's something that's relevant for more people please also tell us that in the feature request.
3. Can you already get partway or all the way there using what's there already? If so please also tell us what you're currently doing and what doesn't work or why you dislike your current solution.
## Contributing Code
To help develop Difluoroborane you will need a Rust toolchain. We heavily recommend installing [rustup](https://rustup.rs) even if your distribution provides a recent enough rustc, simply because it allows to easily switch compilers between several versions of both stable and nightly. It also allows you to download the respective stdlib crate, giving you the option of an offline reference.
We use a stable release branch / moving development workflow. This means that all *new* development should happen on the `development` branch which is regularly merged into `stable` as releases. The exception of course are bug- and hotfixes that can target whichever branch.
If you want to add a new feature please work off the development branch. We suggest you create yourself a feature branch, e.g. using
```git switch development; git checkout -b feature/my-cool-feature```
Using a feature branch keeps your local `development` branch clean, making it easier to later rebase your feature branch onto it before you open a pull/merge request.
When you want feedback on your current progress or are ready to have it merged upstream open a merge request. Don't worry, we don't bite! ^^

3372
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,72 +1,128 @@
[package] [package]
name = "diflouroborane" name = "difluoroborane"
version = "0.2.0" version = "0.4.2"
authors = [ "Gregor Reitzenstein <me@dequbed.space>" authors = [ "dequbed <me@dequbed.space>"
, "Kai Jan Kriegel <kai@kjkriegel.de>" , "Kai Jan Kriegel <kai@kjkriegel.de>"
, "Joseph Langosch <thejoklla@gmail.com>" , "Joseph Langosch <thejoklla@gmail.com>"
, "Jannis Rieger <omniskopus@gmail.com>" , "Jannis Rieger <omniskopus@gmail.com>"
] ]
license = "GPL-3.0" license = "GPL-3.0"
edition = "2018" edition = "2021"
publish = false
readme = "README.md"
build = "build.rs"
[features] [profile.release]
default = ["lmdb"] opt-level = 3
debug = true
lto = "thin"
# Use LMDB for internal kv-stores [lib]
lmdb = [] path = "bffhd/lib.rs"
# Don't run unit tests on `cargo test --tests`, only run integration tests.
test = false
[[bin]]
name = "bffhd"
path = "bin/bffhd/main.rs"
# Don't run unit tests on `cargo test --tests`, only run integration tests.
test = false
[dependencies] [dependencies]
futures = { version = "0.3", features = ["thread-pool", "compat"] } libc = "0.2.101"
nix = "0.23.1"
uuid = { version = "0.8.2", features = ["serde", "v4"] }
async-trait = "0.1.51"
pin-utils = "0.1.0"
futures-util = "0.3" futures-util = "0.3"
futures-signals = "0.3" futures-lite = "1.12.0"
async-net = "1.6.1"
async-io = "1.7.0"
async-process = "1.4.0"
backtrace = "0.3.65"
miette = { version = "4.7.1", features = ["fancy"] }
thiserror = "1.0.31"
toml = "0.5.8"
smol = "1.0" # Well-known paths/dirs for e.g. cache
dirs = "4.0.0"
signal-hook = "0.1" # Runtime
executor = { path = "runtime/executor" }
lightproc = { path = "runtime/lightproc" }
console = { path = "runtime/console" }
slog = { version = "2.5", features = ["max_level_trace"] } # Catch&Handle POSIX process signals
slog-term = "2.6" signal-hook = "0.3.13"
slog-async = "2.5" signal-hook-async-std = "0.2.2"
capnp = "0.13" # Argument parsing for bin/bffhd.rs
capnp-rpc = "0.13" clap = { version = "3.1.6", features = ["cargo"] }
capnp-futures = "0.13"
serde = { version = "1.0", features = ["derive"] } # Internal Databases
toml = "0.5" lmdb-rkv = "0.14.0"
flexbuffers = "0.1" rkyv = { version = "0.7", features = [] }
ptr_meta = "0.1"
rkyv_typename = "0.7"
rkyv_dyn = "0.7"
inventory = "0.1"
linkme = "0.3"
chrono = { version = "0.4", features = ["serde"] }
serde_dhall = { version = "0.10", default-features = false } # Password hashing for internal users
rust-argon2 = "0.8.3"
rand = "0.8.4"
uuid = { version = "0.8", features = ["serde", "v4"] } # Async aware logging and tracing
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter", "registry", "std"] }
tracing-futures = { version = "0.2", features = ["futures-03"] }
clap = "2.33" # API
api = { path = "api" }
capnp = "0.14"
capnp-rpc = "0.14.1"
# TODO update this if bindgen breaks (again) # API Authentication
rsasl = "0.2.3" desfire = "0.2.0-alpha3"
#rsasl = { path = "../../rsasl" }
# rumqtt needs tokio which I'm trying to get away from hex = { version = "0.4.3", features = ["serde"] }
paho-mqtt = { git = "https://github.com/dequbed/paho.mqtt.rust.git", branch = "master", features = ["build_bindgen"] }
#mlua = { version = "0.4", features = ["async", "luajit"] } futures-signals = "0.3.22"
async-oneshot = "0.5"
async-channel = "1.6"
libc = "0.2" # Config and Database (De)Serialization
lmdb-rkv = "0.14" serde = { version = "1.0.130", features = ["derive"] }
erased-serde = "0.3"
async-trait = "0.1" serde_dhall = { version = "0.10.1", default-features = false }
serde_json = "1.0"
once_cell = "1.8"
lazy_static = "1.4.0" lazy_static = "1.4.0"
rust-argon2 = "0.8" rustls = "0.20"
rand = "0.7" rustls-pemfile = "0.3.0"
futures-rustls = "0.22"
async-channel = "1.5" rumqttc = "0.11.0"
easy-parallel = "3.1" async-compat = "0.2.1"
genawaiter = "0.99" url = "2.2.2"
rustls-native-certs = "0.6.1"
[build-dependencies] shadow-rs = "0.11"
capnpc = "0.13"
[dependencies.rsasl]
version = "2.2.0"
default_features = false
features = ["unstable_custom_mechanism", "provider", "registry_static", "config_builder", "plain"]
[dev-dependencies] [dev-dependencies]
futures-test = "0.3" futures-test = "0.3.16"
tempfile = "3.2"
[build-dependencies]
shadow-rs = "0.11"
[workspace]
members = ["runtime/*", "modules/*", "api"]

View File

@ -1,22 +1,25 @@
# Setup build image for multistage build FROM --platform=$BUILDPLATFORM alpine:latest as copy
FROM rust:latest as builder ARG TARGETPLATFORM
# install build deps RUN case "$TARGETPLATFORM" in \
RUN apt-get update && apt-get upgrade -y "linux/arm/v7") echo armv7-unknown-linux-gnueabihf > /rust_target.txt ;; \
RUN apt-get install -yqq --no-install-recommends capnproto build-essential cmake clang libclang-dev libgsasl7-dev "linux/arm/v6") echo arm-unknown-linux-gnueabihf > /rust_target.txt ;; \
"linux/arm64") echo aarch64-unknown-linux-gnu > /rust_target.txt ;; \
"linux/amd64") echo x86_64-unknown-linux-gnu > /rust_target.txt ;; \
*) exit 1 ;; \
esac
WORKDIR /usr/src/bffh WORKDIR /usr/src/bffh
COPY . . COPY . .
RUN cargo install --path . RUN cp target/$(cat /rust_target.txt)/release/bffhd ./bffhd.bin
# Setup deployable image # Setup deployable image
FROM debian:buster-slim FROM ubuntu:22.04
# Install runtime deps RUN apt-get update && apt-get upgrade -y
RUN apt-get update && apt-get upgrade -yqq RUN apt-get install -yqq --no-install-recommends python3 python3-pip
RUN apt-get install -yqq libgsasl7 && rm -rf /var/lib/apt/lists/* RUN pip3 install paho-mqtt
COPY --from=builder /usr/local/cargo/bin/diflouroborane /usr/local/bin/diflouroborane COPY --from=copy /usr/src/bffh/bffhd.bin /usr/local/bin/bffhd
#COPY --from=builder /usr/src/bffh/examples/bffh.dhall /etc/diflouroborane.dhall
# RUN diflouroborane --print-default > /etc/diflouroborane.toml
VOLUME /etc/bffh/ VOLUME /etc/bffh/
VOLUME /var/lib/bffh/
VOLUME /usr/local/lib/bffh/adapters/
EXPOSE 59661 EXPOSE 59661
ENTRYPOINT ["sh", "-c", "diflouroborane -c /etc/bffh/bffh.dhall --load=/etc/bffh; diflouroborane -c /etc/bffh/bffh.dhall"] ENTRYPOINT ["sh", "-c", "bffhd -c /etc/bffh/bffh.dhall --load=/etc/bffh/users.toml; bffhd -c /etc/bffh/bffh.dhall"]

View File

@ -1,35 +0,0 @@
## Installation
Currently there are no distribution packages available.
However installation is reasonably straight-forward, since Difluoroborane compiles into a single
mostly static binary with few dependencies.
At the moment only Linux is supported. If you managed to compile Difluoroborane please open an issue
outlining your steps or add a merge request expanding this part. Thanks!
### Requirements
General requirements; scroll down for distribution-specific instructions
- GNU SASL (libgsasl).
* If you want to compile Diflouroborane from source you will potentially also need development
headers
- capnproto
- rustc stable / nightly >= 1.48
* If your distribution does not provide a recent enough rustc, [rustup](https://rustup.rs/) helps
installing a local toolchain and keeping it up to date.
###### Arch Linux:
```shell
$ pacman -S gsasl rust capnproto
```
### Compiling from source
Diflouroborane uses Cargo, so compilation boils down to:
```shell
$ cargo build --release
```
The compiled binary can then be found in `./target/release/diflouroborane`

View File

@ -1,8 +1,8 @@
# FabAccess Diflouroborane # FabAccess Difluoroborane
Diflouroborane (shorter: BFFH, the chemical formula for Diflouroborane) is the server part of Difluoroborane (shorter: BFFH, the chemical formula for Difluoroborane) is the server part of
FabAccess. FabAccess.
It provides a server-side implementation of the [FabAccess API](/fabinfra/fabaccess/fabaccess-api). It provides a server-side implementation of the [FabAccess API](https://gitlab.com/fabinfra/fabaccess/fabaccess-api).
## What is this? ## What is this?
@ -13,14 +13,14 @@ to be used for all other things one would like to give exclusive access to even
dangerous or expensive to use (think 3D printers, smart lightbulbs, meeting rooms). dangerous or expensive to use (think 3D printers, smart lightbulbs, meeting rooms).
FabAccess uses a Client/Server architecture with a [Cap'n Proto](https://capnproto.org/) API. You FabAccess uses a Client/Server architecture with a [Cap'n Proto](https://capnproto.org/) API. You
can find the API schema files over [in their own repository](/fabinfra/fabaccess/fabaccess-api). can find the API schema files over [in their own repository](https://gitlab.com/fabinfra/fabaccess/fabaccess-api).
The reference client is [Borepin](/fabinfra/fabaccess/borepin), written in C#/Xamarin to be able to The reference client is [Borepin](https://gitlab.com/fabinfra/fabaccess/borepin), written in C#/Xamarin to be able to
be ported to as many platforms as possible. be ported to as many platforms as possible.
## Installation ## Installation
See [INSTALL.md](INSTALL.md) See [https://fab-access.org/install](https://fab-access.org/install)
## Contributing ## Contributing

19
api/Cargo.toml Normal file
View File

@ -0,0 +1,19 @@
[package]
name = "api"
version = "0.3.2"
edition = "2021"
build = "build.rs"
publish = false
[features]
generated = []
gen_static = []
[dependencies]
capnp = "0.14.3"
capnpc = "0.14.4"
[build-dependencies]
capnpc = "0.14.4"
# Used in build.rs to iterate over all files in schema/
walkdir = "2.3.2"

42
api/build.rs Normal file
View File

@ -0,0 +1,42 @@
use walkdir::{DirEntry, WalkDir};
/// Returns true if the entry's file name begins with a dot, i.e. it is a
/// hidden file or directory by Unix convention. Names that are not valid
/// UTF-8 are treated as not hidden.
fn is_hidden(entry: &DirEntry) -> bool {
    match entry.file_name().to_str() {
        Some(name) => name.starts_with('.'),
        None => false,
    }
}
/// Collect every `*.capnp` schema file under `schema/` (at most two levels
/// deep, skipping hidden entries) and run the Cap'n Proto code generator
/// over the whole set.
///
/// Panics if generation fails, which aborts the build as intended for a
/// build script.
fn generate_api() {
    // Re-run this build script whenever anything under schema/ changes.
    println!("cargo:rerun-if-changed=schema");

    let mut compiler = ::capnpc::CompilerCommand::new();
    compiler
        .src_prefix("schema")
        .default_parent_module(vec!["schema".to_string()]);

    let walker = WalkDir::new("schema")
        .max_depth(2)
        .into_iter()
        .filter_entry(|entry| !is_hidden(entry));

    for entry in walker {
        // Skip entries that could not be read at all.
        let entry = match entry {
            Ok(entry) => entry,
            Err(_) => continue,
        };
        // Only plain files count; directories are traversed, not compiled.
        if entry.file_type().is_dir() {
            continue;
        }
        // Only files with the .capnp extension are schema sources.
        let is_schema = entry
            .file_name()
            .to_str()
            .map_or(false, |name| name.ends_with(".capnp"));
        if !is_schema {
            continue;
        }

        println!("Collecting schema file {}", entry.path().display());
        compiler.file(entry.path());
    }

    println!("Compiling schemas...");
    compiler.run().expect("Failed to generate API code");
}
/// Build-script entry point: generate the Cap'n Proto API bindings.
fn main() {
    generate_api();
}

1
api/schema Submodule

@ -0,0 +1 @@
Subproject commit f3f53dafb6b7d23a19947f2a32d4ed5ee4e91d22

7
api/src/lib.rs Normal file
View File

@ -0,0 +1,7 @@
//! FabAccess generated API bindings
//!
//! This crate contains slightly nicer and better documented bindings for the FabAccess API.
// The generated Cap'n Proto code contains many items this crate never
// touches directly; silence dead-code warnings for the whole module.
#[allow(dead_code)]
pub mod schema;
// Re-export everything so downstream code can use e.g. `api::user_capnp`
// without spelling out the `schema` module.
pub use schema::*;

41
api/src/schema.rs Normal file
View File

@ -0,0 +1,41 @@
// Each module below includes the Rust source that capnpc generated from the
// corresponding .capnp file at build time (see build.rs). OUT_DIR is set by
// Cargo and points at this crate's build output directory.
pub use capnpc::schema_capnp;

pub mod authenticationsystem_capnp {
    include!(concat!(env!("OUT_DIR"), "/authenticationsystem_capnp.rs"));
}

pub mod connection_capnp {
    include!(concat!(env!("OUT_DIR"), "/connection_capnp.rs"));
}

pub mod general_capnp {
    include!(concat!(env!("OUT_DIR"), "/general_capnp.rs"));
}

pub mod machine_capnp {
    include!(concat!(env!("OUT_DIR"), "/machine_capnp.rs"));
}

pub mod machinesystem_capnp {
    include!(concat!(env!("OUT_DIR"), "/machinesystem_capnp.rs"));
}

pub mod permissionsystem_capnp {
    include!(concat!(env!("OUT_DIR"), "/permissionsystem_capnp.rs"));
}

pub mod role_capnp {
    include!(concat!(env!("OUT_DIR"), "/role_capnp.rs"));
}

pub mod space_capnp {
    include!(concat!(env!("OUT_DIR"), "/space_capnp.rs"));
}

pub mod user_capnp {
    include!(concat!(env!("OUT_DIR"), "/user_capnp.rs"));
}

pub mod usersystem_capnp {
    include!(concat!(env!("OUT_DIR"), "/usersystem_capnp.rs"));
}

25
bffhd/actors/dummy.rs Normal file
View File

@ -0,0 +1,25 @@
use futures_util::future;
use futures_util::future::BoxFuture;
use std::collections::HashMap;
use crate::actors::Actor;
use crate::db::ArchivedValue;
use crate::resources::state::State;
/// A no-op actor that only logs the state changes it is asked to apply.
///
/// Useful for wiring up and debugging machine configurations without
/// talking to any real hardware.
pub struct Dummy {
    name: String,
    // Configuration parameters; kept only so they show up in the log line.
    params: HashMap<String, String>,
}

impl Dummy {
    /// Construct a dummy actor with the given instance name and parameters.
    pub fn new(name: String, params: HashMap<String, String>) -> Self {
        Dummy { name, params }
    }
}

impl Actor for Dummy {
    fn apply(&mut self, state: ArchivedValue<State>) -> BoxFuture<'static, ()> {
        tracing::info!(name=%self.name, params=?self.params, ?state, "dummy actor updating state");
        let done = future::ready(());
        Box::pin(done)
    }
}

289
bffhd/actors/mod.rs Normal file
View File

@ -0,0 +1,289 @@
use crate::actors::shelly::Shelly;
use crate::resources::state::State;
use crate::{Config, ResourcesHandle};
use async_compat::CompatExt;
use executor::pool::Executor;
use futures_signals::signal::Signal;
use futures_util::future::BoxFuture;
use rumqttc::{AsyncClient, ConnectionError, Event, Incoming, MqttOptions};
use std::collections::HashMap;
use std::future::Future;
use std::pin::Pin;
use miette::Diagnostic;
use std::task::{Context, Poll};
use std::time::Duration;
use thiserror::Error;
use once_cell::sync::Lazy;
use rumqttc::ConnectReturnCode::Success;
use crate::actors::dummy::Dummy;
use crate::actors::process::Process;
use crate::db::ArchivedValue;
use rustls::RootCertStore;
use url::Url;
mod dummy;
mod process;
mod shelly;
/// Interface for anything that can mirror a resource's state into the
/// outside world (MQTT relays, spawned processes, ...).
pub trait Actor {
    // Returns a future performing the side effect for the new state. The
    // driver polls each such future to completion before starting the next.
    fn apply(&mut self, state: ArchivedValue<State>) -> BoxFuture<'static, ()>;
}
/// Task connecting a resource's state-change signal to one [`Actor`].
pub struct ActorDriver<S: 'static> {
    // Source of state updates to feed into the actor.
    signal: S,
    // The actor the side effects are applied through.
    actor: Box<dyn Actor + Send + Sync>,
    // Currently in-flight `Actor::apply` future, if any.
    future: Option<BoxFuture<'static, ()>>,
}
impl<S: Signal<Item = ArchivedValue<State>>> ActorDriver<S> {
    /// Wrap `signal` and `actor` into a driver future. No `apply` call is
    /// in flight until the first state change arrives.
    pub fn new(signal: S, actor: Box<dyn Actor + Send + Sync>) -> Self {
        Self {
            signal,
            actor,
            future: None,
        }
    }
}
impl<S> Future for ActorDriver<S>
where
    S: Signal<Item = ArchivedValue<State>> + Unpin + Send,
{
    type Output = ();

    /// Drives the actor: alternately finishes the in-flight `apply` future
    /// and polls the signal for the next state change. Resolves when the
    /// signal reports it is exhausted (`poll_change` returns `None`).
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        // Work until there is no more work to do.
        loop {
            // Poll the `apply` future. And ensure it's completed before the next one is started
            match self
                .future
                .as_mut()
                .map(|future| Future::poll(Pin::new(future), cx))
            {
                // Skip and poll for a new future to do
                None => {}
                // This apply future is done, get a new one
                Some(Poll::Ready(_)) => self.future = None,
                // This future would block so we return to continue work another time
                Some(Poll::Pending) => return Poll::Pending,
            }

            // Poll the signal and apply any change that happen to the inner Actuator
            match Pin::new(&mut self.signal).poll_change(cx) {
                Poll::Pending => return Poll::Pending,
                Poll::Ready(None) => return Poll::Ready(()),
                Poll::Ready(Some(state)) => {
                    // This future MUST be polled before we exit from the Actor::poll because if we
                    // do not do that it will not register the dependency and thus NOT BE POLLED.
                    let f = self.actor.apply(state);
                    self.future.replace(f);
                }
            }
        }
    }
}
/// Lazily-loaded set of the operating system's trusted root certificates,
/// used to verify TLS-secured (mqtts/ssl) broker connections.
static ROOT_CERTS: Lazy<RootCertStore> = Lazy::new(|| {
    let span = tracing::info_span!("loading system certificates");
    let _guard = span.enter();
    let mut store = RootCertStore::empty();
    match rustls_native_certs::load_native_certs() {
        Ok(certs) => {
            let certs: Vec<Vec<u8>> = certs.into_iter().map(|c| c.0).collect();
            // add_parsable_certificates skips certificates it cannot parse
            // instead of failing the whole load.
            let (loaded, ignored) = store.add_parsable_certificates(&certs[..]);
            if ignored != 0 {
                tracing::info!(loaded, ignored, "certificates loaded, some ignored");
            } else {
                tracing::info!(loaded, "certificates loaded");
            }
        }
        Err(error) => {
            // On failure the store stays empty; TLS connections will then be
            // unable to verify any server certificate.
            tracing::error!(%error, "failed to load system certificates");
        }
    }
    store
});
/// Errors that can occur while setting up the actor subsystem.
#[derive(Debug, Error, Diagnostic)]
pub enum ActorError {
    // The configured MQTT URL was not syntactically valid.
    #[error("failed to parse MQTT url")]
    UrlParseError(
        #[from]
        #[source]
        url::ParseError,
    ),
    // The MQTT URL parsed but was unusable (unsupported scheme or no host).
    #[error("MQTT config is invalid")]
    InvalidConfig,
    // The initial connection attempt to the MQTT broker failed.
    #[error("MQTT connection failed")]
    ConnectionError(
        #[from]
        #[source]
        rumqttc::ConnectionError,
    ),
}
/// Connect to the configured MQTT broker and spawn one [`ActorDriver`]
/// task per configured actor, each wired to the state signal of the
/// machine it is attached to.
///
/// Returns an error if the MQTT URL is invalid or the initial broker
/// connection fails. Individual misconfigured actors (unknown module
/// type, missing machine) are only logged and skipped.
pub fn load(
    executor: Executor,
    config: &Config,
    resources: ResourcesHandle,
) -> Result<(), ActorError> {
    let span = tracing::info_span!("loading actors");
    // Enter the span so the log lines below are attributed to it.
    // (Previously the guard bound the span value itself, which never
    // entered the span.)
    let _guard = span.enter();

    let mqtt_url = Url::parse(config.mqtt_url.as_str())?;
    // Choose the transport (and the scheme's default port) from the URL.
    let (transport, default_port) = match mqtt_url.scheme() {
        "mqtts" | "ssl" => (
            rumqttc::Transport::tls_with_config(
                rumqttc::ClientConfig::builder()
                    .with_safe_defaults()
                    .with_root_certificates(ROOT_CERTS.clone())
                    .with_no_client_auth()
                    .into(),
            ),
            8883,
        ),

        "mqtt" | "tcp" => (rumqttc::Transport::tcp(), 1883),

        scheme => {
            tracing::error!(%scheme, "MQTT url uses invalid scheme");
            return Err(ActorError::InvalidConfig);
        }
    };
    let host = mqtt_url.host_str().ok_or_else(|| {
        tracing::error!("MQTT url must contain a hostname");
        ActorError::InvalidConfig
    })?;
    let port = mqtt_url.port().unwrap_or(default_port);

    let mut mqttoptions = MqttOptions::new("bffh", host, port);

    mqttoptions
        .set_transport(transport)
        .set_keep_alive(Duration::from_secs(20));

    // Credentials come straight from the URL's userinfo part, if present.
    if !mqtt_url.username().is_empty() {
        mqttoptions.set_credentials(mqtt_url.username(), mqtt_url.password().unwrap_or_default());
    }
    let (mqtt, mut eventloop) = AsyncClient::new(mqttoptions, 256);

    // Block on the first poll so connection errors surface here instead of
    // inside the detached background task spawned below.
    let mut eventloop = executor.run(
        async move {
            match eventloop.poll().await {
                Ok(Event::Incoming(Incoming::Connect(_connect))) => {}
                Ok(Event::Incoming(Incoming::ConnAck(connack))) => {
                    if connack.code == Success {
                        tracing::debug!(?connack, "MQTT connection established");
                    } else {
                        tracing::error!(?connack, "MQTT connect failed");
                    }
                }
                Ok(event) => {
                    tracing::warn!(?event, "Got unexpected mqtt event");
                }
                Err(error) => {
                    tracing::error!(?error, "MQTT connection failed");
                    return Err(ActorError::ConnectionError(error));
                }
            }
            Ok(eventloop)
        }
        .compat(),
    )?;

    // Background task keeping the MQTT event loop alive. It tolerates a
    // single IO error but gives up on the second consecutive one.
    executor.spawn(
        async move {
            let mut fault = false;
            loop {
                match eventloop.poll().compat().await {
                    Ok(_) => {
                        fault = false;
                        // TODO: Handle incoming MQTT messages
                    }
                    Err(ConnectionError::Cancel)
                    | Err(ConnectionError::StreamDone)
                    | Err(ConnectionError::RequestsDone) => {
                        // Normal exit
                        tracing::info!("MQTT request queue closed, stopping client.");
                        return;
                    }
                    Err(ConnectionError::Timeout(_)) => {
                        tracing::error!("MQTT operation timed out!");
                        tracing::warn!(
                            "MQTT client will continue, but messages may have been lost."
                        )
                        // Timeout does not close the client
                    }
                    Err(ConnectionError::Io(error)) if fault => {
                        tracing::error!(?error, "MQTT recurring IO error, closing client");
                        // Repeating IO errors close client. Any Ok() in between resets fault to false.
                        return;
                    }
                    Err(ConnectionError::Io(error)) => {
                        fault = true;
                        tracing::error!(?error, "MQTT encountered IO error");
                        // *First* IO error does not close the client.
                    }
                    Err(error) => {
                        tracing::error!(?error, "MQTT client encountered unhandled error");
                        return;
                    }
                }
            }
        }
        .compat(),
    );

    // Resolve each actor's configured machine id to that machine's state
    // signal; actors pointing at unknown machines are dropped with an error.
    let mut actor_map: HashMap<String, _> = config
        .actor_connections
        .iter()
        .filter_map(|(k, v)| {
            if let Some(resource) = resources.get_by_id(v) {
                Some((k.clone(), resource.get_signal()))
            } else {
                tracing::error!(actor=%k, machine=%v, "Machine configured for actor not found!");
                None
            }
        })
        .collect();

    // Instantiate each configured actor and spawn its driver task.
    for (name, cfg) in config.actors.iter() {
        if let Some(sig) = actor_map.remove(name) {
            if let Some(actor) = load_single(name, &cfg.module, &cfg.params, mqtt.clone()) {
                let driver = ActorDriver::new(sig, actor);
                tracing::debug!(module_name=%cfg.module, %name, "starting actor task");
                executor.spawn(driver);
            } else {
                tracing::error!(module_name=%cfg.module, %name, "Actor module type not found");
            }
        } else {
            tracing::warn!(actor=%name, ?config, "Actor has no machine configured. Skipping!");
        }
    }

    Ok(())
}
/// Instantiate a single actor implementation by its configured module
/// type name ("Dummy", "Process" or "Shelly").
///
/// Returns `None` when the module name is unknown, or when the module's
/// own constructor rejects the parameters (e.g. `Process` without `cmd`).
fn load_single(
    name: &String,
    module_name: &String,
    params: &HashMap<String, String>,
    client: AsyncClient,
) -> Option<Box<dyn Actor + Sync + Send>> {
    tracing::info!(%name, %module_name, ?params, "Loading actor");

    let actor: Option<Box<dyn Actor + Sync + Send>> = match module_name.as_str() {
        "Dummy" => Some(Box::new(Dummy::new(name.clone(), params.clone()))),
        "Process" => Process::new(name.clone(), params).map(Process::into_boxed_actuator),
        "Shelly" => Some(Box::new(Shelly::new(name.clone(), client, params))),
        _ => None,
    };
    actor
}

88
bffhd/actors/process.rs Normal file
View File

@ -0,0 +1,88 @@
use futures_util::future::BoxFuture;
use std::collections::HashMap;
use std::process::{Command, Stdio};
use crate::actors::Actor;
use crate::db::ArchivedValue;
use crate::resources::modules::fabaccess::ArchivedStatus;
use crate::resources::state::State;
/// An actor that runs an external command on every state change.
///
/// Configured via the mandatory `cmd` parameter (the executable) and an
/// optional whitespace-separated `args` parameter whose entries are passed
/// before the actor name.
pub struct Process {
    name: String,
    cmd: String,
    args: Vec<String>,
}

impl Process {
    /// Build a process actor from its config parameters.
    /// Returns `None` when the mandatory `cmd` parameter is missing.
    pub fn new(name: String, params: &HashMap<String, String>) -> Option<Self> {
        let cmd = params.get("cmd")?.to_string();
        let args = match params.get("args") {
            Some(argv) => argv.split_whitespace().map(str::to_string).collect(),
            None => Vec::new(),
        };
        Some(Self { name, cmd, args })
    }

    /// Erase the concrete type for storage alongside other actor kinds.
    pub fn into_boxed_actuator(self) -> Box<dyn Actor + Sync + Send> {
        Box::new(self)
    }
}
impl Actor for Process {
    /// Spawn the configured command with the actor name, the new status as
    /// a word ("free", "inuse", ...) and — where applicable — the id of the
    /// responsible user appended as arguments. The child's stdout/stderr
    /// are forwarded line-by-line into the log.
    fn apply(&mut self, state: ArchivedValue<State>) -> BoxFuture<'static, ()> {
        tracing::debug!(name=%self.name, cmd=%self.cmd, ?state,
            "Process actor updating state");
        let mut command = Command::new(&self.cmd);
        command
            .stdin(Stdio::null())
            .args(self.args.iter())
            .arg(&self.name);

        // Map the machine status onto the argument convention the external
        // program is expected to understand.
        match &state.as_ref().inner.state {
            ArchivedStatus::Free => {
                command.arg("free");
            }
            ArchivedStatus::InUse(by) => {
                command.arg("inuse").arg(by.id.as_str());
            }
            ArchivedStatus::ToCheck(by) => {
                command.arg("tocheck").arg(by.id.as_str());
            }
            ArchivedStatus::Blocked(by) => {
                command.arg("blocked").arg(by.id.as_str());
            }
            ArchivedStatus::Disabled => {
                command.arg("disabled");
            }
            ArchivedStatus::Reserved(by) => {
                command.arg("reserved").arg(by.id.as_str());
            }
        }

        let name = self.name.clone();
        Box::pin(async move {
            // NOTE(review): Command::output() is blocking I/O inside an async
            // task; it stalls the executor thread while the child runs.
            match command.output() {
                Ok(retv) if retv.status.success() => {
                    tracing::trace!("Actor was successful");
                    let outstr = String::from_utf8_lossy(&retv.stdout);
                    for line in outstr.lines() {
                        tracing::debug!(%name, %line, "actor stdout");
                    }
                }
                Ok(retv) => {
                    tracing::warn!(%name, ?state, code=?retv.status,
                        "Actor returned nonzero exitcode"
                    );
                    if !retv.stderr.is_empty() {
                        let errstr = String::from_utf8_lossy(&retv.stderr);
                        for line in errstr.lines() {
                            tracing::warn!(%name, %line, "actor stderr");
                        }
                    }
                }
                Err(error) => tracing::warn!(%name, ?error, "process actor failed to run cmd"),
            }
        })
    }
}

67
bffhd/actors/shelly.rs Normal file
View File

@ -0,0 +1,67 @@
use futures_util::future::BoxFuture;
use std::collections::HashMap;
use crate::actors::Actor;
use crate::db::ArchivedValue;
use crate::resources::modules::fabaccess::ArchivedStatus;
use crate::resources::state::State;
use rumqttc::{AsyncClient, QoS};
/// An actuator for a Shelly device listening on one MQTT broker.
///
/// This actuator toggles the shelly addressed by its MQTT command topic.
/// To toggle shellies on multiple brokers, use multiple instances of this
/// actuator with different clients.
pub struct Shelly {
    name: String,
    client: AsyncClient,
    topic: String,
}

impl Shelly {
    /// Create a shelly actor. The MQTT command topic is derived from the
    /// explicit `topic` parameter when given, otherwise from the actor name.
    pub fn new(name: String, client: AsyncClient, params: &HashMap<String, String>) -> Self {
        let device = params.get("topic").map(String::as_str).unwrap_or(&name);
        let topic = format!("shellies/{}/relay/0/command", device);

        tracing::debug!(%name,%topic,"Starting shelly module");
        Shelly {
            name,
            client,
            topic,
        }
    }

    /// Replace the actor's name (used in log output).
    /// NOTE(review): the MQTT topic is NOT recomputed here, so the
    /// addressed device does not change — confirm whether that is intended.
    pub fn set_name(&mut self, new_name: String) {
        tracing::debug!(old=%self.name, new=%new_name, "Renaming shelly actor");
        self.name = new_name;
    }
}
impl Actor for Shelly {
fn apply(&mut self, state: ArchivedValue<State>) -> BoxFuture<'static, ()> {
tracing::debug!(?state, name=%self.name,
"Shelly changing state"
);
let pl = match state.as_ref().inner.state {
ArchivedStatus::InUse(_) => "on",
_ => "off",
};
let name = self.name.clone();
let client = self.client.clone();
let topic = self.topic.clone();
let f = async move {
let res = client.publish(topic, QoS::AtLeastOnce, false, pl).await;
if let Err(error) = res {
tracing::error!(?error, %name, "`Shelly` actor failed to update state");
}
};
return Box::pin(f);
}
}

65
bffhd/audit.rs Normal file
View File

@ -0,0 +1,65 @@
use miette::Diagnostic;
use once_cell::sync::OnceCell;
use std::fs::{File, OpenOptions};
use std::io;
use std::io::{LineWriter, Write};
use std::sync::Mutex;
use thiserror::Error;
use crate::Config;
use serde::{Deserialize, Serialize};
use serde_json::Serializer;
/// Global audit-log handle, initialized once via [`AuditLog::new`].
pub static AUDIT: OnceCell<AuditLog> = OnceCell::new();

// TODO: Make the audit log a tracing layer

/// Append-only audit log writing one JSON object per line.
#[derive(Debug)]
pub struct AuditLog {
    // LineWriter flushes on newline; the Mutex serializes concurrent writers.
    writer: Mutex<LineWriter<File>>,
}

/// One audit record: which machine entered which state, and when.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditLogLine<'a> {
    // Seconds since the Unix epoch, taken from chrono::Utc::now().
    timestamp: i64,
    machine: &'a str,
    state: &'a str,
}

/// I/O error raised while opening or writing the audit log.
#[derive(Debug, Error, Diagnostic)]
#[error(transparent)]
#[repr(transparent)]
pub struct Error(#[from] pub io::Error);
impl AuditLog {
    /// Open (or create) the audit log at the configured path and register
    /// it as the global [`AUDIT`] instance. Subsequent calls return the
    /// already-initialized instance without reopening the file.
    pub fn new(config: &Config) -> Result<&'static Self, Error> {
        AUDIT.get_or_try_init(|| {
            tracing::debug!(path = %config.auditlog_path.display(), "Initializing audit log");
            let fd = OpenOptions::new()
                .create(true)
                .append(true)
                .open(&config.auditlog_path)?;
            let writer = Mutex::new(LineWriter::new(fd));
            Ok(Self { writer })
        })
    }

    /// Append one JSON-encoded line recording that `machine` entered
    /// `state`.
    ///
    /// Returns any I/O error encountered while writing. (Serialization
    /// into a writer can only fail on I/O; previously this path called
    /// `expect` and aborted the process on a full disk or similar.)
    pub fn log(&self, machine: &str, state: &str) -> io::Result<()> {
        let timestamp = chrono::Utc::now().timestamp();
        let line = AuditLogLine {
            timestamp,
            machine,
            state,
        };

        tracing::debug!(?line, "writing audit log line");

        let mut guard = self.writer.lock().unwrap();
        let mut writer: &mut LineWriter<File> = &mut *guard;

        let mut ser = Serializer::new(&mut writer);
        // Propagate serialization failures as io::Error instead of panicking.
        line.serialize(&mut ser).map_err(io::Error::from)?;

        writer.write_all("\n".as_bytes())?;

        Ok(())
    }
}

View File

@ -0,0 +1,38 @@
mod server;
pub use server::FabFire;
use rsasl::mechname::Mechname;
use rsasl::registry::{Matches, Mechanism, Named, Side, MECHANISMS};
/// SASL mechanism name; the "X-" prefix marks a non-standard,
/// private-use mechanism.
const MECHNAME: &'static Mechname = &Mechname::const_new_unchecked(b"X-FABFIRE");

// Register the mechanism in rsasl's static registry at link time via linkme.
#[linkme::distributed_slice(MECHANISMS)]
pub static FABFIRE: Mechanism = Mechanism::build(
    MECHNAME,
    300,
    None,
    Some(FabFire::new_server),
    Side::Client,
    |_| Some(Matches::<Select>::name()),
    |_| true,
);
// Marker type used by rsasl's mechanism-selection machinery to refer back
// to the FABFIRE registration above.
struct Select;
impl Named for Select {
    fn mech() -> &'static Mechanism {
        &FABFIRE
    }
}
use rsasl::property::SizedProperty;
use std::marker::PhantomData;
// All Property types must implement Debug.
#[derive(Debug)]
// The `PhantomData` in the constructor is only used so external crates can't construct this type.
/// SASL property through which the mechanism requests the per-card
/// AES-128 key during FabFire authentication.
pub struct FabFireCardKey(PhantomData<()>);
impl SizedProperty<'_> for FabFireCardKey {
    type Value = [u8; 16];
    const DESCRIPTION: &'static str = "A AES128 key for a FabFire card";
}

View File

@ -0,0 +1,742 @@
use desfire::desfire::desfire::MAX_BYTES_PER_TRANSACTION;
use desfire::desfire::Desfire;
use desfire::error::Error as DesfireError;
use desfire::iso7816_4::apduresponse::APDUResponse;
use rsasl::mechanism::{
Authentication, Demand, DemandReply, MechanismData, MechanismError, MechanismErrorKind,
Provider, State, ThisProvider,
};
use rsasl::prelude::{MessageSent, SASLConfig, SASLError, SessionError};
use rsasl::property::AuthId;
use serde::{Deserialize, Serialize};
use std::convert::TryFrom;
use std::fmt::{Debug, Display, Formatter};
use std::io::Write;
use crate::authentication::fabfire::FabFireCardKey;
/// Errors produced while driving the FabFire card authentication exchange.
enum FabFireError {
    // A card/reader response could not be parsed or failed a validity check.
    ParseError,
    // A command could not be serialized for transmission to the reader.
    SerializationError,
    // JSON received from the card reader failed to deserialize.
    DeserializationError(serde_json::Error),
    // The DESFire card returned an error response.
    CardError(DesfireError),
    InvalidMagic(String),
    InvalidToken(String),
    InvalidURN(String),
    InvalidCredentials(String),
    // Error bubbled up from the surrounding SASL session.
    Session(SessionError),
}
impl Debug for FabFireError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
FabFireError::ParseError => write!(f, "ParseError"),
FabFireError::SerializationError => write!(f, "SerializationError"),
FabFireError::DeserializationError(e) => write!(f, "DeserializationError: {}", e),
FabFireError::CardError(err) => write!(f, "CardError: {}", err),
FabFireError::InvalidMagic(magic) => write!(f, "InvalidMagic: {}", magic),
FabFireError::InvalidToken(token) => write!(f, "InvalidToken: {}", token),
FabFireError::InvalidURN(urn) => write!(f, "InvalidURN: {}", urn),
FabFireError::InvalidCredentials(credentials) => {
write!(f, "InvalidCredentials: {}", credentials)
}
FabFireError::Session(err) => write!(f, "Session: {}", err),
}
}
}
impl Display for FabFireError {
    /// Human-readable rendering: variant name plus its payload, if any.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            FabFireError::ParseError => write!(f, "ParseError"),
            FabFireError::SerializationError => write!(f, "SerializationError"),
            FabFireError::DeserializationError(e) => write!(f, "DeserializationError: {}", e),
            FabFireError::CardError(err) => write!(f, "CardError: {}", err),
            FabFireError::InvalidMagic(magic) => write!(f, "InvalidMagic: {}", magic),
            FabFireError::InvalidToken(token) => write!(f, "InvalidToken: {}", token),
            FabFireError::InvalidURN(urn) => write!(f, "InvalidURN: {}", urn),
            FabFireError::InvalidCredentials(credentials) => {
                write!(f, "InvalidCredentials: {}", credentials)
            }
            FabFireError::Session(err) => write!(f, "Session: {}", err),
        }
    }
}

impl std::error::Error for FabFireError {}

impl MechanismError for FabFireError {
    /// Classify each error for rsasl: malformed input is a parse error,
    /// everything else is treated as a protocol-level failure.
    fn kind(&self) -> MechanismErrorKind {
        match self {
            FabFireError::ParseError => MechanismErrorKind::Parse,
            FabFireError::SerializationError => MechanismErrorKind::Protocol,
            FabFireError::DeserializationError(_) => MechanismErrorKind::Parse,
            FabFireError::CardError(_) => MechanismErrorKind::Protocol,
            FabFireError::InvalidMagic(_) => MechanismErrorKind::Protocol,
            FabFireError::InvalidToken(_) => MechanismErrorKind::Protocol,
            FabFireError::InvalidURN(_) => MechanismErrorKind::Protocol,
            FabFireError::InvalidCredentials(_) => MechanismErrorKind::Protocol,
            FabFireError::Session(_) => MechanismErrorKind::Protocol,
        }
    }
}
/// Card identity as reported by the reader, with optional key material.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
struct CardInfo {
    // 7-byte card UID, hex-encoded on the wire.
    #[serde(rename = "UID", with = "hex")]
    uid: [u8; 7],
    // presumably previous/next AES keys during key rotation — TODO confirm
    key_old: Option<Box<[u8]>>,
    key_new: Option<Box<[u8]>>,
}
/// Key material resolved for a card holder.
struct KeyInfo {
    // Authentication identity the key belongs to.
    authid: String,
    // presumably the DESFire key slot number — TODO confirm
    key_id: u8,
    key: Box<[u8]>,
}

/// Intermediate state of the DESFire mutual-authentication handshake.
struct AuthInfo {
    // Our random challenge (RndA in DESFire terminology — confirm).
    rnd_a: Vec<u8>,
    // The card's random challenge (RndB — confirm).
    rnd_b: Vec<u8>,
    // CBC initialization vector carried between handshake steps.
    iv: Vec<u8>,
}
/// JSON messages exchanged with the card reader, tagged by the "Cmd" field.
/// Variant names are part of the wire protocol, hence the non-camel-case
/// allowance.
#[allow(non_camel_case_types)]
#[derive(Debug, Deserialize, Serialize)]
#[serde(tag = "Cmd")]
enum CardCommand {
    // Human-readable message for the reader to display.
    message {
        #[serde(rename = "MssgID", skip_serializing_if = "Option::is_none")]
        msg_id: Option<u32>,
        #[serde(rename = "ClrTxt", skip_serializing_if = "Option::is_none")]
        clr_txt: Option<String>,
        #[serde(rename = "AddnTxt", skip_serializing_if = "Option::is_none")]
        addn_txt: Option<String>,
    },
    // APDU command bytes to forward to the card (hex-encoded on the wire).
    sendPICC {
        #[serde(
            deserialize_with = "hex::deserialize",
            serialize_with = "hex::serialize_upper"
        )]
        data: Vec<u8>,
    },
    // APDU response bytes read back from the card.
    readPICC {
        #[serde(
            deserialize_with = "hex::deserialize",
            serialize_with = "hex::serialize_upper"
        )]
        data: Vec<u8>,
    },
    // presumably tells the reader to halt the card — confirm with reader docs
    haltPICC,
    Key {
        data: String,
    },
    ConfirmUser,
}
/// Phase of the FabFire authentication state machine. Each variant names
/// the protocol step whose reply will be processed on the next `step` call.
enum Step {
    New,
    SelectApp,
    VerifyMagic,
    GetURN,
    GetToken,
    Authenticate1,
    Authenticate2,
}
/// Server-side state for one X-FABFIRE SASL authentication exchange.
pub struct FabFire {
    // Current phase of the protocol state machine.
    step: Step,
    // Filled in as the corresponding protocol steps complete.
    card_info: Option<CardInfo>,
    key_info: Option<KeyInfo>,
    auth_info: Option<AuthInfo>,
    // DESFire application id selected on the card (0x46,0x41,0x42 = "FAB").
    app_id: u32,
    // URN identifying this installation; compared against the card's URN.
    local_urn: String,
    desfire: Desfire,
}
/// Magic string expected in card file 0x01 of a provisioned FabFire card.
const MAGIC: &'static str = "FABACCESS\0DESFIRE\01.0\0";

impl FabFire {
    /// Constructor registered with rsasl (see the FABFIRE registration):
    /// builds a fresh server-side mechanism instance in its initial state.
    pub fn new_server(_sasl: &SASLConfig) -> Result<Box<dyn Authentication>, SASLError> {
        Ok(Box::new(Self {
            step: Step::New,
            card_info: None,
            key_info: None,
            auth_info: None,
            app_id: 0x464142,
            local_urn: "urn:fabaccess:lab:innovisionlab".to_string(),
            desfire: Desfire {
                card: None,
                session_key: None,
                cbc_iv: None,
            },
        }))
    }
}
impl Authentication for FabFire {
fn step(
&mut self,
session: &mut MechanismData<'_, '_>,
input: Option<&[u8]>,
writer: &mut dyn Write,
) -> Result<State, SessionError> {
match self.step {
Step::New => {
tracing::trace!("Step: New");
//receive card info (especially card UID) from reader
return match input {
None => Err(SessionError::InputDataRequired),
Some(cardinfo) => {
self.card_info = match serde_json::from_slice(cardinfo) {
Ok(card_info) => Some(card_info),
Err(e) => {
tracing::error!("Deserializing card_info failed: {:?}", e);
return Err(FabFireError::DeserializationError(e).into());
}
};
//select application
let buf = match self.desfire.select_application_cmd(self.app_id) {
Ok(buf) => match Vec::<u8>::try_from(buf) {
Ok(data) => data,
Err(e) => {
tracing::error!(
"Failed to convert APDUCommand to Vec<u8>: {:?}",
e
);
return Err(FabFireError::SerializationError.into());
}
},
Err(e) => {
tracing::error!("Failed to generate APDUCommand: {:?}", e);
return Err(FabFireError::SerializationError.into());
}
};
let cmd = CardCommand::sendPICC { data: buf };
return match serde_json::to_vec(&cmd) {
Ok(send_buf) => {
self.step = Step::SelectApp;
writer
.write_all(&send_buf)
.map_err(|e| SessionError::Io { source: e })?;
Ok(State::Running)
}
Err(e) => {
tracing::error!("Failed to serialize APDUCommand: {:?}", e);
Err(FabFireError::SerializationError.into())
}
};
}
};
}
Step::SelectApp => {
tracing::trace!("Step: SelectApp");
// check that we successfully selected the application
let response: CardCommand = match input {
None => {
return Err(SessionError::InputDataRequired);
}
Some(buf) => match serde_json::from_slice(buf)
.map_err(|e| FabFireError::DeserializationError(e))
{
Ok(response) => response,
Err(e) => {
tracing::error!("Deserializing data from card failed: {:?}", e);
return Err(e.into());
}
},
};
let apdu_response = match response {
CardCommand::readPICC { data } => APDUResponse::new(&*data),
_ => {
tracing::error!("Unexpected response: {:?}", response);
return Err(FabFireError::ParseError.into());
}
};
apdu_response
.check()
.map_err(|e| FabFireError::CardError(e))?;
// request the contents of the file containing the magic string
const MAGIC_FILE_ID: u8 = 0x01;
let buf = match self
.desfire
.read_data_chunk_cmd(MAGIC_FILE_ID, 0, MAGIC.len())
{
Ok(buf) => match Vec::<u8>::try_from(buf) {
Ok(data) => data,
Err(e) => {
tracing::error!("Failed to convert APDUCommand to Vec<u8>: {:?}", e);
return Err(FabFireError::SerializationError.into());
}
},
Err(e) => {
tracing::error!("Failed to generate APDUCommand: {:?}", e);
return Err(FabFireError::SerializationError.into());
}
};
let cmd = CardCommand::sendPICC { data: buf };
return match serde_json::to_vec(&cmd) {
Ok(send_buf) => {
self.step = Step::VerifyMagic;
writer
.write_all(&send_buf)
.map_err(|e| SessionError::Io { source: e })?;
Ok(State::Running)
}
Err(e) => {
tracing::error!("Failed to serialize APDUCommand: {:?}", e);
Err(FabFireError::SerializationError.into())
}
};
}
Step::VerifyMagic => {
tracing::trace!("Step: VerifyMagic");
// verify the magic string to determine that we have a valid fabfire card
let response: CardCommand = match input {
None => {
return Err(SessionError::InputDataRequired);
}
Some(buf) => match serde_json::from_slice(buf)
.map_err(|e| FabFireError::DeserializationError(e))
{
Ok(response) => response,
Err(e) => {
tracing::error!("Deserializing data from card failed: {:?}", e);
return Err(e.into());
}
},
};
let apdu_response = match response {
CardCommand::readPICC { data } => APDUResponse::new(&*data),
_ => {
tracing::error!("Unexpected response: {:?}", response);
return Err(FabFireError::ParseError.into());
}
};
match apdu_response.check() {
Ok(_) => {
match apdu_response.body {
Some(data) => {
if std::str::from_utf8(data.as_slice()) != Ok(MAGIC) {
tracing::error!("Invalid magic string");
return Err(FabFireError::ParseError.into());
}
}
None => {
tracing::error!("No data returned from card");
return Err(FabFireError::ParseError.into());
}
};
}
Err(e) => {
tracing::error!("Got invalid APDUResponse: {:?}", e);
return Err(FabFireError::ParseError.into());
}
}
// request the contents of the file containing the URN
const URN_FILE_ID: u8 = 0x02;
let buf = match self.desfire.read_data_chunk_cmd(
URN_FILE_ID,
0,
self.local_urn.as_bytes().len(),
) {
// TODO: support urn longer than 47 Bytes
Ok(buf) => match Vec::<u8>::try_from(buf) {
Ok(data) => data,
Err(e) => {
tracing::error!("Failed to convert APDUCommand to Vec<u8>: {:?}", e);
return Err(FabFireError::SerializationError.into());
}
},
Err(e) => {
tracing::error!("Failed to generate APDUCommand: {:?}", e);
return Err(FabFireError::SerializationError.into());
}
};
let cmd = CardCommand::sendPICC { data: buf };
return match serde_json::to_vec(&cmd) {
Ok(send_buf) => {
self.step = Step::GetURN;
writer
.write_all(&send_buf)
.map_err(|e| SessionError::Io { source: e })?;
Ok(State::Running)
}
Err(e) => {
tracing::error!("Failed to serialize APDUCommand: {:?}", e);
Err(FabFireError::SerializationError.into())
}
};
}
Step::GetURN => {
tracing::trace!("Step: GetURN");
// parse the urn and match it to our local urn
let response: CardCommand = match input {
None => {
return Err(SessionError::InputDataRequired);
}
Some(buf) => match serde_json::from_slice(buf)
.map_err(|e| FabFireError::DeserializationError(e))
{
Ok(response) => response,
Err(e) => {
tracing::error!("Deserializing data from card failed: {:?}", e);
return Err(e.into());
}
},
};
let apdu_response = match response {
CardCommand::readPICC { data } => APDUResponse::new(&*data),
_ => {
tracing::error!("Unexpected response: {:?}", response);
return Err(FabFireError::ParseError.into());
}
};
match apdu_response.check() {
Ok(_) => {
match apdu_response.body {
Some(data) => {
let received_urn = String::from_utf8(data).unwrap();
if received_urn != self.local_urn {
tracing::error!(
"URN mismatch: {:?} != {:?}",
received_urn,
self.local_urn
);
return Err(FabFireError::ParseError.into());
}
}
None => {
tracing::error!("No data returned from card");
return Err(FabFireError::ParseError.into());
}
};
}
Err(e) => {
tracing::error!("Got invalid APDUResponse: {:?}", e);
return Err(FabFireError::ParseError.into());
}
}
// request the contents of the file containing the URN
const TOKEN_FILE_ID: u8 = 0x03;
let buf = match self.desfire.read_data_chunk_cmd(
TOKEN_FILE_ID,
0,
MAX_BYTES_PER_TRANSACTION,
) {
// TODO: support data longer than 47 Bytes
Ok(buf) => match Vec::<u8>::try_from(buf) {
Ok(data) => data,
Err(e) => {
tracing::error!("Failed to convert APDUCommand to Vec<u8>: {:?}", e);
return Err(FabFireError::SerializationError.into());
}
},
Err(e) => {
tracing::error!("Failed to generate APDUCommand: {:?}", e);
return Err(FabFireError::SerializationError.into());
}
};
let cmd = CardCommand::sendPICC { data: buf };
return match serde_json::to_vec(&cmd) {
Ok(send_buf) => {
self.step = Step::GetToken;
writer
.write_all(&send_buf)
.map_err(|e| SessionError::Io { source: e })?;
Ok(State::Running)
}
Err(e) => {
tracing::error!("Failed to serialize APDUCommand: {:?}", e);
Err(FabFireError::SerializationError.into())
}
};
}
Step::GetToken => {
// println!("Step: GetToken");
// parse the token and select the appropriate user
let response: CardCommand = match input {
None => {
return Err(SessionError::InputDataRequired);
}
Some(buf) => match serde_json::from_slice(buf)
.map_err(|e| FabFireError::DeserializationError(e))
{
Ok(response) => response,
Err(e) => {
tracing::error!("Deserializing data from card failed: {:?}", e);
return Err(e.into());
}
},
};
let apdu_response = match response {
CardCommand::readPICC { data } => APDUResponse::new(&*data),
_ => {
tracing::error!("Unexpected response: {:?}", response);
return Err(FabFireError::ParseError.into());
}
};
match apdu_response.check() {
Ok(_) => {
match apdu_response.body {
Some(data) => {
let authid = String::from_utf8(data)
.unwrap()
.trim_matches(char::from(0))
.to_string();
let prov = ThisProvider::<AuthId>::with(&authid);
let key = session
.need_with::<FabFireCardKey, _, _>(&prov, |key| {
Ok(Box::from(key.as_slice()))
})?;
self.key_info = Some(KeyInfo {
authid,
key_id: 0x01,
key,
});
}
None => {
tracing::error!("No data in response");
return Err(FabFireError::ParseError.into());
}
};
}
Err(e) => {
tracing::error!("Failed to check response: {:?}", e);
return Err(FabFireError::ParseError.into());
}
}
let buf = match self
.desfire
.authenticate_iso_aes_challenge_cmd(self.key_info.as_ref().unwrap().key_id)
{
Ok(buf) => match Vec::<u8>::try_from(buf) {
Ok(data) => data,
Err(e) => {
tracing::error!("Failed to convert to Vec<u8>: {:?}", e);
return Err(FabFireError::SerializationError.into());
}
},
Err(e) => {
tracing::error!("Failed to create authenticate command: {:?}", e);
return Err(FabFireError::SerializationError.into());
}
};
let cmd = CardCommand::sendPICC { data: buf };
return match serde_json::to_vec(&cmd) {
Ok(send_buf) => {
self.step = Step::Authenticate1;
writer
.write_all(&send_buf)
.map_err(|e| SessionError::Io { source: e })?;
Ok(State::Running)
}
Err(e) => {
tracing::error!("Failed to serialize command: {:?}", e);
Err(FabFireError::SerializationError.into())
}
};
}
Step::Authenticate1 => {
tracing::trace!("Step: Authenticate1");
let response: CardCommand = match input {
None => {
return Err(SessionError::InputDataRequired);
}
Some(buf) => match serde_json::from_slice(buf)
.map_err(|e| FabFireError::DeserializationError(e))
{
Ok(response) => response,
Err(e) => {
tracing::error!("Failed to deserialize response: {:?}", e);
return Err(e.into());
}
},
};
let apdu_response = match response {
CardCommand::readPICC { data } => APDUResponse::new(&*data),
_ => {
tracing::error!("Unexpected response: {:?}", response);
return Err(FabFireError::ParseError.into());
}
};
match apdu_response.check() {
Ok(_) => {
match apdu_response.body {
Some(data) => {
let rnd_b_enc = data.as_slice();
//FIXME: This is ugly, we should find a better way to make the function testable
//TODO: Check if we need a CSPRNG here
let rnd_a: [u8; 16] = rand::random();
let (cmd_challenge_response, rnd_b, iv) = self
.desfire
.authenticate_iso_aes_response_cmd(
rnd_b_enc,
&*(self.key_info.as_ref().unwrap().key),
&rnd_a,
)
.unwrap();
self.auth_info = Some(AuthInfo {
rnd_a: Vec::<u8>::from(rnd_a),
rnd_b,
iv,
});
let buf = match Vec::<u8>::try_from(cmd_challenge_response) {
Ok(data) => data,
Err(e) => {
tracing::error!("Failed to convert to Vec<u8>: {:?}", e);
return Err(FabFireError::SerializationError.into());
}
};
let cmd = CardCommand::sendPICC { data: buf };
return match serde_json::to_vec(&cmd) {
Ok(send_buf) => {
self.step = Step::Authenticate2;
writer
.write_all(&send_buf)
.map_err(|e| SessionError::Io { source: e })?;
Ok(State::Running)
}
Err(e) => {
tracing::error!("Failed to serialize command: {:?}", e);
Err(FabFireError::SerializationError.into())
}
};
}
None => {
tracing::error!("Got invalid response: {:?}", apdu_response);
return Err(FabFireError::ParseError.into());
}
};
}
Err(e) => {
tracing::error!("Failed to check response: {:?}", e);
return Err(FabFireError::ParseError.into());
}
}
}
Step::Authenticate2 => {
// println!("Step: Authenticate2");
let response: CardCommand = match input {
None => {
return Err(SessionError::InputDataRequired);
}
Some(buf) => match serde_json::from_slice(buf)
.map_err(|e| FabFireError::DeserializationError(e))
{
Ok(response) => response,
Err(e) => {
tracing::error!("Failed to deserialize response: {:?}", e);
return Err(e.into());
}
},
};
let apdu_response = match response {
CardCommand::readPICC { data } => APDUResponse::new(&*data),
_ => {
tracing::error!("Got invalid response: {:?}", response);
return Err(FabFireError::ParseError.into());
}
};
match apdu_response.check() {
Ok(_) => {
match apdu_response.body {
Some(data) => match self.auth_info.as_ref() {
None => {
return Err(FabFireError::ParseError.into());
}
Some(auth_info) => {
if self
.desfire
.authenticate_iso_aes_verify(
data.as_slice(),
auth_info.rnd_a.as_slice(),
auth_info.rnd_b.as_slice(),
&*(self.key_info.as_ref().unwrap().key),
auth_info.iv.as_slice(),
)
.is_ok()
{
let cmd = CardCommand::message {
msg_id: Some(4),
clr_txt: None,
addn_txt: Some("".to_string()),
};
return match serde_json::to_vec(&cmd) {
Ok(send_buf) => {
self.step = Step::Authenticate1;
writer
.write_all(&send_buf)
.map_err(|e| SessionError::Io { source: e })?;
struct Prov<'a> {
authid: &'a str,
}
impl<'a> Provider<'a> for Prov<'a> {
fn provide(
&self,
req: &mut Demand<'a>,
) -> DemandReply<()>
{
req.provide_ref::<AuthId>(self.authid)?
.done()
}
}
let prov = Prov {
authid: &self.key_info.as_ref().unwrap().authid,
};
session.validate(&prov)?;
return Ok(State::Finished(MessageSent::Yes));
}
Err(e) => {
tracing::error!(
"Failed to serialize command: {:?}",
e
);
Err(FabFireError::SerializationError.into())
}
};
}
}
},
None => {
tracing::error!("got empty response");
return Err(FabFireError::ParseError.into());
}
};
}
Err(_e) => {
tracing::error!("Got invalid response: {:?}", apdu_response);
return Err(
FabFireError::InvalidCredentials(format!("{}", apdu_response)).into(),
);
}
}
}
}
return Ok(State::Finished(MessageSent::No));
}
}

View File

@ -0,0 +1,25 @@
mod server;
pub use server::FabFire;
use rsasl::mechname::Mechname;
use rsasl::registry::{Matches, Mechanism, Named, Side, MECHANISMS};
/// SASL mechanism name under which the binary FabFire handshake is registered.
const MECHNAME: &'static Mechname = &Mechname::const_new_unchecked(b"X-FABFIRE-BIN");

// Entry in rsasl's global mechanism registry, collected at link time via linkme.
#[linkme::distributed_slice(MECHANISMS)]
pub static FABFIRE: Mechanism = Mechanism::build(
    MECHNAME,
    // priority relative to other registered mechanisms
    300,
    // no client-side implementation is provided
    None,
    // server-side session constructor
    Some(FabFire::new_server),
    // NOTE(review): only a server constructor is supplied, yet the mechanism is
    // registered as Side::Client — confirm against rsasl registry semantics.
    Side::Client,
    |_| Some(Matches::<Select>::name()),
    |_| true,
);
/// Marker type used by the registry to select the [`FABFIRE`] mechanism.
struct Select;

impl Named for Select {
    fn mech() -> &'static Mechanism {
        &FABFIRE
    }
}

View File

@ -0,0 +1,532 @@
use desfire::desfire::desfire::MAX_BYTES_PER_TRANSACTION;
use desfire::desfire::Desfire;
use desfire::error::Error as DesfireError;
use desfire::iso7816_4::apduresponse::APDUResponse;
use rsasl::mechanism::{
Authentication, Demand, DemandReply, MechanismData, MechanismError, MechanismErrorKind,
Provider, State, ThisProvider,
};
use rsasl::prelude::{MessageSent, SASLConfig, SASLError, SessionError};
use rsasl::property::AuthId;
use serde::{Deserialize, Serialize};
use std::convert::TryFrom;
use std::fmt::{Debug, Display, Formatter};
use std::io::Write;
use crate::authentication::fabfire::FabFireCardKey;
use crate::CONFIG;
/// Errors produced while driving the FabFire binary authentication exchange.
enum FabFireError {
    /// A response from the card could not be parsed or had unexpected content.
    ParseError,
    /// An APDU command could not be converted/serialized for sending.
    SerializationError,
    /// JSON received from the reader could not be deserialized.
    DeserializationError(serde_json::Error),
    /// The DESFire layer reported an error status.
    CardError(DesfireError),
    // NOTE(review): the following payload variants are not constructed in the
    // visible code except InvalidCredentials; descriptions follow their names.
    InvalidMagic(String),
    InvalidToken(String),
    InvalidURN(String),
    /// The card failed the AES challenge-response verification.
    InvalidCredentials(String),
    /// An underlying SASL session error.
    Session(SessionError),
}
impl Debug for FabFireError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
FabFireError::ParseError => write!(f, "ParseError"),
FabFireError::SerializationError => write!(f, "SerializationError"),
FabFireError::DeserializationError(e) => write!(f, "DeserializationError: {}", e),
FabFireError::CardError(err) => write!(f, "CardError: {}", err),
FabFireError::InvalidMagic(magic) => write!(f, "InvalidMagic: {}", magic),
FabFireError::InvalidToken(token) => write!(f, "InvalidToken: {}", token),
FabFireError::InvalidURN(urn) => write!(f, "InvalidURN: {}", urn),
FabFireError::InvalidCredentials(credentials) => {
write!(f, "InvalidCredentials: {}", credentials)
}
FabFireError::Session(err) => write!(f, "Session: {}", err),
}
}
}
impl Display for FabFireError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
FabFireError::ParseError => write!(f, "ParseError"),
FabFireError::SerializationError => write!(f, "SerializationError"),
FabFireError::DeserializationError(e) => write!(f, "DeserializationError: {}", e),
FabFireError::CardError(err) => write!(f, "CardError: {}", err),
FabFireError::InvalidMagic(magic) => write!(f, "InvalidMagic: {}", magic),
FabFireError::InvalidToken(token) => write!(f, "InvalidToken: {}", token),
FabFireError::InvalidURN(urn) => write!(f, "InvalidURN: {}", urn),
FabFireError::InvalidCredentials(credentials) => {
write!(f, "InvalidCredentials: {}", credentials)
}
FabFireError::Session(err) => write!(f, "Session: {}", err),
}
}
}
impl std::error::Error for FabFireError {}

impl MechanismError for FabFireError {
    /// Map onto rsasl's error taxonomy: failures while decoding bytes from the
    /// card are parse errors, everything else is a protocol error.
    fn kind(&self) -> MechanismErrorKind {
        match self {
            FabFireError::ParseError | FabFireError::DeserializationError(_) => {
                MechanismErrorKind::Parse
            }
            FabFireError::SerializationError
            | FabFireError::CardError(_)
            | FabFireError::InvalidMagic(_)
            | FabFireError::InvalidToken(_)
            | FabFireError::InvalidURN(_)
            | FabFireError::InvalidCredentials(_)
            | FabFireError::Session(_) => MechanismErrorKind::Protocol,
        }
    }
}
/// Card identification data exchanged with the reader.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
struct CardInfo {
    // 7-byte DESFire UID, transported hex-encoded under the JSON key "UID"
    #[serde(rename = "UID", with = "hex")]
    uid: [u8; 7],
    key_old: Option<Box<[u8]>>,
    key_new: Option<Box<[u8]>>,
}

/// Key material resolved for the user named on the card's token file.
struct KeyInfo {
    // authentication id (user name) read from the card
    authid: String,
    // DESFire key slot used for authentication (0x01 in the visible code)
    key_id: u8,
    key: Box<[u8]>,
}

/// Intermediate values carried between the two AES authentication rounds.
struct AuthInfo {
    // our randomly generated challenge (rand::random in Authenticate1)
    rnd_a: Vec<u8>,
    // challenge value returned by the desfire helper in Authenticate1
    rnd_b: Vec<u8>,
    // cipher IV carried over to the verification round
    iv: Vec<u8>,
}
/// States of the handshake; `step()` advances one state per message
/// round-trip, in declaration order.
enum Step {
    New,
    SelectApp,
    VerifyMagic,
    GetURN,
    GetToken,
    Authenticate1,
    Authenticate2,
}
/// Server-side state of one X-FABFIRE-BIN authentication session.
pub struct FabFire {
    // current position in the handshake state machine
    step: Step,
    // card info from the reader — never written in the visible code
    card_info: Option<CardInfo>,
    // key material resolved after reading the token file (GetToken)
    key_info: Option<KeyInfo>,
    // challenge state produced in Authenticate1, consumed in Authenticate2
    auth_info: Option<AuthInfo>,
    // DESFire application id (0x464142) selected on the card
    app_id: u32,
    // URN this server expects to find on the card
    local_urn: String,
    desfire: Desfire,
}

/// Expected contents of the card's magic file; checked in `Step::VerifyMagic`.
const MAGIC: &'static str = "FABACCESS\0DESFIRE\01.0\0";
impl FabFire {
    /// Construct a fresh server-side mechanism instance.
    ///
    /// The expected card URN is derived from the configured space name; when no
    /// configuration has been loaded yet, "generic" is used as a fallback.
    pub fn new_server(_sasl: &SASLConfig) -> Result<Box<dyn Authentication>, SASLError> {
        let space = match CONFIG.get().map(|c| c.spacename.as_str()) {
            Some(space) => space,
            None => {
                tracing::error!("No space configured");
                "generic"
            }
        };

        Ok(Box::new(Self {
            step: Step::New,
            card_info: None,
            key_info: None,
            auth_info: None,
            app_id: 0x464142,
            local_urn: format!("urn:fabaccess:lab:{space}"),
            desfire: Desfire {
                card: None,
                session_key: None,
                cbc_iv: None,
            },
        }))
    }
}
impl Authentication for FabFire {
    /// Advance the binary FabFire handshake by one round-trip.
    ///
    /// Each call consumes one raw message from the card reader (`input`),
    /// checks it according to the current `self.step`, and writes the next raw
    /// APDU command to `writer`. Returns `State::Running` while further rounds
    /// are needed and `State::Finished` once the session has been validated.
    fn step(
        &mut self,
        session: &mut MechanismData<'_, '_>,
        input: Option<&[u8]>,
        writer: &mut dyn Write,
    ) -> Result<State, SessionError> {
        match self.step {
            // First contact: any input from the reader triggers application select.
            Step::New => {
                tracing::trace!("Step: New");
                //receive card info (especially card UID) from reader
                return match input {
                    None => Err(SessionError::InputDataRequired),
                    Some(_) => {
                        //select application
                        return match self.desfire.select_application_cmd(self.app_id) {
                            Ok(buf) => match Vec::<u8>::try_from(buf) {
                                Ok(data) => {
                                    self.step = Step::SelectApp;
                                    writer
                                        .write_all(&data)
                                        .map_err(|e| SessionError::Io { source: e })?;
                                    Ok(State::Running)
                                }
                                Err(e) => {
                                    tracing::error!(
                                        "Failed to convert APDUCommand to Vec<u8>: {:?}",
                                        e
                                    );
                                    return Err(FabFireError::SerializationError.into());
                                }
                            },
                            Err(e) => {
                                tracing::error!("Failed to generate APDUCommand: {:?}", e);
                                return Err(FabFireError::SerializationError.into());
                            }
                        };
                    }
                };
            }
            // Application selected: verify status, then request the magic file.
            Step::SelectApp => {
                tracing::trace!("Step: SelectApp");
                // check that we successfully selected the application
                let apdu_response = match input {
                    Some(data) => APDUResponse::new(data),
                    None => return Err(SessionError::InputDataRequired),
                };
                apdu_response
                    .check()
                    .map_err(|e| FabFireError::CardError(e))?;

                // request the contents of the file containing the magic string
                const MAGIC_FILE_ID: u8 = 0x01;
                return match self
                    .desfire
                    .read_data_chunk_cmd(MAGIC_FILE_ID, 0, MAGIC.len())
                {
                    Ok(buf) => match Vec::<u8>::try_from(buf) {
                        Ok(data) => {
                            self.step = Step::VerifyMagic;
                            writer
                                .write_all(&data)
                                .map_err(|e| SessionError::Io { source: e })?;
                            Ok(State::Running)
                        }
                        Err(e) => {
                            tracing::error!("Failed to convert APDUCommand to Vec<u8>: {:?}", e);
                            return Err(FabFireError::SerializationError.into());
                        }
                    },
                    Err(e) => {
                        tracing::error!("Failed to generate APDUCommand: {:?}", e);
                        return Err(FabFireError::SerializationError.into());
                    }
                };
            }
            // Magic file read: compare against MAGIC, then request the URN file.
            Step::VerifyMagic => {
                tracing::trace!("Step: VerifyMagic");
                // verify the magic string to determine that we have a valid fabfire card
                let apdu_response = match input {
                    Some(data) => APDUResponse::new(data),
                    None => return Err(SessionError::InputDataRequired),
                };
                match apdu_response.check() {
                    Ok(_) => {
                        match apdu_response.body {
                            Some(data) => {
                                if std::str::from_utf8(data.as_slice()) != Ok(MAGIC) {
                                    tracing::error!("Invalid magic string");
                                    return Err(FabFireError::ParseError.into());
                                }
                            }
                            None => {
                                tracing::error!("No data returned from card");
                                return Err(FabFireError::ParseError.into());
                            }
                        };
                    }
                    Err(e) => {
                        tracing::error!("Got invalid APDUResponse: {:?}", e);
                        return Err(FabFireError::ParseError.into());
                    }
                }

                // request the contents of the file containing the URN
                const URN_FILE_ID: u8 = 0x02;
                return match self.desfire.read_data_chunk_cmd(
                    URN_FILE_ID,
                    0,
                    self.local_urn.as_bytes().len(),
                ) {
                    // TODO: support urn longer than 47 Bytes
                    Ok(buf) => match Vec::<u8>::try_from(buf) {
                        Ok(data) => {
                            self.step = Step::GetURN;
                            writer
                                .write_all(&data)
                                .map_err(|e| SessionError::Io { source: e })?;
                            Ok(State::Running)
                        }
                        Err(e) => {
                            tracing::error!("Failed to convert APDUCommand to Vec<u8>: {:?}", e);
                            return Err(FabFireError::SerializationError.into());
                        }
                    },
                    Err(e) => {
                        tracing::error!("Failed to generate APDUCommand: {:?}", e);
                        return Err(FabFireError::SerializationError.into());
                    }
                };
            }
            // URN file read: must equal local_urn, then request the token file.
            Step::GetURN => {
                tracing::trace!("Step: GetURN");
                // parse the urn and match it to our local urn
                let apdu_response = match input {
                    Some(data) => APDUResponse::new(data),
                    None => return Err(SessionError::InputDataRequired),
                };
                match apdu_response.check() {
                    Ok(_) => {
                        match apdu_response.body {
                            Some(data) => {
                                let received_urn = String::from_utf8(data).unwrap();
                                if received_urn != self.local_urn {
                                    tracing::error!(
                                        "URN mismatch: {:?} != {:?}",
                                        received_urn,
                                        self.local_urn
                                    );
                                    return Err(FabFireError::ParseError.into());
                                }
                            }
                            None => {
                                tracing::error!("No data returned from card");
                                return Err(FabFireError::ParseError.into());
                            }
                        };
                    }
                    Err(e) => {
                        tracing::error!("Got invalid APDUResponse: {:?}", e);
                        return Err(FabFireError::ParseError.into());
                    }
                }

                // request the contents of the file containing the URN
                const TOKEN_FILE_ID: u8 = 0x03;
                return match self.desfire.read_data_chunk_cmd(
                    TOKEN_FILE_ID,
                    0,
                    MAX_BYTES_PER_TRANSACTION,
                ) {
                    // TODO: support data longer than 47 Bytes
                    Ok(buf) => match Vec::<u8>::try_from(buf) {
                        Ok(data) => {
                            self.step = Step::GetToken;
                            writer
                                .write_all(&data)
                                .map_err(|e| SessionError::Io { source: e })?;
                            Ok(State::Running)
                        }
                        Err(e) => {
                            tracing::error!("Failed to convert APDUCommand to Vec<u8>: {:?}", e);
                            return Err(FabFireError::SerializationError.into());
                        }
                    },
                    Err(e) => {
                        tracing::error!("Failed to generate APDUCommand: {:?}", e);
                        return Err(FabFireError::SerializationError.into());
                    }
                };
            }
            // Token file read: map the NUL-trimmed authid to its card key via the
            // session callback, then start the AES challenge.
            Step::GetToken => {
                // println!("Step: GetToken");
                // parse the token and select the appropriate user
                let apdu_response = match input {
                    Some(data) => APDUResponse::new(data),
                    None => return Err(SessionError::InputDataRequired),
                };
                match apdu_response.check() {
                    Ok(_) => {
                        match apdu_response.body {
                            Some(data) => {
                                let authid = String::from_utf8(data)
                                    .unwrap()
                                    .trim_matches(char::from(0))
                                    .to_string();
                                let prov = ThisProvider::<AuthId>::with(&authid);
                                let key = session
                                    .need_with::<FabFireCardKey, _, _>(&prov, |key| {
                                        Ok(Box::from(key.as_slice()))
                                    })?;
                                self.key_info = Some(KeyInfo {
                                    authid,
                                    key_id: 0x01,
                                    key,
                                });
                            }
                            None => {
                                tracing::error!("No data in response");
                                return Err(FabFireError::ParseError.into());
                            }
                        };
                    }
                    Err(e) => {
                        tracing::error!("Failed to check response: {:?}", e);
                        return Err(FabFireError::ParseError.into());
                    }
                }
                return match self
                    .desfire
                    .authenticate_iso_aes_challenge_cmd(self.key_info.as_ref().unwrap().key_id)
                {
                    Ok(buf) => match Vec::<u8>::try_from(buf) {
                        Ok(data) => {
                            self.step = Step::Authenticate1;
                            writer
                                .write_all(&data)
                                .map_err(|e| SessionError::Io { source: e })?;
                            Ok(State::Running)
                        }
                        Err(e) => {
                            tracing::error!("Failed to convert to Vec<u8>: {:?}", e);
                            return Err(FabFireError::SerializationError.into());
                        }
                    },
                    Err(e) => {
                        tracing::error!("Failed to create authenticate command: {:?}", e);
                        return Err(FabFireError::SerializationError.into());
                    }
                };
            }
            // First AES round: answer the card's encrypted challenge and remember
            // rnd_a/rnd_b/iv for the verification round.
            Step::Authenticate1 => {
                tracing::trace!("Step: Authenticate1");
                let apdu_response = match input {
                    Some(data) => APDUResponse::new(data),
                    None => return Err(SessionError::InputDataRequired),
                };
                return match apdu_response.check() {
                    Ok(_) => {
                        match apdu_response.body {
                            Some(data) => {
                                let rnd_b_enc = data.as_slice();

                                //FIXME: This is ugly, we should find a better way to make the function testable
                                //TODO: Check if we need a CSPRNG here
                                let rnd_a: [u8; 16] = rand::random();

                                let (cmd_challenge_response, rnd_b, iv) = self
                                    .desfire
                                    .authenticate_iso_aes_response_cmd(
                                        rnd_b_enc,
                                        &*(self.key_info.as_ref().unwrap().key),
                                        &rnd_a,
                                    )
                                    .unwrap();
                                self.auth_info = Some(AuthInfo {
                                    rnd_a: Vec::<u8>::from(rnd_a),
                                    rnd_b,
                                    iv,
                                });
                                match Vec::<u8>::try_from(cmd_challenge_response) {
                                    Ok(data) => {
                                        self.step = Step::Authenticate2;
                                        writer
                                            .write_all(&data)
                                            .map_err(|e| SessionError::Io { source: e })?;
                                        Ok(State::Running)
                                    }
                                    Err(e) => {
                                        tracing::error!("Failed to convert to Vec<u8>: {:?}", e);
                                        return Err(FabFireError::SerializationError.into());
                                    }
                                }
                            }
                            None => {
                                tracing::error!("Got invalid response: {:?}", apdu_response);
                                Err(FabFireError::ParseError.into())
                            }
                        }
                    }
                    Err(e) => {
                        tracing::error!("Failed to check response: {:?}", e);
                        Err(FabFireError::ParseError.into())
                    }
                };
            }
            // Second AES round: verify the card's proof; on success validate the
            // session for the stored authid and finish.
            Step::Authenticate2 => {
                // println!("Step: Authenticate2");
                let apdu_response = match input {
                    Some(data) => APDUResponse::new(data),
                    None => return Err(SessionError::InputDataRequired),
                };
                match apdu_response.check() {
                    Ok(_) => {
                        match apdu_response.body {
                            Some(data) => match self.auth_info.as_ref() {
                                None => {
                                    return Err(FabFireError::ParseError.into());
                                }
                                Some(auth_info) => {
                                    if self
                                        .desfire
                                        .authenticate_iso_aes_verify(
                                            data.as_slice(),
                                            auth_info.rnd_a.as_slice(),
                                            auth_info.rnd_b.as_slice(),
                                            &*(self.key_info.as_ref().unwrap().key),
                                            auth_info.iv.as_slice(),
                                        )
                                        .is_ok()
                                    {
                                        // ad-hoc provider exposing the authid to
                                        // the validation callback
                                        struct Prov<'a> {
                                            authid: &'a str,
                                        }
                                        impl<'a> Provider<'a> for Prov<'a> {
                                            fn provide(
                                                &self,
                                                req: &mut Demand<'a>,
                                            ) -> DemandReply<()>
                                            {
                                                req.provide_ref::<AuthId>(self.authid)?.done()
                                            }
                                        }
                                        let prov = Prov {
                                            authid: &self.key_info.as_ref().unwrap().authid,
                                        };
                                        session.validate(&prov)?;
                                        return Ok(State::Finished(MessageSent::Yes));
                                    }
                                    // NOTE(review): verification failure falls
                                    // through to Finished(MessageSent::No) below
                                    // instead of raising InvalidCredentials —
                                    // confirm this is intended.
                                }
                            },
                            None => {
                                tracing::error!("got empty response");
                                return Err(FabFireError::ParseError.into());
                            }
                        };
                    }
                    Err(_e) => {
                        tracing::error!("Got invalid response: {:?}", apdu_response);
                        return Err(
                            FabFireError::InvalidCredentials(format!("{}", apdu_response)).into(),
                        );
                    }
                }
            }
        }

        return Ok(State::Finished(MessageSent::No));
    }
}

157
bffhd/authentication/mod.rs Normal file
View File

@ -0,0 +1,157 @@
use crate::users::Users;
use miette::{IntoDiagnostic, WrapErr};
use rsasl::callback::{CallbackError, Context, Request, SessionCallback, SessionData};
use rsasl::mechanism::SessionError;
use rsasl::prelude::{Mechname, SASLConfig, SASLServer, Session, Validation};
use rsasl::property::{AuthId, AuthzId, Password};
use rsasl::validate::{Validate, ValidationError};
use std::sync::Arc;
use crate::authentication::fabfire::FabFireCardKey;
use crate::users::db::User;
mod fabfire;
mod fabfire_bin;
/// rsasl session callback backed by the user database.
struct Callback {
    users: Users,
    // parent span under which all validation logging is nested
    span: tracing::Span,
}

impl Callback {
    pub fn new(users: Users) -> Self {
        let span = tracing::info_span!("SASL callback");
        Self { users, span }
    }
}
impl SessionCallback for Callback {
    /// Satisfy property requests made during a running SASL session.
    ///
    /// Only `FabFireCardKey` is provided: the user named by the session's
    /// `AuthId` is looked up and a 16-byte key is decoded from the hex string
    /// stored under the user's "cardkey" KV entry.
    fn callback(
        &self,
        _session_data: &SessionData,
        context: &Context,
        request: &mut Request,
    ) -> Result<(), SessionError> {
        if let Some(authid) = context.get_ref::<AuthId>() {
            request.satisfy_with::<FabFireCardKey, _>(|| {
                let user = self.users.get_user(authid).ok_or(CallbackError::NoValue)?;
                let kv = user
                    .userdata
                    .kv
                    .get("cardkey")
                    .ok_or(CallbackError::NoValue)?;
                // any decode or size problem is reported as "no value available"
                let card_key =
                    <[u8; 16]>::try_from(hex::decode(kv).map_err(|_| CallbackError::NoValue)?)
                        .map_err(|_| CallbackError::NoValue)?;
                Ok(card_key)
            })?;
        }
        Ok(())
    }

    /// Decide whether the finished exchange maps to a known user.
    ///
    /// For `PLAIN` the password is checked against the user DB; for the
    /// FabFire mechanisms only the user lookup remains. Returning without
    /// calling `validate.finalize` leaves the session unauthenticated.
    fn validate(
        &self,
        session_data: &SessionData,
        context: &Context,
        validate: &mut Validate<'_>,
    ) -> Result<(), ValidationError> {
        let span = tracing::info_span!(parent: &self.span, "validate");
        let _guard = span.enter();
        if validate.is::<V>() {
            match session_data.mechanism().mechanism.as_str() {
                "PLAIN" => {
                    let authcid = context
                        .get_ref::<AuthId>()
                        .ok_or(ValidationError::MissingRequiredProperty)?;
                    let authzid = context
                        .get_ref::<AuthzId>()
                        .ok_or(ValidationError::MissingRequiredProperty)?;
                    let password = context
                        .get_ref::<Password>()
                        .ok_or(ValidationError::MissingRequiredProperty)?;

                    // NOTE(review): a non-empty authzid returns early without
                    // finalizing, i.e. impersonation requests are rejected —
                    // confirm that this is the intended semantics.
                    if !authzid.is_empty() {
                        return Ok(());
                    }

                    if let Some(user) = self.users.get_user(authcid) {
                        match user.check_password(password) {
                            Ok(true) => validate.finalize::<V>(user),
                            Ok(false) => {
                                tracing::warn!(authid=%authcid, "AUTH FAILED: bad password");
                            }
                            Err(error) => {
                                tracing::warn!(authid=%authcid, "Bad DB entry: {}", error);
                            }
                        }
                    } else {
                        tracing::warn!(authid=%authcid, "AUTH FAILED: no such user");
                    }
                }
                // card mechanisms have already proven key possession
                "X-FABFIRE" | "X-FABFIRE-BIN" => {
                    let authcid = context
                        .get_ref::<AuthId>()
                        .ok_or(ValidationError::MissingRequiredProperty)?;
                    if let Some(user) = self.users.get_user(authcid) {
                        validate.finalize::<V>(user)
                    }
                }
                _ => {}
            }
        }
        Ok(())
    }
}
/// Validation marker: a successful SASL authentication yields the DB [`User`].
pub struct V;
impl Validation for V {
    type Value = User;
}
/// Shared interior of [`AuthenticationHandle`]; cheap to clone.
#[derive(Clone)]
struct Inner {
    // process-wide SASL configuration (mechanisms + callback)
    rsasl: Arc<SASLConfig>,
}
impl Inner {
    pub fn new(rsasl: Arc<SASLConfig>) -> Self {
        Self { rsasl }
    }
}
/// Cloneable handle for starting server-side SASL authentication sessions.
#[derive(Clone)]
pub struct AuthenticationHandle {
    inner: Inner,
}
impl AuthenticationHandle {
    /// Build the SASL backend: installs the [`Callback`] over the user DB and
    /// logs which mechanisms the linked rsasl build provides.
    pub fn new(userdb: Users) -> Self {
        let span = tracing::debug_span!("authentication");
        let _guard = span.enter();

        let config = SASLConfig::builder()
            .with_defaults()
            .with_callback(Callback::new(userdb))
            .unwrap();

        let mechs: Vec<&'static str> = SASLServer::<V>::new(config.clone())
            .get_available()
            .into_iter()
            .map(|m| m.mechanism.as_str())
            .collect();
        tracing::info!(available_mechs = mechs.len(), "initialized sasl backend");
        tracing::debug!(?mechs, "available mechs");

        Self {
            inner: Inner::new(config),
        }
    }

    /// Start a server-side SASL session for the given mechanism name.
    ///
    /// # Errors
    /// Fails when the mechanism is unknown or cannot be started.
    pub fn start(&self, mechanism: &Mechname) -> miette::Result<Session<V>> {
        // `wrap_err` already yields the target `miette::Result`; the previous
        // `Ok(expr?)` round-trip was a needless question mark.
        SASLServer::new(self.inner.rsasl.clone())
            .start_suggested(mechanism)
            .into_diagnostic()
            .wrap_err("Failed to start a SASL authentication with the given mechanism")
    }

    /// Fresh `SASLServer` handle sharing the same configuration.
    pub fn sess(&self) -> SASLServer<V> {
        SASLServer::new(self.inner.rsasl.clone())
    }
}

View File

@ -0,0 +1,22 @@
use crate::authorization::roles::Roles;
use crate::Users;
pub mod permissions;
pub mod roles;
/// Cheap-to-clone handle bundling the user and role stores for
/// authorization queries.
#[derive(Clone)]
pub struct AuthorizationHandle {
    users: Users,
    // NOTE(review): not read by any visible method yet
    roles: Roles,
}

impl AuthorizationHandle {
    pub fn new(users: Users, roles: Roles) -> Self {
        Self { users, roles }
    }

    /// Roles assigned to the given user id, or `None` if the user is unknown.
    pub fn get_user_roles(&self, uid: impl AsRef<str>) -> Option<Vec<String>> {
        self.users
            .get_user(uid.as_ref())
            .map(|user| user.userdata.roles.clone())
    }
}

View File

@ -0,0 +1,358 @@
//! Access control logic
//!
use std::cmp::Ordering;
use std::convert::{Into, TryFrom};
use std::fmt;
/// Returns true for the character separating permission path segments.
fn is_sep_char(c: char) -> bool {
    matches!(c, '.')
}
#[derive(Debug, Clone, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
/// A set of privileges to a thing
pub struct PrivilegesBuf {
    /// Which permission is required to know about the existence of this thing
    pub disclose: PermissionBuf,
    /// Which permission is required to read this thing
    pub read: PermissionBuf,
    /// Which permission is required to write parts of this thing
    pub write: PermissionBuf,
    /// Which permission is required to manage all parts of this thing
    pub manage: PermissionBuf,
}
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[repr(transparent)]
#[serde(transparent)]
/// An owned permission string
///
/// This is under the hood just a fancy std::String.
// TODO: What is the possible fallout from homograph attacks?
// i.e. "bffh.perm" is not the same as "bffհ.реrm" (Armenian 'հ':Հ and Cyrillic 'е':Е)
// See also https://util.unicode.org/UnicodeJsps/confusables.jsp
pub struct PermissionBuf {
    // invariant: a `.`-separated permission path
    inner: String,
}
impl PermissionBuf {
#[inline(always)]
/// Allocate an empty `PermissionBuf`
pub fn new() -> Self {
PermissionBuf {
inner: String::new(),
}
}
#[inline(always)]
/// Allocate a `PermissionBuf` with the given capacity given to the internal [`String`]
pub fn with_capacity(cap: usize) -> Self {
PermissionBuf {
inner: String::with_capacity(cap),
}
}
#[inline(always)]
pub fn as_permission(&self) -> &Permission {
self.as_ref()
}
pub fn push<P: AsRef<Permission>>(&mut self, perm: P) {
self._push(perm.as_ref())
}
pub fn _push(&mut self, perm: &Permission) {
// in general we always need a separator unless the last byte is one or the string is empty
let need_sep = self
.inner
.chars()
.rev()
.next()
.map(|c| !is_sep_char(c))
.unwrap_or(false);
if need_sep {
self.inner.push('.')
}
self.inner.push_str(perm.as_str())
}
#[inline(always)]
pub const fn from_string_unchecked(inner: String) -> Self {
Self { inner }
}
#[inline]
pub fn from_perm(perm: &Permission) -> Self {
Self {
inner: perm.as_str().to_string(),
}
}
#[inline(always)]
pub fn into_string(self) -> String {
self.inner
}
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
}
impl AsRef<String> for PermissionBuf {
    #[inline(always)]
    fn as_ref(&self) -> &String {
        &self.inner
    }
}

impl AsRef<str> for PermissionBuf {
    #[inline(always)]
    fn as_ref(&self) -> &str {
        // Return the slice directly; the previous `&self.inner.as_str()`
        // produced a needless `&&str` that only compiled via deref coercion
        // (clippy: needless_borrow).
        self.inner.as_str()
    }
}

impl AsRef<Permission> for PermissionBuf {
    #[inline]
    fn as_ref(&self) -> &Permission {
        Permission::new(self)
    }
}
impl PartialOrd for PermissionBuf {
    /// Delegates to the ordering of the borrowed [`Permission`] views.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.as_permission().partial_cmp(other.as_permission())
    }
}

impl fmt::Display for PermissionBuf {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&self.inner, f)
    }
}
#[derive(PartialEq, Eq, Hash, Debug)]
#[repr(transparent)]
/// A borrowed permission string
///
/// Permissions have total equality and partial ordering.
/// Specifically permissions on the same path in a tree can be compared for specificity.
/// This means that ```(bffh.perm) > (bffh.perm.sub) == true```
/// but ```(bffh.perm) > (unrelated.but.more.specific.perm) == false```.
/// This allows to check if PermRule a grants Perm b by checking `a > b`.
pub struct Permission(str);

impl Permission {
    #[inline(always)]
    // We can't make this `const` just yet because `str` is always a fat pointer meaning we can't
    // just const cast it, and `CoerceUnsized` and friends are currently unstable.
    pub fn new<S: AsRef<str> + ?Sized>(s: &S) -> &Permission {
        // SAFETY: `Permission` is a `#[repr(transparent)]` wrapper around `str`,
        // so `&str` and `&Permission` have identical layout and the pointer
        // cast is sound for the lifetime of the borrow.
        unsafe { &*(s.as_ref() as *const str as *const Permission) }
    }

    #[inline(always)]
    pub fn as_str(&self) -> &str {
        &self.0
    }

    /// Iterate over the `.`-separated segments of this permission path.
    #[inline(always)]
    pub fn iter(&self) -> std::str::Split<char> {
        self.0.split('.')
    }
}
impl PartialOrd for Permission {
    /// Compare two permission paths segment by segment.
    ///
    /// Paths that diverge at any segment are incomparable (`None`). For paths
    /// where one is a prefix of the other, the shorter (less specific) path
    /// orders as `Greater`, the longer as `Less`, and equal paths as `Equal`.
    fn partial_cmp(&self, other: &Permission) -> Option<Ordering> {
        let mut lhs = self.iter();
        let mut rhs = other.iter();
        loop {
            match (lhs.next(), rhs.next()) {
                // matching segments so far: keep walking
                (Some(a), Some(b)) if a == b => {}
                // diverging segments: unrelated permissions
                (Some(_), Some(_)) => return None,
                (None, None) => return Some(Ordering::Equal),
                (Some(_), None) => return Some(Ordering::Less),
                (None, Some(_)) => return Some(Ordering::Greater),
            }
        }
    }
}
impl AsRef<Permission> for Permission {
    /// Identity conversion; lets generic `AsRef<Permission>` APIs accept a
    /// plain `&Permission` as well.
    #[inline]
    fn as_ref(&self) -> &Permission {
        self
    }
}
/// A single access-granting rule, (de)serialized as a permission string with
/// an optional `.+` (children) or `.*` (subtree) suffix.
#[derive(Clone, Debug, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
#[serde(try_from = "String")]
#[serde(into = "String")]
pub enum PermRule {
    /// The permission is precise,
    ///
    /// i.e. `Base("bffh.perm")` grants bffh.perm but does not grant permission for bffh.perm.sub
    Base(PermissionBuf),
    /// The permissions is for the children of the node
    ///
    /// i.e. `Children("bffh.perm")` grants bffh.perm.sub, bffh.perm.sub.two *BUT NOT* bffh.perm
    /// itself.
    Children(PermissionBuf),
    /// The permissions is for the subtree marked by the node
    ///
    /// i.e. `Children("bffh.perm")` grants bffh.perm.sub, bffh.perm.sub.two and also bffh.perm
    /// itself.
    Subtree(PermissionBuf),
    // This lacks what LDAP calls "ONELEVEL": The ability to grant the exact children but not several
    // levels deep, i.e. `Onelevel("bffh.perm")` grants bffh.perm.sub *BUT NOT* bffh.perm.sub.two or
    // bffh.perm itself.
    // I can't think of a reason to use that so I'm skipping it for now.
}
impl PermRule {
    /// Does this rule match that permission
    pub fn match_perm<P: AsRef<Permission> + ?Sized>(&self, perm: &P) -> bool {
        match self {
            // Base grants exactly the named permission
            PermRule::Base(ref base) => base.as_permission() == perm.as_ref(),
            // `>` on Permission means "is a strict prefix of": children only
            PermRule::Children(ref parent) => parent.as_permission() > perm.as_ref(),
            // `>=` additionally matches the node itself
            PermRule::Subtree(ref parent) => parent.as_permission() >= perm.as_ref(),
        }
    }
}
impl fmt::Display for PermRule {
    /// Render the rule in its canonical string form: `perm`, `perm.+`, or `perm.*`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (perm, suffix) = match self {
            PermRule::Base(perm) => (perm, ""),
            PermRule::Children(parent) => (parent, ".+"),
            PermRule::Subtree(parent) => (parent, ".*"),
        };
        write!(f, "{}{}", perm, suffix)
    }
}
/// Serialize a rule to its canonical string form (used by `#[serde(into = "String")]`).
///
/// Implemented as `From` (rather than `Into`) so the blanket impl provides
/// `Into<String>` for free.
impl From<PermRule> for String {
    fn from(rule: PermRule) -> String {
        match rule {
            PermRule::Base(perm) => perm.into_string(),
            PermRule::Children(mut perm) => {
                perm.push(Permission::new("+"));
                perm.into_string()
            }
            PermRule::Subtree(mut perm) => {
                // BUG FIX: this previously pushed "+" — identical to `Children` —
                // so a serialized `Subtree` rule round-tripped through
                // `TryFrom<String>` as a `Children` rule. "*" matches both
                // `Display` and the `TryFrom` parser.
                perm.push(Permission::new("*"));
                perm.into_string()
            }
        }
    }
}
impl TryFrom<String> for PermRule {
    type Error = &'static str;

    /// Parse a rule from its string form: a trailing `.+` yields `Children`,
    /// a trailing `.*` yields `Subtree`, anything else is a `Base` rule.
    ///
    /// # Errors
    /// Returns an error for inputs of two bytes or fewer, which cannot hold a
    /// permission plus a suffix.
    fn try_from(mut input: String) -> std::result::Result<Self, Self::Error> {
        let len = input.len();
        if len <= 2 {
            return Err("Input string for PermRule is too short");
        }
        // ROBUSTNESS FIX: the previous `&input[len - 2..len]` slice panics when
        // the string ends in a multi-byte UTF-8 character (len - 2 may not be a
        // char boundary). `ends_with` performs the same check safely; the
        // suffixes are pure ASCII so `truncate(len - 2)` is then valid.
        if input.ends_with(".+") {
            input.truncate(len - 2);
            Ok(PermRule::Children(PermissionBuf::from_string_unchecked(
                input,
            )))
        } else if input.ends_with(".*") {
            input.truncate(len - 2);
            Ok(PermRule::Subtree(PermissionBuf::from_string_unchecked(
                input,
            )))
        } else {
            Ok(PermRule::Base(PermissionBuf::from_string_unchecked(input)))
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Parent permissions compare *greater* than their descendants.
    #[test]
    fn permission_ord_test() {
        assert!(
            PermissionBuf::from_string_unchecked("bffh.perm".to_string())
                > PermissionBuf::from_string_unchecked("bffh.perm.sub".to_string())
        );
    }

    /// A `Base` rule matches exactly its own permission.
    #[test]
    fn permission_simple_check_test() {
        let perm = PermissionBuf::from_string_unchecked("test.perm".to_string());
        let rule = PermRule::Base(perm.clone());
        assert!(rule.match_perm(&perm));
    }

    /// `Children` grants descendants at any depth but not the node itself.
    #[test]
    fn permission_children_checks_only_children() {
        let perm = PermissionBuf::from_string_unchecked("test.perm".to_string());
        let rule = PermRule::Children(perm.clone());
        assert_eq!(rule.match_perm(&perm), false);
        let perm2 = PermissionBuf::from_string_unchecked("test.perm.child".to_string());
        let perm3 = PermissionBuf::from_string_unchecked("test.perm.child.deeper".to_string());
        assert!(rule.match_perm(&perm2));
        assert!(rule.match_perm(&perm3));
    }

    /// `Subtree` grants the node itself and all descendants.
    #[test]
    fn permission_subtree_checks_base() {
        let perm = PermissionBuf::from_string_unchecked("test.perm".to_string());
        let rule = PermRule::Subtree(perm.clone());
        assert!(rule.match_perm(&perm));
        let perm2 = PermissionBuf::from_string_unchecked("test.perm.child".to_string());
        let perm3 = PermissionBuf::from_string_unchecked("test.perm.child.deeper".to_string());
        assert!(rule.match_perm(&perm2));
        assert!(rule.match_perm(&perm3));
    }

    /// The string suffixes `.+` / `.*` parse into `Children` / `Subtree`.
    #[test]
    fn rules_from_string_test() {
        assert_eq!(
            PermRule::Base(PermissionBuf::from_string_unchecked(
                "bffh.perm".to_string()
            )),
            PermRule::try_from("bffh.perm".to_string()).unwrap()
        );
        assert_eq!(
            PermRule::Children(PermissionBuf::from_string_unchecked(
                "bffh.perm".to_string()
            )),
            PermRule::try_from("bffh.perm.+".to_string()).unwrap()
        );
        assert_eq!(
            PermRule::Subtree(PermissionBuf::from_string_unchecked(
                "bffh.perm".to_string()
            )),
            PermRule::try_from("bffh.perm.*".to_string()).unwrap()
        );
    }

    /// Bare wildcards without a permission prefix are rejected.
    #[test]
    fn rules_from_string_edgecases_test() {
        assert!(PermRule::try_from("*".to_string()).is_err());
        assert!(PermRule::try_from("+".to_string()).is_err());
    }
}

View File

@ -0,0 +1,173 @@
use crate::authorization::permissions::{PermRule, Permission};
use crate::users::db::UserData;
use once_cell::sync::OnceCell;
use std::collections::{HashMap, HashSet};
use std::fmt;
/// Process-global, write-once role table; populated by the first `Roles::new` call.
static ROLES: OnceCell<HashMap<String, Role>> = OnceCell::new();
/// Cheap, copyable handle to the process-global role table stored in `ROLES`.
#[derive(Copy, Clone)]
pub struct Roles {
    // Borrowed from the `ROLES` OnceCell, hence `'static` and trivially `Copy`.
    roles: &'static HashMap<String, Role>,
}
impl Roles {
    /// Create a handle to the global role table, initializing it on first call.
    ///
    /// The table lives in a `OnceCell`: only the first caller's `roles` map is
    /// stored; later calls ignore their argument and reuse the existing table.
    pub fn new(roles: HashMap<String, Role>) -> Self {
        let span = tracing::debug_span!("roles", "Creating Roles handle");
        let _guard = span.enter();
        let this = ROLES.get_or_init(|| {
            tracing::debug!("Initializing global roles…");
            roles
        });
        Self { roles: this }
    }

    /// Look up a role by its id.
    pub fn get(self, roleid: &str) -> Option<&Role> {
        self.roles.get(roleid)
    }

    /// Iterate over all known role ids.
    pub fn list(&self) -> impl Iterator<Item = &String> {
        self.roles.keys()
    }

    /// Tally a role dependency tree into a set.
    ///
    /// Depth-first walk over the parent chain, inserting every role reachable
    /// from `role_id` exactly once. Unknown role ids are silently skipped, and
    /// the "seen" check keeps cycles from recursing forever.
    fn tally_role(&self, roles: &mut HashMap<String, Role>, role_id: &String) {
        if let Some(role) = self.get(role_id) {
            // Only check and tally parents of a role at the role itself if it's
            // the first time we see it
            if !roles.contains_key(role_id) {
                for parent in role.parents.iter() {
                    self.tally_role(roles, parent);
                }
                roles.insert(role_id.clone(), role.clone());
            }
        }
    }

    /// Collect every permission rule granted to `user` through any of their
    /// roles, including rules inherited from parent roles.
    fn collect_permrules(&self, user: &UserData) -> Vec<PermRule> {
        let mut roleset = HashMap::new();
        for role_id in user.roles.iter() {
            self.tally_role(&mut roleset, role_id);
        }
        let mut output = Vec::new();
        // Gather the rules of every unique role we tallied.
        // (Previous comment claimed an "early return on match" — there is none;
        // this simply accumulates all rules.)
        for (_roleid, role) in roleset.iter() {
            output.extend(role.permissions.iter().cloned())
        }
        output
    }

    /// Recursively check whether `role_id` or any ancestor role grants `perm`.
    ///
    /// `roles` records ids already fully checked so shared ancestors are not
    /// re-examined and role cycles terminate.
    fn permitted_tally(
        &self,
        roles: &mut HashSet<String>,
        role_id: &String,
        perm: &Permission,
    ) -> bool {
        // FIX: the span was constructed but never entered, so it scoped nothing.
        // `.entered()` makes the guard actually enter the span for this call,
        // as the original `_guard` name intended.
        let _guard = tracing::debug_span!("tally", %role_id, perm = perm.as_str()).entered();
        if let Some(role) = self.get(role_id) {
            // Only check and tally parents of a role at the role itself if it's
            // the first time we see it
            if !roles.contains(role_id) {
                for perm_rule in role.permissions.iter() {
                    if perm_rule.match_perm(perm) {
                        tracing::debug!("Permission granted by direct role");
                        return true;
                    }
                }
                for parent in role.parents.iter() {
                    if self.permitted_tally(roles, parent, perm) {
                        tracing::debug!(%parent, "Permission granted by parent role");
                        return true;
                    }
                }
                roles.insert(role_id.clone());
            }
        }
        tracing::trace!(%role_id, "Permission not granted by role");
        false
    }

    /// Check whether `user` holds `perm` via any of their (transitive) roles.
    pub fn is_permitted(&self, user: &UserData, perm: impl AsRef<Permission>) -> bool {
        let perm = perm.as_ref();
        tracing::debug!(perm = perm.as_str(), "Checking permission");
        let mut seen = HashSet::new();
        for role_id in user.roles.iter() {
            // `perm` is already `&Permission`; the redundant `.as_ref()` was removed.
            if self.permitted_tally(&mut seen, role_id, perm) {
                return true;
            }
        }
        false
    }
}
/// A "Role" from the Authorization perspective
///
/// You can think of a role as a bundle of permissions relating to other roles. In most cases a
/// role represents a real-world education or apprenticeship, which gives a person the education
/// necessary to use a machine safely.
/// Roles are assigned permissions which in most cases evaluate to granting a person the right to
/// use certain (potentially) dangerous machines.
/// Using this indirection makes administration easier in certain ways; instead of maintaining
/// permissions on users directly the user is given a role after having been educated on the safety
/// of a machine; if later on a similar enough machine is put to use the administrator can just add
/// the permission for that machine to an already existing role instead of manually having to
/// assign to all users.
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct Role {
    // If a role doesn't define parents, default to an empty Vec.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    /// A Role can have parents, inheriting all permissions
    ///
    /// This makes situations where different levels of access are required easier: Each higher
    /// level of access sets the lower levels of access as parent, inheriting their permission; if
    /// you are allowed to manage a machine you are then also allowed to use it and so on
    parents: Vec<String>,
    // If a role doesn't define permissions, default to an empty Vec.
    // The rules this role grants directly (not counting inherited ones).
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    permissions: Vec<PermRule>,
}
impl Role {
    /// Construct a role from its parent role ids and directly granted rules.
    pub fn new(parents: Vec<String>, permissions: Vec<PermRule>) -> Self {
        Self {
            parents,
            permissions,
        }
    }
}
impl fmt::Display for Role {
    /// Render the role as a small YAML-like listing of parents and permissions.
    ///
    /// Empty lists print inline as `[]`; non-empty lists print one ` - item`
    /// line per entry.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "parents:")?;
        if self.parents.is_empty() {
            writeln!(f, " []")?;
        } else {
            // Idiom fix: `writeln!(f)` instead of `writeln!(f, "")`
            // (clippy::println_empty_string); output is identical.
            writeln!(f)?;
            for p in self.parents.iter() {
                writeln!(f, " - {}", p)?;
            }
        }
        write!(f, "permissions:")?;
        if self.permissions.is_empty() {
            writeln!(f, " []")?;
        } else {
            writeln!(f)?;
            for p in self.permissions.iter() {
                writeln!(f, " - {}", p)?;
            }
        }
        Ok(())
    }
}

View File

@ -0,0 +1,198 @@
use capnp::capability::Promise;
use capnp::Error;
use capnp_rpc::pry;
use rsasl::mechname::Mechname;
use rsasl::prelude::State as SaslState;
use rsasl::prelude::{MessageSent, Session};
use std::fmt;
use std::fmt::{Formatter, Write};
use tracing::Span;
use crate::authentication::V;
use crate::capnp::session::APISession;
use crate::session::SessionManager;
use api::authenticationsystem_capnp::authentication::{
AbortParams, AbortResults, Server as AuthenticationSystem, StepParams, StepResults,
};
use api::authenticationsystem_capnp::{response, response::Error as ErrorCode};
/// `tracing` target for all events emitted by the authentication API handler.
const TARGET: &str = "bffh::api::authenticationsystem";
/// Per-connection Cap'n Proto authentication endpoint wrapping one SASL exchange.
pub struct Authentication {
    // Tracing span covering the lifetime of this authentication exchange.
    span: Span,
    // Current protocol state; taken and replaced on every `step` call.
    state: State,
}
impl Authentication {
    /// Build a handler for a successfully started SASL session.
    pub fn new(
        parent: &Span,
        mechanism: &Mechname, /* TODO: this is stored in session as well, get it out of there. */
        session: Session<V>,
        sessionmanager: SessionManager,
    ) -> Self {
        let span = tracing::info_span!(
            target: TARGET,
            parent: parent,
            "Authentication",
            mechanism = mechanism.as_str()
        );
        tracing::trace!(
            target: TARGET,
            parent: &span,
            "constructing valid authentication system"
        );
        let state = State::Running(session, sessionmanager);
        Self { span, state }
    }

    /// Build a handler that rejects every step because the requested SASL
    /// mechanism is not offered by this server.
    pub fn invalid_mechanism() -> Self {
        let span = tracing::info_span!(target: TARGET, "Authentication",);
        tracing::trace!(
            target: TARGET,
            parent: &span,
            "constructing invalid mechanism authentication system"
        );
        Self {
            span,
            state: State::InvalidMechanism,
        }
    }

    /// Fill `response` with the error code matching the current terminal
    /// state; a no-op while the exchange is still running.
    fn build_error(&self, response: response::Builder) {
        // Producing the code via a match (with an early return for the running
        // state) covers every variant without an `unreachable!` arm.
        let code = match self.state {
            State::Running(_, _) => return,
            State::InvalidMechanism => ErrorCode::BadMechanism,
            // A finished exchange that still reports an error is surfaced as
            // an abort, same as an explicit abort.
            State::Finished | State::Aborted => ErrorCode::Aborted,
        };
        response.init_failed().set_code(code);
    }
}
impl fmt::Display for Authentication {
    /// Render as `Authentication(<state>)` for log output.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let state = match &self.state {
            State::InvalidMechanism => "invalid mechanism",
            State::Finished => "finished",
            State::Aborted => "aborted",
            State::Running(_, _) => "running",
        };
        write!(f, "Authentication({})", state)
    }
}
/// State machine for one authentication exchange.
enum State {
    /// Client requested a mechanism this server does not offer.
    InvalidMechanism,
    /// Exchange completed; no further steps are accepted.
    Finished,
    /// Exchange aborted by the client or after a mechanism error.
    Aborted,
    /// Exchange in progress, holding the live SASL session and the manager
    /// used to open an API session on success.
    Running(Session<V>, SessionManager),
}
impl AuthenticationSystem for Authentication {
    /// Perform one SASL step with the client-supplied data.
    ///
    /// On `Finished` with a valid user an API session is opened and returned;
    /// on `Running` a challenge is sent back; on any error the exchange moves
    /// to a terminal state and an error response is built.
    fn step(&mut self, params: StepParams, mut results: StepResults) -> Promise<(), Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(target: TARGET, "step",).entered();
        tracing::trace!(params.data = "<authentication data>", "method call");
        // Small helper carrying which union arm of the response was set, used
        // only for the trace log at the end of this method.
        #[repr(transparent)]
        struct Response {
            union_field: &'static str,
        }
        impl fmt::Display for Response {
            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
                f.write_str("Response(")?;
                f.write_str(self.union_field)?;
                f.write_char(')')
            }
        }
        let response;
        let mut builder = results.get();
        // Move the session out of `self.state`, leaving `Aborted` behind; every
        // non-error path below overwrites the state with the correct successor.
        if let State::Running(mut session, manager) =
            std::mem::replace(&mut self.state, State::Aborted)
        {
            let data: &[u8] = pry!(pry!(params.get()).get_data());
            let mut out = Vec::new();
            match session.step(Some(data), &mut out) {
                Ok(SaslState::Finished(sent)) => {
                    self.state = State::Finished;
                    // A finished mechanism exchange still needs validation to
                    // have produced a user, otherwise credentials were invalid.
                    if let Some(user) = session.validation() {
                        let session = manager.open(&self.span, user);
                        response = Response {
                            union_field: "successful",
                        };
                        let mut builder = builder.init_successful();
                        if sent == MessageSent::Yes {
                            builder.set_additional_data(out.as_slice());
                        }
                        APISession::build(session, builder)
                    } else {
                        let mut builder = builder.init_failed();
                        builder.set_code(ErrorCode::InvalidCredentials);
                        response = Response {
                            union_field: "error",
                        };
                    }
                }
                Ok(SaslState::Running) => {
                    // More round-trips needed: restore the running state and
                    // send the mechanism's challenge to the client.
                    self.state = State::Running(session, manager);
                    builder.set_challenge(out.as_slice());
                    response = Response {
                        union_field: "challenge",
                    };
                }
                Err(_) => {
                    self.state = State::Aborted;
                    self.build_error(builder);
                    response = Response {
                        union_field: "error",
                    };
                }
            }
        } else {
            // Not in `Running`: report the terminal state as an error.
            self.build_error(builder);
            response = Response {
                union_field: "error",
            };
        }
        tracing::trace!(
            results = %response,
            "method return"
        );
        Promise::ok(())
    }

    /// Abort the exchange unconditionally; subsequent steps will fail.
    fn abort(&mut self, _: AbortParams, _: AbortResults) -> Promise<(), Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(
            target: TARGET,
            parent: &self.span,
            "abort",
        )
        .entered();
        tracing::trace!("method call");
        self.state = State::Aborted;
        tracing::trace!("method return");
        Promise::ok(())
    }
}

63
bffhd/capnp/config.rs Normal file
View File

@ -0,0 +1,63 @@
use std::fmt::Formatter;
use std::net::ToSocketAddrs;
use std::path::PathBuf;
use serde::{Deserialize, Serialize};
use crate::config::deser_option;
#[derive(Debug, Clone, Serialize, Deserialize)]
/// API Socket Configuration block.
///
/// One configuration block can result in several sockets if the given `address` resolves to more
/// than one SocketAddr. BFFH will attempt to bind to all of them.
pub struct Listen {
    pub address: String,
    // Optional port; `DEFAULT_PORT` is substituted when absent.
    #[serde(
        default,
        skip_serializing_if = "Option::is_none",
        deserialize_with = "deser_option"
    )]
    pub port: Option<u16>,
}
impl Listen {
    /// Return the `(host, port)` pair, falling back to `DEFAULT_PORT` when no
    /// port was configured.
    pub fn to_tuple(&self) -> (&str, u16) {
        (self.address.as_str(), self.port.unwrap_or(DEFAULT_PORT))
    }
}
impl std::fmt::Display for Listen {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{}:{}", &self.address, self.port.unwrap_or(DEFAULT_PORT))
}
}
impl ToSocketAddrs for Listen {
type Iter = <(String, u16) as ToSocketAddrs>::Iter;
fn to_socket_addrs(&self) -> std::io::Result<Self::Iter> {
if let Some(port) = self.port {
(self.address.as_str(), port).to_socket_addrs()
} else {
(self.address.as_str(), DEFAULT_PORT).to_socket_addrs()
}
}
}
/// TLS configuration for an API listen socket.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct TlsListen {
    // Path to the PEM certificate (chain) file.
    pub certfile: PathBuf,
    // Path to the PEM private key file.
    pub keyfile: PathBuf,
    // Optional cipher suite list; format depends on the TLS backend — TODO confirm.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub ciphers: Option<String>,
    // Optional minimum TLS protocol version string.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tls_min_version: Option<String>,
    // ALPN/protocol names to advertise, if any.
    #[serde(default = "Vec::new", skip_serializing_if = "Vec::is_empty")]
    pub protocols: Vec<String>,
}
/// The default port, chosen from the dynamic/private (non-IANA-assignable,
/// i.e. free-use) range.
pub const DEFAULT_PORT: u16 = 59661;

171
bffhd/capnp/connection.rs Normal file
View File

@ -0,0 +1,171 @@
use api::connection_capnp::bootstrap;
pub use api::connection_capnp::bootstrap::Client;
use std::fmt;
use std::fmt::{Formatter, Write};
use std::net::SocketAddr;
use crate::authentication::AuthenticationHandle;
use crate::capnp::authenticationsystem::Authentication;
use crate::session::SessionManager;
use capnp::capability::Promise;
use capnp_rpc::pry;
use rsasl::mechname::Mechname;
use tracing::Span;
/// Cap'n Proto API Handler
///
/// Bootstrap capability handed to each connecting client; exposes version
/// info, the available SASL mechanisms, and session creation.
pub struct BootCap {
    // Remote address of the connected peer (used for logging/spans).
    peer_addr: SocketAddr,
    // Handle used to start SASL authentication exchanges.
    authentication: AuthenticationHandle,
    // Manager that opens API sessions for authenticated users.
    sessionmanager: SessionManager,
    // Tracing span covering this connection.
    span: Span,
}
impl BootCap {
    /// Construct the bootstrap capability for one client connection.
    pub fn new(
        peer_addr: SocketAddr,
        authentication: AuthenticationHandle,
        sessionmanager: SessionManager,
        span: Span,
    ) -> Self {
        Self {
            peer_addr,
            authentication,
            sessionmanager,
            span,
        }
    }
}
impl bootstrap::Server for BootCap {
    /// `getAPIVersion` — currently only traced; no version data is returned.
    fn get_a_p_i_version(
        &mut self,
        _: bootstrap::GetAPIVersionParams,
        _: bootstrap::GetAPIVersionResults,
    ) -> Promise<(), ::capnp::Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(
            target: "bffh::api",
            "Bootstrap",
            method = "getAPIVersion",
        )
        .entered();
        tracing::trace!("method call");
        Promise::ok(())
    }

    /// `getServerRelease` — report the server name and release version.
    fn get_server_release(
        &mut self,
        _: bootstrap::GetServerReleaseParams,
        mut result: bootstrap::GetServerReleaseResults,
    ) -> Promise<(), ::capnp::Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(
            target: "bffh::api",
            "Bootstrap",
            method = "getServerRelease",
        )
        .entered();
        tracing::trace!("method call");
        let mut builder = result.get();
        builder.set_name("bffhd");
        builder.set_release(crate::env::VERSION);
        tracing::trace!(
            results.name = "bffhd",
            results.release = crate::env::VERSION,
            "method return"
        );
        Promise::ok(())
    }

    /// `mechanisms` — list the SASL mechanisms available for authentication.
    fn mechanisms(
        &mut self,
        _params: bootstrap::MechanismsParams,
        mut result: bootstrap::MechanismsResults,
    ) -> Promise<(), ::capnp::Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(
            target: "bffh::api",
            "mechanisms",
        )
        .entered();
        tracing::trace!(target: "bffh::api", "method call");
        let builder = result.get();
        let mechs: Vec<_> = self
            .authentication
            .sess()
            .get_available()
            .into_iter()
            .map(|m| m.mechanism.as_str())
            .collect();
        let mut mechbuilder = builder.init_mechs(mechs.len() as u32);
        for (i, m) in mechs.iter().enumerate() {
            mechbuilder.set(i as u32, m);
        }
        // Display adapter that renders the mechanism list as `[a ,b ,c]` for
        // the trace log only.
        struct DisMechs<'a>(Vec<&'a str>);
        impl fmt::Display for DisMechs<'_> {
            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
                f.write_char('[')?;
                let mut first = true;
                for mechanism in self.0.iter() {
                    if first {
                        first = false;
                        f.write_str(mechanism)?;
                    } else {
                        f.write_str(" ,")?;
                        f.write_str(mechanism)?;
                    }
                }
                f.write_char(']')?;
                Ok(())
            }
        }
        tracing::trace!(
            results.mechs = %DisMechs(mechs),
            "method return"
        );
        Promise::ok(())
    }

    /// `createSession` — start a SASL exchange for the requested mechanism.
    ///
    /// An unparsable or unavailable mechanism yields an `Authentication` in
    /// the invalid-mechanism state rather than an RPC error.
    fn create_session(
        &mut self,
        params: bootstrap::CreateSessionParams,
        mut result: bootstrap::CreateSessionResults,
    ) -> Promise<(), ::capnp::Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(
            target: "bffh::api",
            "createSession",
        )
        .entered();
        let params = pry!(params.get());
        let mechanism: &str = pry!(params.get_mechanism());
        tracing::trace!(params.mechanism = mechanism, "method call");
        let mechname = Mechname::parse(mechanism.as_bytes());
        let auth = if let Ok(mechname) = mechname {
            if let Ok(session) = self.authentication.start(mechname) {
                Authentication::new(&self.span, mechname, session, self.sessionmanager.clone())
            } else {
                Authentication::invalid_mechanism()
            }
        } else {
            Authentication::invalid_mechanism()
        };
        tracing::trace!(
            results.authentication = %auth,
            "method return"
        );
        let mut builder = result.get();
        builder.set_authentication(capnp_rpc::new_client(auth));
        Promise::ok(())
    }
}

374
bffhd/capnp/machine.rs Normal file
View File

@ -0,0 +1,374 @@
use crate::capnp::user::User;
use crate::resources::modules::fabaccess::{ArchivedStatus, Status};
use crate::resources::Resource;
use crate::session::SessionHandle;
use api::general_capnp::optional;
use api::machine_capnp::machine::{
self, admin, admin::Server as AdminServer, check, check::Server as CheckServer,
in_use as inuse, in_use::Server as InUseServer, info, info::Server as InfoServer, manage,
manage::Server as ManageServer, use_, use_::Server as UseServer, MachineState,
};
use capnp::capability::Promise;
use capnp_rpc::pry;
/// Cap'n Proto capability tying one resource (machine) to the session that
/// requested access to it.
#[derive(Clone)]
pub struct Machine {
    session: SessionHandle,
    resource: Resource,
}
impl Machine {
    /// Pair a resource with the session requesting access to it.
    pub fn new(session: SessionHandle, resource: Resource) -> Self {
        Self { session, resource }
    }

    /// Fill `builder` with this machine's metadata and state, attaching the
    /// use/manage/inuse/info capability clients the session's permissions and
    /// the machine's current state allow.
    pub fn build_into(self, mut builder: machine::Builder) {
        builder.set_id(self.resource.get_id());
        builder.set_name(self.resource.get_name());
        if let Some(ref desc) = self.resource.get_description().description {
            builder.set_description(desc);
        }
        if let Some(ref wiki) = self.resource.get_description().wiki {
            builder.set_wiki(wiki);
        }
        if let Some(ref category) = self.resource.get_description().category {
            builder.set_category(category);
        }
        builder.set_urn(&format!(
            "urn:fabaccess:resource:{}",
            self.resource.get_id()
        ));
        {
            let user = self.session.get_user_ref();
            let state = self.resource.get_state_ref();
            let state = state.as_ref();
            // The "use" capability is offered when the user may write to the
            // resource and it is either free or reserved by this very user.
            if self.session.has_write(&self.resource)
                && match &state.inner.state {
                    ArchivedStatus::Free => true,
                    ArchivedStatus::Reserved(reserver) if reserver == &user => true,
                    _ => false,
                }
            {
                builder.set_use(capnp_rpc::new_client(self.clone()));
            }
            if self.session.has_manage(&self.resource) {
                builder.set_manage(capnp_rpc::new_client(self.clone()));
            }
            // TODO: admin perm
            // Map the archived resource status onto the API state enum; the
            // "inuse" capability is only attached for the current owner.
            let s = match &state.inner.state {
                ArchivedStatus::Free => MachineState::Free,
                ArchivedStatus::Disabled => MachineState::Disabled,
                ArchivedStatus::Blocked(_) => MachineState::Blocked,
                ArchivedStatus::InUse(owner) => {
                    if owner == &user {
                        builder.set_inuse(capnp_rpc::new_client(self.clone()));
                    }
                    MachineState::InUse
                }
                ArchivedStatus::Reserved(_) => MachineState::Reserved,
                ArchivedStatus::ToCheck(_) => MachineState::ToCheck,
            };
            if self.session.has_read(&self.resource) {
                builder.set_state(s);
            }
        }
        builder.set_info(capnp_rpc::new_client(self));
    }

    /// Builds a machine into the given builder.
    pub fn build(session: SessionHandle, resource: Resource, builder: machine::Builder) {
        let this = Self::new(session.clone(), resource.clone());
        this.build_into(builder)
    }

    /// Like [`Machine::build`], but into an `optional` wrapper: the machine is
    /// only emitted when it is visible to (or readable by) the session.
    pub fn optional_build(
        session: SessionHandle,
        resource: Resource,
        builder: optional::Builder<machine::Owned>,
    ) {
        let this = Self::new(session.clone(), resource.clone());
        if this.resource.visible(&session) || session.has_read(&resource) {
            let builder = builder.init_just();
            this.build_into(builder);
        }
    }
}
impl InfoServer for Machine {
    /// `getPropertyList` — not yet implemented.
    fn get_property_list(
        &mut self,
        _: info::GetPropertyListParams,
        _: info::GetPropertyListResults,
    ) -> Promise<(), ::capnp::Error> {
        Promise::err(::capnp::Error::unimplemented(
            "method not implemented".to_string(),
        ))
    }

    /// `getReservationList` — not yet implemented.
    fn get_reservation_list(
        &mut self,
        _: info::GetReservationListParams,
        _: info::GetReservationListResults,
    ) -> Promise<(), ::capnp::Error> {
        Promise::err(::capnp::Error::unimplemented(
            "method not implemented".to_string(),
        ))
    }
}
impl UseServer for Machine {
    /// `use` — attempt to take the machine for the calling user.
    ///
    /// NOTE(review): `try_update`'s outcome is not reported back to the
    /// caller; the RPC succeeds regardless — confirm this is intended.
    fn use_(&mut self, _: use_::UseParams, _: use_::UseResults) -> Promise<(), ::capnp::Error> {
        let resource = self.resource.clone();
        let session = self.session.clone();
        Promise::from_future(async move {
            let user = session.get_user_ref();
            resource.try_update(session, Status::InUse(user)).await;
            Ok(())
        })
    }

    /// `reserve` — attempt to reserve the machine for the calling user.
    fn reserve(
        &mut self,
        _: use_::ReserveParams,
        _: use_::ReserveResults,
    ) -> Promise<(), ::capnp::Error> {
        let resource = self.resource.clone();
        let session = self.session.clone();
        Promise::from_future(async move {
            let user = session.get_user_ref();
            resource.try_update(session, Status::Reserved(user)).await;
            Ok(())
        })
    }

    /// `reserveto` — not yet implemented.
    fn reserveto(
        &mut self,
        _: use_::ReservetoParams,
        _: use_::ReservetoResults,
    ) -> Promise<(), ::capnp::Error> {
        Promise::err(::capnp::Error::unimplemented(
            "method not implemented".to_string(),
        ))
    }
}
impl InUseServer for Machine {
    /// `giveBack` — return the machine currently used by the calling session.
    fn give_back(
        &mut self,
        _: inuse::GiveBackParams,
        _: inuse::GiveBackResults,
    ) -> Promise<(), ::capnp::Error> {
        let resource = self.resource.clone();
        let session = self.session.clone();
        Promise::from_future(async move {
            resource.give_back(session.clone()).await;
            Ok(())
        })
    }

    /// `sendRawData` — not yet implemented.
    fn send_raw_data(
        &mut self,
        _: inuse::SendRawDataParams,
        _: inuse::SendRawDataResults,
    ) -> Promise<(), ::capnp::Error> {
        Promise::err(::capnp::Error::unimplemented(
            "method not implemented".to_string(),
        ))
    }
}
impl CheckServer for Machine {
    /// `check` — not yet implemented.
    fn check(
        &mut self,
        _: check::CheckParams,
        _: check::CheckResults,
    ) -> Promise<(), ::capnp::Error> {
        Promise::err(::capnp::Error::unimplemented(
            "method not implemented".to_string(),
        ))
    }

    /// `reject` — not yet implemented.
    fn reject(
        &mut self,
        _: check::RejectParams,
        _: check::RejectResults,
    ) -> Promise<(), ::capnp::Error> {
        Promise::err(::capnp::Error::unimplemented(
            "method not implemented".to_string(),
        ))
    }
}
impl ManageServer for Machine {
    /// `getMachineInfoExtended` — report the current and previous user of the
    /// machine (as far as the session may see them).
    fn get_machine_info_extended(
        &mut self,
        _: manage::GetMachineInfoExtendedParams,
        mut result: manage::GetMachineInfoExtendedResults,
    ) -> Promise<(), ::capnp::Error> {
        let mut builder = result.get();
        User::build_optional(
            &self.session,
            self.resource.get_current_user(),
            builder.reborrow().init_current_user(),
        );
        User::build_optional(
            &self.session,
            self.resource.get_previous_user(),
            builder.init_last_user(),
        );
        Promise::ok(())
    }

    /// `setProperty` — not yet implemented.
    fn set_property(
        &mut self,
        _: manage::SetPropertyParams,
        _: manage::SetPropertyResults,
    ) -> Promise<(), ::capnp::Error> {
        Promise::err(::capnp::Error::unimplemented(
            "method not implemented".to_string(),
        ))
    }

    /// `removeProperty` — not yet implemented.
    fn remove_property(
        &mut self,
        _: manage::RemovePropertyParams,
        _: manage::RemovePropertyResults,
    ) -> Promise<(), ::capnp::Error> {
        Promise::err(::capnp::Error::unimplemented(
            "method not implemented".to_string(),
        ))
    }

    /// `forceUse` — unconditionally mark the machine in-use by the caller.
    fn force_use(
        &mut self,
        _: manage::ForceUseParams,
        _: manage::ForceUseResults,
    ) -> Promise<(), ::capnp::Error> {
        let resource = self.resource.clone();
        let session = self.session.clone();
        Promise::from_future(async move {
            resource
                .force_set(Status::InUse(session.get_user_ref()))
                .await;
            Ok(())
        })
    }

    /// `forceFree` — unconditionally mark the machine free.
    fn force_free(
        &mut self,
        _: manage::ForceFreeParams,
        _: manage::ForceFreeResults,
    ) -> Promise<(), ::capnp::Error> {
        let resource = self.resource.clone();
        let _session = self.session.clone();
        Promise::from_future(async move {
            resource.force_set(Status::Free).await;
            Ok(())
        })
    }

    /// `forceTransfer` — not yet implemented.
    fn force_transfer(
        &mut self,
        _: manage::ForceTransferParams,
        _: manage::ForceTransferResults,
    ) -> Promise<(), ::capnp::Error> {
        Promise::err(::capnp::Error::unimplemented(
            "method not implemented".to_string(),
        ))
    }

    /// `block` — mark the machine blocked, attributed to the calling user.
    fn block(
        &mut self,
        _: manage::BlockParams,
        _: manage::BlockResults,
    ) -> Promise<(), ::capnp::Error> {
        let resource = self.resource.clone();
        let session = self.session.clone();
        Promise::from_future(async move {
            resource
                .force_set(Status::Blocked(session.get_user_ref()))
                .await;
            Ok(())
        })
    }

    /// `disabled` — mark the machine disabled.
    fn disabled(
        &mut self,
        _: manage::DisabledParams,
        _: manage::DisabledResults,
    ) -> Promise<(), ::capnp::Error> {
        let resource = self.resource.clone();
        Promise::from_future(async move {
            resource.force_set(Status::Disabled).await;
            Ok(())
        })
    }
}
impl AdminServer for Machine {
    /// `forceSetState` — set an arbitrary machine state, attributing
    /// user-carrying states (blocked/in-use/reserved/to-check) to the caller.
    fn force_set_state(
        &mut self,
        params: admin::ForceSetStateParams,
        _: admin::ForceSetStateResults,
    ) -> Promise<(), ::capnp::Error> {
        use api::schema::machine_capnp::machine::MachineState as APIMState;
        let user = self.session.get_user_ref();
        let state = match pry!(pry!(params.get()).get_state()) {
            APIMState::Free => Status::Free,
            APIMState::Blocked => Status::Blocked(user),
            APIMState::Disabled => Status::Disabled,
            APIMState::InUse => Status::InUse(user),
            APIMState::Reserved => Status::Reserved(user),
            APIMState::ToCheck => Status::ToCheck(user),
            APIMState::Totakeover => {
                return Promise::err(::capnp::Error::unimplemented(
                    "totakeover not implemented".to_string(),
                ))
            }
        };
        let resource = self.resource.clone();
        Promise::from_future(async move {
            resource.force_set(state).await;
            Ok(())
        })
    }

    /// `forceSetUser` — not yet implemented.
    fn force_set_user(
        &mut self,
        _: admin::ForceSetUserParams,
        _: admin::ForceSetUserResults,
    ) -> Promise<(), ::capnp::Error> {
        Promise::err(::capnp::Error::unimplemented(
            "method not implemented".to_string(),
        ))
    }

    /// `getAdminPropertyList` — not yet implemented.
    fn get_admin_property_list(
        &mut self,
        _: admin::GetAdminPropertyListParams,
        _: admin::GetAdminPropertyListResults,
    ) -> Promise<(), ::capnp::Error> {
        Promise::err(::capnp::Error::unimplemented(
            "method not implemented".to_string(),
        ))
    }

    /// `setAdminProperty` — not yet implemented.
    fn set_admin_property(
        &mut self,
        _: admin::SetAdminPropertyParams,
        _: admin::SetAdminPropertyResults,
    ) -> Promise<(), ::capnp::Error> {
        Promise::err(::capnp::Error::unimplemented(
            "method not implemented".to_string(),
        ))
    }

    /// `removeAdminProperty` — not yet implemented.
    fn remove_admin_property(
        &mut self,
        _: admin::RemoveAdminPropertyParams,
        _: admin::RemoveAdminPropertyResults,
    ) -> Promise<(), ::capnp::Error> {
        Promise::err(::capnp::Error::unimplemented(
            "method not implemented".to_string(),
        ))
    }
}

View File

@ -0,0 +1,133 @@
use crate::capnp::machine::Machine;
use crate::resources::search::ResourcesHandle;
use crate::resources::Resource;
use crate::session::SessionHandle;
use crate::RESOURCES;
use api::machinesystem_capnp::machine_system::info;
use capnp::capability::Promise;
use capnp_rpc::pry;
use tracing::Span;
/// `tracing` target for all events emitted by the machine-system API handler.
const TARGET: &str = "bffh::api::machinesystem";
/// Cap'n Proto machine-system capability: lists and looks up machines on
/// behalf of one session.
#[derive(Clone)]
pub struct Machines {
    // Tracing span covering this capability's lifetime.
    span: Span,
    session: SessionHandle,
    // Handle to the global resource registry.
    resources: ResourcesHandle,
}
impl Machines {
    /// Create a machine-system API handler bound to `session`.
    ///
    /// # Panics
    /// Panics if the global `RESOURCES` registry has not been initialized
    /// before the first API session is opened.
    pub fn new(session: SessionHandle) -> Self {
        let span = tracing::info_span!(
            target: TARGET,
            parent: &session.span,
            "MachineSystem",
        );
        // Addressed "FIXME no unwrap bad": the invariant (RESOURCES is set
        // during startup, before any connection is accepted) is now stated in
        // the panic message instead of a bare unwrap.
        Self {
            span,
            session,
            resources: RESOURCES
                .get()
                .expect("RESOURCES must be initialized before constructing Machines")
                .clone(),
        }
    }
}
impl info::Server for Machines {
    /// `getMachineList` — return every machine visible to this session.
    fn get_machine_list(
        &mut self,
        _: info::GetMachineListParams,
        mut result: info::GetMachineListResults,
    ) -> Promise<(), ::capnp::Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(
            target: TARGET,
            parent: &self.span,
            "getMachineList",
        )
        .entered();
        tracing::trace!("method call");
        // Filter to visible resources first so the builder can be sized
        // exactly; `enumerate` provides the list index for each slot.
        let machine_list: Vec<(usize, &Resource)> = self
            .resources
            .list_all()
            .into_iter()
            .filter(|resource| resource.visible(&self.session))
            .enumerate()
            .collect();
        let mut builder = result.get().init_machine_list(machine_list.len() as u32);
        for (i, m) in machine_list {
            let resource = m.clone();
            let mbuilder = builder.reborrow().get(i as u32);
            Machine::build(self.session.clone(), resource, mbuilder);
        }
        // TODO: indicate result?
        tracing::trace!("method return");
        Promise::ok(())
    }

    /// `getMachine` — look a machine up by id; the optional result stays
    /// empty when the id is unknown (or not visible).
    fn get_machine(
        &mut self,
        params: info::GetMachineParams,
        mut result: info::GetMachineResults,
    ) -> Promise<(), ::capnp::Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(
            target: TARGET,
            parent: &self.span,
            "getMachine",
        )
        .entered();
        let params = pry!(params.get());
        let id = pry!(params.get_id());
        tracing::trace!(params.id = id, "method call");
        if let Some(resource) = self.resources.get_by_id(id) {
            tracing::trace!(results = "Just", results.inner = id, "method return");
            let builder = result.get();
            Machine::optional_build(self.session.clone(), resource.clone(), builder);
        } else {
            tracing::trace!(results = "Nothing", "method return");
        }
        Promise::ok(())
    }

    /// `getMachineURN` — like `getMachine`, but keyed by the machine's URN.
    fn get_machine_u_r_n(
        &mut self,
        params: info::GetMachineURNParams,
        mut result: info::GetMachineURNResults,
    ) -> Promise<(), ::capnp::Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(
            target: TARGET,
            parent: &self.span,
            "getMachineURN",
        )
        .entered();
        let params = pry!(params.get());
        let urn = pry!(params.get_urn());
        tracing::trace!(params.urn = urn, "method call");
        if let Some(resource) = self.resources.get_by_urn(urn) {
            tracing::trace!(
                results = "Just",
                results.inner = resource.get_id(),
                "method return"
            );
            let builder = result.get();
            Machine::optional_build(self.session.clone(), resource.clone(), builder);
        } else {
            tracing::trace!(results = "Nothing", "method return");
        }
        Promise::ok(())
    }
}

208
bffhd/capnp/mod.rs Normal file
View File

@ -0,0 +1,208 @@
use miette::Diagnostic;
use thiserror::Error;
use async_net::TcpListener;
use capnp_rpc::rpc_twoparty_capnp::Side;
use capnp_rpc::twoparty::VatNetwork;
use capnp_rpc::RpcSystem;
use executor::prelude::{Executor, SupervisionRegistry};
use futures_rustls::server::TlsStream;
use futures_rustls::TlsAcceptor;
use futures_util::stream::FuturesUnordered;
use futures_util::{stream, AsyncRead, AsyncWrite, StreamExt};
use std::future::Future;
use std::io;
use std::net::{IpAddr, SocketAddr};
use crate::authentication::AuthenticationHandle;
use crate::session::SessionManager;
mod config;
pub use config::{Listen, TlsListen};
mod authenticationsystem;
mod connection;
mod machine;
mod machinesystem;
mod permissionsystem;
mod session;
mod user;
mod user_system;
/// The main Cap'n Proto API server: accepts TCP connections on the bound
/// sockets, wraps them in TLS, and spawns an RPC handler per connection.
pub struct APIServer {
    executor: Executor<'static>,
    // Listen sockets produced by `bind`; may be empty if nothing bound.
    sockets: Vec<TcpListener>,
    acceptor: TlsAcceptor,
    sessionmanager: SessionManager,
    authentication: AuthenticationHandle,
}
// Uninhabited error type: `bind` currently cannot fail in a reportable way,
// but the `Result` signature leaves room to add variants later.
#[derive(Debug, Error, Diagnostic)]
#[error("Reached Void error, this should not be possible")]
pub enum Error {}
impl APIServer {
/// Assemble an API server from already-bound sockets and its collaborators.
pub fn new(
    executor: Executor<'static>,
    sockets: Vec<TcpListener>,
    acceptor: TlsAcceptor,
    sessionmanager: SessionManager,
    authentication: AuthenticationHandle,
) -> Self {
    Self {
        executor,
        sockets,
        acceptor,
        sessionmanager,
        authentication,
    }
}
/// Resolve and bind every configured listen address, logging and skipping
/// any address that fails to resolve or bind; the server is built over the
/// sockets that succeeded (possibly none, which is only warned about).
///
/// NOTE(review): the `span.enter()` guard is held across the `.await`s
/// below; with `tracing`, that can attach unrelated async work to this
/// span — consider `Instrument::instrument` instead. Verify intended
/// behavior before changing.
pub async fn bind(
    executor: Executor<'static>,
    listens: impl IntoIterator<Item = &Listen>,
    acceptor: TlsAcceptor,
    sessionmanager: SessionManager,
    authentication: AuthenticationHandle,
) -> Result<Self, Error> {
    let span = tracing::info_span!("binding API listen sockets");
    let _guard = span.enter();
    let sockets = FuturesUnordered::new();
    // Phase 1: resolve every configured address concurrently, dropping
    // (and logging) the ones that fail, then queue a bind per SocketAddr.
    listens
        .into_iter()
        .map(|a| async move { (async_net::resolve(a.to_tuple()).await, a) })
        .collect::<FuturesUnordered<_>>()
        .filter_map(|(res, addr)| async move {
            match res {
                Ok(a) => Some(a),
                Err(e) => {
                    tracing::error!("Failed to resolve {:?}: {}", addr, e);
                    None
                }
            }
        })
        .for_each(|addrs| async {
            for addr in addrs {
                sockets.push(async move { (TcpListener::bind(addr).await, addr) })
            }
        })
        .await;
    // Phase 2: await the binds, keeping only the sockets that opened.
    let sockets: Vec<TcpListener> = sockets
        .filter_map(|(res, addr)| async move {
            match res {
                Ok(s) => {
                    tracing::info!("Opened listen socket on {}", addr);
                    Some(s)
                }
                Err(e) => {
                    tracing::error!("Failed to open socket on {}: {}", addr, e);
                    None
                }
            }
        })
        .collect()
        .await;
    tracing::info!("listening on {:?}", sockets);
    if sockets.is_empty() {
        tracing::warn!("No usable listen addresses configured for the API server!");
    }
    Ok(Self::new(
        executor,
        sockets,
        acceptor,
        sessionmanager,
        authentication,
    ))
}
pub async fn handle_until(self, stop: impl Future) {
stream::select_all(
self.sockets
.iter()
.map(|tcplistener| tcplistener.incoming()),
)
.take_until(stop)
.for_each(|stream| async {
match stream {
Ok(stream) => {
if let Ok(peer_addr) = stream.peer_addr() {
self.handle(peer_addr, self.acceptor.accept(stream))
} else {
tracing::error!(?stream, "failing a TCP connection with no peer addr");
}
}
Err(e) => tracing::warn!("Failed to accept stream: {}", e),
}
})
.await;
tracing::info!("closing down API handler");
}
fn handle<IO: 'static + Unpin + AsyncRead + AsyncWrite>(
&self,
peer_addr: SocketAddr,
stream: impl Future<Output = io::Result<TlsStream<IO>>>,
) {
let span = tracing::trace_span!("api.handle");
let _guard = span.enter();
struct Peer {
ip: IpAddr,
port: u16,
}
let peer = Peer {
ip: peer_addr.ip(),
port: peer_addr.port(),
};
tracing::debug!(
%peer.ip,
peer.port,
"spawning api handler"
);
let connection_span = tracing::info_span!(
target: "bffh::api",
"connection",
%peer.ip,
peer.port,
);
let f = async move {
tracing::trace!(parent: &connection_span, "starting tls exchange");
let stream = match stream.await {
Ok(stream) => stream,
Err(error) => {
tracing::error!(parent: &connection_span, %error, "TLS handshake failed");
return;
}
};
let (rx, tx) = futures_lite::io::split(stream);
let vat = VatNetwork::new(rx, tx, Side::Server, Default::default());
let bootstrap: connection::Client = capnp_rpc::new_client(connection::BootCap::new(
peer_addr,
self.authentication.clone(),
self.sessionmanager.clone(),
connection_span.clone(),
));
if let Err(error) = RpcSystem::new(Box::new(vat), Some(bootstrap.client)).await {
tracing::error!(
parent: &connection_span,
%error,
"error occured during rpc handling",
);
}
};
let cgroup = SupervisionRegistry::with(SupervisionRegistry::new_group);
self.executor.spawn_local_cgroup(f, cgroup);
}
}

View File

@ -0,0 +1,48 @@
use crate::Roles;
use api::permissionsystem_capnp::permission_system::info::{
GetRoleListParams, GetRoleListResults, Server as PermissionSystem,
};
use capnp::capability::Promise;
use capnp::Error;
use tracing::Span;
use crate::session::SessionHandle;
const TARGET: &str = "bffh::api::permissionsystem";
/// Cap'n Proto implementation of the PermissionSystem `info` interface.
pub struct Permissions {
    /// Tracing span covering the lifetime of this capability object.
    span: Span,
    /// Role store of the session this capability was created for.
    roles: Roles,
}

impl Permissions {
    /// Create a PermissionSystem capability bound to the given session's roles.
    pub fn new(session: SessionHandle) -> Self {
        let span = tracing::info_span!(target: TARGET, "PermissionSystem",);
        Self {
            span,
            roles: session.roles,
        }
    }
}
impl PermissionSystem for Permissions {
    /// Return the names of all roles known to this session's role store.
    fn get_role_list(
        &mut self,
        _: GetRoleListParams,
        mut results: GetRoleListResults,
    ) -> Promise<(), Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(target: TARGET, "getRoleList",).entered();
        tracing::trace!("method call");

        // Materialize the role names first so the list can be sized up front.
        let names: Vec<&String> = self.roles.list().collect();
        let mut list_builder = results.get().init_role_list(names.len() as u32);
        for (idx, name) in names.iter().enumerate() {
            list_builder.reborrow().get(idx as u32).set_name(name.as_str());
        }

        tracing::trace!("method return");
        Promise::ok(())
    }
}

40
bffhd/capnp/session.rs Normal file
View File

@ -0,0 +1,40 @@
use crate::authorization::permissions::Permission;
use api::authenticationsystem_capnp::response::successful::Builder;
use crate::capnp::machinesystem::Machines;
use crate::capnp::permissionsystem::Permissions;
use crate::capnp::user_system::Users;
use crate::session::SessionHandle;
#[derive(Debug, Clone)]
/// Factory for the per-session capability tree returned after authentication.
pub struct APISession;

impl APISession {
    /// Construct the (stateless) session factory.
    pub fn new() -> Self {
        Self
    }

    /// Populate a successful-authentication response with the session's
    /// capability tree.
    ///
    /// Capabilities are permission-gated: user `manage`/`search` require
    /// `bffh.users.manage`; machine, user and permission `info` are always set.
    pub fn build(session: SessionHandle, builder: Builder) {
        let mut builder = builder.init_session();
        {
            let mut b = builder.reborrow().init_machine_system();
            b.set_info(capnp_rpc::new_client(Machines::new(session.clone())));
        }
        {
            let mut b = builder.reborrow().init_user_system();
            let u = Users::new(session.clone());
            if session.has_perm(Permission::new("bffh.users.manage")) {
                // manage and search share the same underlying Users object.
                b.set_manage(capnp_rpc::new_client(u.clone()));
                b.set_search(capnp_rpc::new_client(u.clone()));
            }
            b.set_info(capnp_rpc::new_client(u));
        }
        {
            let mut b = builder.init_permission_system();
            b.set_info(capnp_rpc::new_client(Permissions::new(session)));
        }
    }
}

402
bffhd/capnp/user.rs Normal file
View File

@ -0,0 +1,402 @@
use crate::authorization::permissions::Permission;
use crate::session::SessionHandle;
use crate::users::{db, UserRef};
use crate::CONFIG;
use api::general_capnp::optional;
use api::user_capnp::user::card_d_e_s_fire_e_v2::{
BindParams, BindResults, GenCardTokenParams, GenCardTokenResults, GetMetaInfoParams,
GetMetaInfoResults, GetSpaceInfoParams, GetSpaceInfoResults, GetTokenListParams,
GetTokenListResults, UnbindParams, UnbindResults,
};
use api::user_capnp::user::{self, admin, card_d_e_s_fire_e_v2, info, manage};
use capnp::capability::Promise;
use capnp::Error;
use capnp_rpc::pry;
use std::borrow::Cow;
use std::io::Write;
use uuid::Uuid;
const TARGET: &str = "bffh::api::user";
#[derive(Clone)]
/// Per-user capability object implementing the user info/manage/admin/card
/// interfaces.
pub struct User {
    // Tracing span for calls on this capability.
    span: tracing::Span,
    // Session of the caller this capability was handed to.
    session: SessionHandle,
    // The user this capability refers to (not necessarily the caller).
    user: UserRef,
}
impl User {
    /// Create a capability referring to `user`, on behalf of `session`.
    pub fn new(session: SessionHandle, user: UserRef) -> Self {
        let span = tracing::info_span!(target: TARGET, "User");
        Self {
            span,
            session,
            user,
        }
    }

    /// Create a capability referring to the session's own user.
    pub fn new_self(session: SessionHandle) -> Self {
        let user = session.get_user_ref();
        Self::new(session, user)
    }

    /// Fill `builder` with `user` if it resolves in the user DB; otherwise the
    /// optional is left empty ("Nothing").
    pub fn build_optional(
        session: &SessionHandle,
        user: Option<UserRef>,
        builder: optional::Builder<user::Owned>,
    ) {
        if let Some(user) = user.and_then(|u| session.users.get_user(u.get_username())) {
            let builder = builder.init_just();
            // `session` is already a reference; no need to take another one.
            Self::fill(session, user, builder);
        }
    }

    /// Fill `builder` with the session's own user.
    pub fn build(session: SessionHandle, builder: user::Builder) {
        let this = Self::new_self(session);
        let user = this.session.get_user();
        Self::fill(&this.session, user, builder);
    }

    /// Populate a user struct, attaching only the interfaces the calling
    /// session is permitted to use.
    pub fn fill(session: &SessionHandle, user: db::User, mut builder: user::Builder) {
        builder.set_username(user.id.as_str());
        // We have permissions on ourself (compare values, not references).
        let is_me = session.get_user_ref().id == user.id;
        let client = Self::new(session.clone(), UserRef::new(user.id));
        // info: own user, or explicit info permission.
        if is_me || session.has_perm(Permission::new("bffh.users.info")) {
            builder.set_info(capnp_rpc::new_client(client.clone()));
        }
        // manage: only available on one's own user.
        if is_me {
            builder.set_manage(capnp_rpc::new_client(client.clone()));
        }
        // admin + card management: admin permission required.
        if session.has_perm(Permission::new("bffh.users.admin")) {
            builder.set_admin(capnp_rpc::new_client(client.clone()));
            builder.set_card_d_e_s_fire_e_v2(capnp_rpc::new_client(client));
        }
    }
}
impl info::Server for User {
    /// List the roles assigned to this user; an unknown user yields an
    /// empty result rather than an error.
    fn list_roles(
        &mut self,
        _: info::ListRolesParams,
        mut result: info::ListRolesResults,
    ) -> Promise<(), ::capnp::Error> {
        let maybe_user = self.session.users.get_user(self.user.get_username());
        if let Some(user) = maybe_user {
            let roles = user.userdata.roles;
            let mut list = result.get().init_roles(roles.len() as u32);
            for (idx, role) in roles.iter().enumerate() {
                list.reborrow().get(idx as u32).set_name(role.as_str());
            }
        }
        Promise::ok(())
    }
}
impl manage::Server for User {
    /// Change this user's own password after verifying the old one.
    ///
    /// NOTE(review): a wrong old password or an unknown user silently returns
    /// success — the caller gets no indication the password was NOT changed.
    /// Confirm this is intended before relying on it.
    fn pwd(
        &mut self,
        params: manage::PwdParams,
        _results: manage::PwdResults,
    ) -> Promise<(), ::capnp::Error> {
        let params = pry!(params.get());
        let old_pw = pry!(params.get_old_pwd());
        let new_pw = pry!(params.get_new_pwd());
        let uid = self.user.get_username();
        if let Some(mut user) = self.session.users.get_user(uid) {
            // Only rotate the password when the old one verifies.
            if let Ok(true) = user.check_password(old_pw.as_bytes()) {
                user.set_pw(new_pw.as_bytes());
                pry!(self.session.users.put_user(uid, &user));
            }
        }
        Promise::ok(())
    }
}
impl admin::Server for User {
    /// Not implemented; always returns an `unimplemented` RPC error.
    fn get_user_info_extended(
        &mut self,
        _: admin::GetUserInfoExtendedParams,
        _: admin::GetUserInfoExtendedResults,
    ) -> Promise<(), ::capnp::Error> {
        Promise::err(::capnp::Error::unimplemented(
            "method not implemented".to_string(),
        ))
    }

    /// Add a role to the target user. No-op when the role does not exist or
    /// the user already has it.
    fn add_role(
        &mut self,
        param: admin::AddRoleParams,
        _: admin::AddRoleResults,
    ) -> Promise<(), ::capnp::Error> {
        let rolename = pry!(pry!(pry!(param.get()).get_role()).get_name());
        if let Some(_role) = self.session.roles.get(rolename) {
            // Fail the RPC instead of panicking if the user was deleted from
            // the DB after this capability was handed out (was `.unwrap()`).
            let mut target = pry!(self
                .session
                .users
                .get_user(self.user.get_username())
                .ok_or_else(|| ::capnp::Error::failed(format!(
                    "User API object with nonexisting user \"{}\"",
                    self.user.get_username()
                ))));
            // Only update if needed
            if !target.userdata.roles.iter().any(|r| r.as_str() == rolename) {
                target.userdata.roles.push(rolename.to_string());
                pry!(self.session
                    .users
                    .put_user(self.user.get_username(), &target));
            }
        }
        Promise::ok(())
    }

    /// Remove a role from the target user. No-op when the role does not exist
    /// or the user does not have it.
    fn remove_role(
        &mut self,
        param: admin::RemoveRoleParams,
        _: admin::RemoveRoleResults,
    ) -> Promise<(), ::capnp::Error> {
        let rolename = pry!(pry!(pry!(param.get()).get_role()).get_name());
        if let Some(_role) = self.session.roles.get(rolename) {
            // See add_role: graceful failure instead of `.unwrap()` panic.
            let mut target = pry!(self
                .session
                .users
                .get_user(self.user.get_username())
                .ok_or_else(|| ::capnp::Error::failed(format!(
                    "User API object with nonexisting user \"{}\"",
                    self.user.get_username()
                ))));
            // Only update if needed
            if target.userdata.roles.iter().any(|r| r.as_str() == rolename) {
                target.userdata.roles.retain(|r| r.as_str() != rolename);
                pry!(self.session
                    .users
                    .put_user(self.user.get_username(), &target));
            }
        }
        Promise::ok(())
    }

    /// Set the target user's password without checking the old one
    /// (admin-only path; silently no-ops for an unknown user).
    fn pwd(
        &mut self,
        param: admin::PwdParams,
        _: admin::PwdResults,
    ) -> Promise<(), ::capnp::Error> {
        let new_pw = pry!(pry!(param.get()).get_new_pwd());
        let uid = self.user.get_username();
        if let Some(mut user) = self.session.users.get_user(uid) {
            user.set_pw(new_pw.as_bytes());
            pry!(self.session.users.put_user(uid, &user));
        }
        Promise::ok(())
    }
}
impl card_d_e_s_fire_e_v2::Server for User {
fn get_token_list(
&mut self,
_: GetTokenListParams,
mut results: GetTokenListResults,
) -> Promise<(), Error> {
let _guard = self.span.enter();
let _span = tracing::trace_span!(target: TARGET, "get_token_list").entered();
tracing::trace!("method call");
// TODO: This only supports a single token per user
let user = pry!(self
.session
.users
.get_user(self.user.get_username())
.ok_or_else(|| Error::failed(format!(
"User API object with nonexisting user \"{}\"",
self.user.get_username()
))));
let tk = user
.userdata
.kv
.get("cardtoken")
.map(|ck| hex::decode(ck).ok())
.flatten()
.unwrap_or_else(|| {
tracing::debug!(user.id = &user.id, "no tokens stored");
Vec::new()
});
if !tk.is_empty() {
let b = results.get();
let mut lb = b.init_token_list(1);
lb.set(0, &tk[..]);
}
Promise::ok(())
}
fn bind(&mut self, params: BindParams, _: BindResults) -> Promise<(), Error> {
let _guard = self.span.enter();
let _span = tracing::trace_span!(target: TARGET, "bind").entered();
let params = pry!(params.get());
let card_key = pry!(params.get_auth_key());
let token = pry!(params.get_token());
let token: Cow<'_, str> = if let Ok(url) = std::str::from_utf8(token) {
Cow::Borrowed(url)
} else {
Cow::Owned(hex::encode(token))
};
tracing::trace!(
params.token = token.as_ref(),
params.auth_key = "<censored>",
"method call"
);
let card_key = hex::encode(card_key);
let mut user = pry!(self
.session
.users
.get_user(self.user.get_username())
.ok_or_else(|| Error::failed(format!(
"User API object with nonexisting user \"{}\"",
self.user.get_username()
))));
let prev_token = user.userdata.kv.get("cardtoken");
let prev_cardk = user.userdata.kv.get("cardkey");
match (prev_token, prev_cardk) {
(Some(prev_token), Some(prev_cardk))
if prev_token.as_str() == &token && prev_cardk.as_str() == card_key.as_str() =>
{
tracing::info!(
user.id, token = token.as_ref(),
"new token and card key are identical, skipping no-op"
);
return Promise::ok(());
},
(Some(prev_token), Some(_))
if prev_token.as_str() == token /* above guard means prev_cardk != card_key */ =>
{
tracing::warn!(
token = token.as_ref(),
"trying to overwrite card key for existing token, ignoring!"
);
return Promise::ok(());
},
(Some(prev_token), None) => tracing::warn!(
user.id, prev_token,
"token already set for user but no card key, setting new pair unconditionally!"
),
(None, Some(_)) => tracing::warn!(
user.id,
"card key already set for user but no token, setting new pair unconditionally!"
),
(Some(_), Some(_)) | (None, None) => tracing::debug!(
user.id, token = token.as_ref(),
"Adding new card key/token pair"
),
}
user.userdata
.kv
.insert("cardtoken".to_string(), token.to_string());
user.userdata.kv.insert("cardkey".to_string(), card_key);
pry!(self.session.users.put_user(self.user.get_username(), &user));
Promise::ok(())
}
fn unbind(&mut self, params: UnbindParams, _: UnbindResults) -> Promise<(), Error> {
let _guard = self.span.enter();
let _span = tracing::trace_span!(target: TARGET, "unbind").entered();
let params = pry!(params.get());
let token = pry!(params.get_token());
let token: Cow<'_, str> = if let Ok(url) = std::str::from_utf8(token) {
Cow::Borrowed(url)
} else {
Cow::Owned(hex::encode(token))
};
tracing::trace!(params.token = token.as_ref(), "method call");
let mut user = pry!(self
.session
.users
.get_user(self.user.get_username())
.ok_or_else(|| Error::failed(format!(
"User API object with nonexisting user \"{}\"",
self.user.get_username()
))));
if let Some(prev_token) = user.userdata.kv.get("cardtoken") {
if token.as_ref() == prev_token.as_str() {
tracing::debug!(
user.id,
token = token.as_ref(),
"removing card key/token pair"
);
user.userdata.kv.remove("cardtoken");
user.userdata.kv.remove("cardkey");
}
}
pry!(self.session.users.put_user(self.user.get_username(), &user));
Promise::ok(())
}
fn gen_card_token(
&mut self,
_: GenCardTokenParams,
mut results: GenCardTokenResults,
) -> Promise<(), Error> {
let _guard = self.span.enter();
let _span = tracing::trace_span!(target: TARGET, "gen_card_token").entered();
tracing::trace!("method call");
results.get().set_token(Uuid::new_v4().as_bytes());
Promise::ok(())
}
fn get_meta_info(
&mut self,
_: GetMetaInfoParams,
mut results: GetMetaInfoResults,
) -> Promise<(), Error> {
let _guard = self.span.enter();
let _span = tracing::trace_span!(target: TARGET, "get_meta_info").entered();
tracing::trace!("method call");
results.get().set_bytes(b"FABACCESS\x00DESFIRE\x001.0\x00");
Promise::ok(())
}
fn get_space_info(
&mut self,
_: GetSpaceInfoParams,
mut results: GetSpaceInfoResults,
) -> Promise<(), Error> {
let _guard = self.span.enter();
let _span = tracing::trace_span!(target: TARGET, "get_space_info").entered();
tracing::trace!("method call");
let space = if let Some(space) = CONFIG.get().map(|c| c.spacename.as_str()) {
space
} else {
return Promise::err(Error::failed("No space name configured".to_string()));
};
let url = if let Some(url) = CONFIG.get().map(|c| c.instanceurl.as_str()) {
url
} else {
return Promise::err(Error::failed("No instance url configured".to_string()));
};
let mut data = Vec::new();
write!(&mut data, "urn:fabaccess:lab:{space}\x00{url}").unwrap();
results.get().set_bytes(&data);
Promise::ok(())
}
}

158
bffhd/capnp/user_system.rs Normal file
View File

@ -0,0 +1,158 @@
use api::usersystem_capnp::user_system::{info, manage, search};
use capnp::capability::Promise;
use capnp_rpc::pry;
use tracing::Span;
use crate::capnp::user::User;
use crate::session::SessionHandle;
use crate::users::{db, UserRef};
const TARGET: &str = "bffh::api::usersystem";
#[derive(Clone)]
/// Cap'n Proto implementation of the UserSystem info/manage/search interfaces.
pub struct Users {
    // Tracing span for calls on this capability.
    span: Span,
    // The authenticated session this capability was created for.
    session: SessionHandle,
}

impl Users {
    /// Create a UserSystem capability bound to `session`.
    pub fn new(session: SessionHandle) -> Self {
        let span = tracing::info_span!(target: TARGET, "UserSystem",);
        Self { span, session }
    }
}
impl info::Server for Users {
    /// Return a User capability describing the calling session's own user.
    fn get_user_self(
        &mut self,
        _: info::GetUserSelfParams,
        mut result: info::GetUserSelfResults,
    ) -> Promise<(), ::capnp::Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(target: TARGET, "getUserSelf").entered();
        tracing::trace!("method call");

        User::build(self.session.clone(), result.get());

        tracing::trace!("method return");
        Promise::ok(())
    }
}
impl manage::Server for Users {
    /// List every user in the database, each filled with the interfaces the
    /// calling session may use on them.
    fn get_user_list(
        &mut self,
        _: manage::GetUserListParams,
        mut result: manage::GetUserListResults,
    ) -> Promise<(), ::capnp::Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(target: TARGET, "getUserList",).entered();
        tracing::trace!("method call");

        let userdb = self.session.users.into_inner();
        let users = pry!(userdb
            .get_all()
            .map_err(|e| capnp::Error::failed(format!("UserDB error: {:?}", e))));
        let mut builder = result.get().init_user_list(users.len() as u32);
        for (i, (id, userdata)) in users.into_iter().enumerate() {
            let user = db::User { id, userdata };
            User::fill(&self.session, user, builder.reborrow().get(i as u32));
        }
        tracing::trace!("method return");
        Promise::ok(())
    }

    /// Create a new user, reporting failures (taken name, empty name or
    /// password) in the result union instead of as RPC errors.
    fn add_user_fallible(
        &mut self,
        params: manage::AddUserFallibleParams,
        mut result: manage::AddUserFallibleResults,
    ) -> Promise<(), ::capnp::Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(target: TARGET, "addUserFallible").entered();

        let params = pry!(params.get());
        let username = pry!(params.get_username());
        let password = pry!(params.get_password());
        // FIXME: saslprep passwords & usernames before storing them

        tracing::trace!(
            params.username = username,
            params.password = "<redacted>",
            "method call"
        );

        let builder = result.get();

        if !username.is_empty() && !password.is_empty() {
            if self.session.users.get_user(username).is_none() {
                let user = db::User::new_with_plain_pw(username, password);
                pry!(self.session.users.put_user(username, &user));
                let builder = builder.init_successful();
                User::fill(&self.session, user, builder);
            } else {
                let mut builder = builder.init_failed();
                builder.set_error(manage::add_user_error::AddUserError::AlreadyExists);
                tracing::warn!("Failed to add user: Username taken");
            }
        } else {
            // Username check first: an empty username wins over an empty
            // password when both are empty.
            if username.is_empty() {
                let mut builder = builder.init_failed();
                builder.set_error(manage::add_user_error::AddUserError::UsernameInvalid);
                tracing::warn!("Failed to add user: Username empty");
            } else if password.is_empty() {
                let mut builder = builder.init_failed();
                builder.set_error(manage::add_user_error::AddUserError::PasswordInvalid);
                tracing::warn!("Failed to add user: Password empty");
            }
        }

        tracing::trace!("method return");
        Promise::ok(())
    }

    /// Delete a user by name. Failures are only logged; the RPC itself
    /// still returns success.
    fn remove_user(
        &mut self,
        params: manage::RemoveUserParams,
        _: manage::RemoveUserResults,
    ) -> Promise<(), ::capnp::Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(target: TARGET, "removeUser",).entered();

        let who: &str = pry!(pry!(pry!(params.get()).get_user()).get_username());
        tracing::trace!(params.user = who, "method call");

        if let Err(e) = self.session.users.del_user(who) {
            tracing::warn!("Failed to delete user: {:?}", e);
        } else {
            tracing::info!("Deleted user {}", who);
        }

        tracing::trace!("method return");
        Promise::ok(())
    }
}
impl search::Server for Users {
    /// Look a user up by exact username; an unknown name yields an empty
    /// optional in the result.
    fn get_user_by_name(
        &mut self,
        params: search::GetUserByNameParams,
        mut result: search::GetUserByNameResults,
    ) -> Promise<(), ::capnp::Error> {
        let _guard = self.span.enter();
        let _span = tracing::trace_span!(target: TARGET, "getUserByName",).entered();

        let username: &str = pry!(pry!(params.get()).get_username());
        tracing::trace!(params.username = username, "method call");

        let wanted = Some(UserRef::new(username.to_string()));
        User::build_optional(&self.session, wanted, result.get());

        tracing::trace!("method return");
        Promise::ok(())
    }
}

172
bffhd/config/dhall.rs Normal file
View File

@ -0,0 +1,172 @@
use std::collections::HashMap;
use std::default::Default;
use std::fmt::Debug;
use std::path::PathBuf;
use serde::{Deserialize, Serialize};
use crate::authorization::permissions::PrivilegesBuf;
use crate::authorization::roles::Role;
use crate::capnp::{Listen, TlsListen};
use crate::logging::LogConfig;
use std::path::Path;
#[derive(Debug)]
// NOTE(review): no uses of this struct are visible in this module — possibly
// a leftover from an earlier config-loading design; confirm before removing.
struct DhallConfig<'a> {
    path: &'a Path,
}
/// Parse a Dhall configuration file into a [`Config`].
///
/// # Errors
/// Returns the underlying `serde_dhall` error on read or parse failure.
pub fn read_config_file(path: impl AsRef<Path>) -> Result<Config, serde_dhall::Error> {
    // `.parse()` already produces `Result<Config, serde_dhall::Error>`;
    // the previous `map_err(Into::into)` was an identity conversion.
    serde_dhall::from_file(path).parse()
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
/// A description of a machine
///
/// This is the struct that a machine is serialized to/from.
/// Combining this with the actual state of the system will return a machine
pub struct MachineDescription {
    /// The name of the machine. Doesn't need to be unique but is what humans will be presented.
    pub name: String,
    /// An optional description of the Machine.
    /// `deser_option` turns any invalid value into `None` instead of failing.
    #[serde(
        default,
        skip_serializing_if = "Option::is_none",
        deserialize_with = "deser_option"
    )]
    pub description: Option<String>,
    /// Optional link to a wiki page for this machine.
    #[serde(
        default,
        skip_serializing_if = "Option::is_none",
        deserialize_with = "deser_option"
    )]
    pub wiki: Option<String>,
    /// Optional category the machine is listed under.
    #[serde(
        default,
        skip_serializing_if = "Option::is_none",
        deserialize_with = "deser_option"
    )]
    pub category: Option<String>,
    /// The permission required
    #[serde(flatten)]
    pub privs: PrivilegesBuf,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
/// Top-level BFFH configuration, deserialized from the Dhall config file.
pub struct Config {
    /// A list of address/port pairs to listen on.
    pub listens: Vec<Listen>,

    /// Machine descriptions to load
    pub machines: HashMap<String, MachineDescription>,

    /// Actors to load and their configuration options
    pub actors: HashMap<String, ModuleConfig>,

    /// Initiators to load and their configuration options
    pub initiators: HashMap<String, ModuleConfig>,

    /// URL of the MQTT broker actors/initiators communicate over.
    pub mqtt_url: String,

    /// (machine, actor) connection pairs.
    pub actor_connections: Vec<(String, String)>,
    /// (initiator, machine) connection pairs.
    pub init_connections: Vec<(String, String)>,

    /// Directory of the LMDB state database.
    pub db_path: PathBuf,
    /// File the audit log is appended to.
    pub auditlog_path: PathBuf,

    /// Role definitions keyed by role name.
    pub roles: HashMap<String, Role>,

    /// TLS certificate/key configuration (flattened into the top level).
    #[serde(flatten)]
    pub tlsconfig: TlsListen,

    /// Optional path for a TLS key log (e.g. for debugging with Wireshark).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tlskeylog: Option<PathBuf>,

    // Set from the command line, not the config file.
    #[serde(default, skip)]
    pub verbosity: isize,

    // Set from the command line, not the config file.
    #[serde(default, skip)]
    pub logging: LogConfig,

    /// Human-readable name of the space this instance serves.
    pub spacename: String,

    /// Public base URL of this instance.
    pub instanceurl: String,
}
impl Config {
    /// True when verbosity was lowered below the default, i.e. quiet mode.
    pub fn is_quiet(&self) -> bool {
        self.verbosity.is_negative()
    }
}
#[derive(Debug, Clone, Serialize, Deserialize)]
/// Configuration of a single loadable module (actor or initiator).
pub struct ModuleConfig {
    /// Name of the module implementation to load (e.g. "Shelly").
    pub module: String,
    /// Free-form key/value parameters passed to the module.
    pub params: HashMap<String, String>,
}
/// Lenient `Option` deserializer: any value that fails to deserialize as `T`
/// becomes `None` instead of aborting deserialization with an error.
pub(crate) fn deser_option<'de, D, T>(d: D) -> std::result::Result<Option<T>, D::Error>
where
    D: serde::Deserializer<'de>,
    T: serde::Deserialize<'de>,
{
    match T::deserialize(d) {
        Ok(value) => Ok(Some(value)),
        Err(_) => Ok(None),
    }
}
impl Default for Config {
    /// Example/testing configuration: one local listener, a "Shelly" actor and
    /// a "TCP-Listen" initiator wired to a "Testmachine", local MQTT broker.
    fn default() -> Self {
        let mut actors: HashMap<String, ModuleConfig> = HashMap::new();
        let mut initiators: HashMap<String, ModuleConfig> = HashMap::new();
        let machines = HashMap::new();
        actors.insert(
            "Actor".to_string(),
            ModuleConfig {
                module: "Shelly".to_string(),
                params: HashMap::new(),
            },
        );
        initiators.insert(
            "Initiator".to_string(),
            ModuleConfig {
                module: "TCP-Listen".to_string(),
                params: HashMap::new(),
            },
        );
        Config {
            listens: vec![Listen {
                address: "127.0.0.1".to_string(),
                port: None,
            }],
            actors,
            initiators,
            machines,
            mqtt_url: "tcp://localhost:1883".to_string(),
            actor_connections: vec![("Testmachine".to_string(), "Actor".to_string())],
            init_connections: vec![("Initiator".to_string(), "Testmachine".to_string())],
            db_path: PathBuf::from("/run/bffh/database"),
            auditlog_path: PathBuf::from("/var/log/bffh/audit.log"),
            roles: HashMap::new(),
            tlsconfig: TlsListen {
                certfile: PathBuf::from("./bffh.crt"),
                keyfile: PathBuf::from("./bffh.key"),
                ..Default::default()
            },
            tlskeylog: None,
            verbosity: 0,
            logging: LogConfig::default(),
            instanceurl: "".into(),
            spacename: "".into(),
        }
    }
}

52
bffhd/config/mod.rs Normal file
View File

@ -0,0 +1,52 @@
use std::path::Path;
use miette::Diagnostic;
use thiserror::Error;
pub(crate) use dhall::deser_option;
pub use dhall::{Config, MachineDescription, ModuleConfig};
mod dhall;
/// Errors that can occur while locating, reading, or parsing the config file.
#[derive(Debug, Error, Diagnostic)]
pub enum ConfigError {
    #[error("The config file '{0}' does not exist or is not readable")]
    #[diagnostic(
        code(config::notfound),
        help("Make sure the config file and the directory it's in are readable by the user running bffh")
    )]
    NotFound(String),
    #[error("The path '{0}' does not point to a file")]
    #[diagnostic(
        code(config::notafile),
        help("The config must be a file in the dhall format")
    )]
    NotAFile(String),
    #[error("failed to parse config: {0}")]
    #[diagnostic(code(config::parse))]
    Parse(
        #[from]
        #[source]
        serde_dhall::Error,
    ),
}
/// Load the configuration from `file`, validating that the path exists and
/// points at a regular file before parsing it as Dhall.
///
/// # Errors
/// [`ConfigError::NotFound`] / [`ConfigError::NotAFile`] for path problems,
/// [`ConfigError::Parse`] for Dhall read/parse failures.
pub fn read(file: impl AsRef<Path>) -> Result<Config, ConfigError> {
    let path = file.as_ref();
    if !path.exists() {
        Err(ConfigError::NotFound(path.to_string_lossy().to_string()))
    } else if !path.is_file() {
        Err(ConfigError::NotAFile(path.to_string_lossy().to_string()))
    } else {
        // TODO: overrides from environment variables (e.g. BFFH_LOG) could be
        // applied here, preferably in a separate function.
        Ok(dhall::read_config_file(file)?)
    }
}

90
bffhd/db/mod.rs Normal file
View File

@ -0,0 +1,90 @@
use thiserror::Error;
// for converting a database error into a failed promise
use capnp;
mod raw;
use miette::{Diagnostic, Severity};
pub use raw::RawDB;
use std::fmt::{Debug, Display};
mod typed;
pub use typed::{Adapter, AlignedAdapter, ArchivedValue, DB};
/// Alias for the raw LMDB error type.
pub type ErrorO = lmdb::Error;

/// Result alias using this module's [`Error`] wrapper.
pub type Result<T> = std::result::Result<T, Error>;

/// Transparent wrapper around [`lmdb::Error`] that adds miette diagnostics
/// (error codes, severity, and help text).
#[derive(Clone, Debug, PartialEq, Eq, Error)]
#[error(transparent)]
#[repr(transparent)]
pub struct Error(#[from] lmdb::Error);
impl Diagnostic for Error {
    /// Map every LMDB error variant to a stable `bffh::db::raw::*` code.
    fn code<'a>(&'a self) -> Option<Box<dyn Display + 'a>> {
        Some(Box::new(match self.0 {
            lmdb::Error::KeyExist => "bffh::db::raw::key_exists".to_string(),
            lmdb::Error::NotFound => "bffh::db::raw::not_found".to_string(),
            lmdb::Error::PageNotFound => "bffh::db::raw::page_not_found".to_string(),
            lmdb::Error::Corrupted => "bffh::db::raw::corrupted".to_string(),
            lmdb::Error::Panic => "bffh::db::raw::panic".to_string(),
            lmdb::Error::VersionMismatch => "bffh::db::raw::version_mismatch".to_string(),
            lmdb::Error::Invalid => "bffh::db::raw::invalid".to_string(),
            lmdb::Error::MapFull => "bffh::db::raw::map_full".to_string(),
            lmdb::Error::DbsFull => "bffh::db::raw::dbs_full".to_string(),
            lmdb::Error::ReadersFull => "bffh::db::raw::readers_full".to_string(),
            lmdb::Error::TlsFull => "bffh::db::raw::tls_full".to_string(),
            lmdb::Error::TxnFull => "bffh::db::raw::txn_full".to_string(),
            lmdb::Error::CursorFull => "bffh::db::raw::cursor_full".to_string(),
            lmdb::Error::PageFull => "bffh::db::raw::page_full".to_string(),
            lmdb::Error::MapResized => "bffh::db::raw::map_resized".to_string(),
            lmdb::Error::Incompatible => "bffh::db::raw::incompatible".to_string(),
            lmdb::Error::BadRslot => "bffh::db::raw::bad_rslot".to_string(),
            lmdb::Error::BadTxn => "bffh::db::raw::bad_txn".to_string(),
            lmdb::Error::BadValSize => "bffh::db::raw::bad_val_size".to_string(),
            lmdb::Error::BadDbi => "bffh::db::raw::bad_dbi".to_string(),
            lmdb::Error::Other(n) => format!("bffh::db::raw::e{}", n),
        }))
    }

    /// All database errors are reported at `Error` severity.
    fn severity(&self) -> Option<Severity> {
        Some(Severity::Error)
    }

    /// Human-readable remediation hints for the variants where one exists.
    fn help<'a>(&'a self) -> Option<Box<dyn Display + 'a>> {
        match self.0 {
            lmdb::Error::KeyExist => Some(Box::new("The provided key already exists in the database")),
            lmdb::Error::NotFound => Some(Box::new("The requested key was not found in the database")),
            lmdb::Error::PageNotFound => Some(Box::new("The requested page was not found. This usually indicates corruption.")),
            lmdb::Error::Corrupted => None,
            lmdb::Error::Panic => None,
            lmdb::Error::VersionMismatch => None,
            lmdb::Error::Invalid => None,
            lmdb::Error::MapFull => None,
            lmdb::Error::DbsFull => None,
            lmdb::Error::ReadersFull => None,
            lmdb::Error::TlsFull => None,
            lmdb::Error::TxnFull => None,
            lmdb::Error::CursorFull => None,
            lmdb::Error::PageFull => None,
            lmdb::Error::MapResized => None,
            lmdb::Error::Incompatible => None,
            lmdb::Error::BadRslot => Some(Box::new("This usually indicates that the operation can't complete because an incompatible transaction is still open.")),
            lmdb::Error::BadTxn => None,
            lmdb::Error::BadValSize => None,
            lmdb::Error::BadDbi => None,
            lmdb::Error::Other(_) => None,
        }
    }

    fn url<'a>(&'a self) -> Option<Box<dyn Display + 'a>> {
        None
    }
}
impl From<Error> for capnp::Error {
fn from(dberr: Error) -> capnp::Error {
capnp::Error::failed(format!("database error: {}", dberr.to_string()))
}
}

85
bffhd/db/raw.rs Normal file
View File

@ -0,0 +1,85 @@
use lmdb::{DatabaseFlags, Environment, RwTransaction, Transaction, WriteFlags};
#[derive(Debug, Clone)]
/// Thin wrapper around an [`lmdb::Database`] handle with transaction-scoped
/// helper methods. Cloning copies the handle, not the data.
pub struct RawDB {
    db: lmdb::Database,
}

impl RawDB {
    /// Open an existing named (or the unnamed default) database.
    pub fn open(env: &Environment, name: Option<&str>) -> lmdb::Result<Self> {
        env.open_db(name).map(|db| Self { db })
    }

    /// Create (or open) a named database with the given flags.
    pub fn create(
        env: &Environment,
        name: Option<&str>,
        flags: DatabaseFlags,
    ) -> lmdb::Result<Self> {
        env.create_db(name, flags).map(|db| Self { db })
    }

    /// Look up `key`, mapping LMDB's `NotFound` to `Ok(None)` so only real
    /// failures surface as errors. The returned slice borrows from the txn.
    pub fn get<'txn, T: Transaction, K>(
        &self,
        txn: &'txn T,
        key: &K,
    ) -> lmdb::Result<Option<&'txn [u8]>>
    where
        K: AsRef<[u8]>,
    {
        match txn.get(self.db, key) {
            Ok(buf) => Ok(Some(buf)),
            Err(lmdb::Error::NotFound) => Ok(None),
            Err(e) => Err(e),
        }
    }

    /// Store `value` under `key` within the write transaction.
    pub fn put<K, V>(
        &self,
        txn: &mut RwTransaction,
        key: &K,
        value: &V,
        flags: WriteFlags,
    ) -> lmdb::Result<()>
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        txn.put(self.db, key, value, flags)
    }

    /// Reserve `size` bytes under `key` and return the writable buffer, so a
    /// value can be serialized directly into the database without a copy.
    pub fn reserve<'txn, K>(
        &self,
        txn: &'txn mut RwTransaction,
        key: &K,
        size: usize,
        flags: WriteFlags,
    ) -> lmdb::Result<&'txn mut [u8]>
    where
        K: AsRef<[u8]>,
    {
        txn.reserve(self.db, key, size, flags)
    }

    /// Delete `key`; with `Some(value)` only the matching duplicate entry is
    /// removed (relevant for DUPSORT databases).
    pub fn del<K, V>(&self, txn: &mut RwTransaction, key: &K, value: Option<&V>) -> lmdb::Result<()>
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        txn.del(self.db, key, value.map(AsRef::as_ref))
    }

    /// Remove every entry from this database.
    pub fn clear(&self, txn: &mut RwTransaction) -> lmdb::Result<()> {
        txn.clear_db(self.db)
    }

    /// Iterate all entries from the start using the provided cursor.
    pub fn iter<'txn, C: lmdb::Cursor<'txn>>(&self, cursor: &'txn mut C) -> lmdb::Iter<'txn> {
        cursor.iter_start()
    }

    /// Open a read-only cursor over this database for the given transaction.
    pub fn open_ro_cursor<'txn, T: Transaction>(
        &self,
        txn: &'txn T,
    ) -> lmdb::Result<lmdb::RoCursor<'txn>> {
        txn.open_ro_cursor(self.db)
    }
}

160
bffhd/db/typed.rs Normal file
View File

@ -0,0 +1,160 @@
use crate::db::RawDB;
use lmdb::{Cursor, RwTransaction, Transaction, WriteFlags};
use rkyv::{AlignedVec, Archive, Archived};
use std::fmt;
use std::fmt::{Debug, Display, Formatter};
use std::marker::PhantomData;
use crate::db;
#[derive(Clone)]
/// Packed, sendable resource state
pub struct ArchivedValue<T> {
    /// State is encoded using rkyv making it trivially serializable
    data: AlignedVec,
    // Marks the logical value type without storing a `T`.
    _marker: PhantomData<T>,
}
impl<T> ArchivedValue<T> {
    /// Wrap an already-aligned byte buffer as a typed archived value.
    pub fn new(data: AlignedVec) -> Self {
        Self {
            data,
            _marker: PhantomData,
        }
    }

    /// Copy a plain byte slice into an aligned buffer and wrap it.
    /// Needed because LMDB hands out unaligned buffers.
    pub fn build(data: &[u8]) -> Self {
        let mut v = AlignedVec::with_capacity(data.len());
        v.extend_from_slice(data);
        Self::new(v)
    }

    /// Mutable access to the underlying aligned buffer.
    pub fn as_mut(&mut self) -> &mut AlignedVec {
        &mut self.data
    }

    /// The raw encoded bytes.
    pub fn as_slice(&self) -> &[u8] {
        self.data.as_slice()
    }

    /// The raw encoded bytes, mutably.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        self.data.as_mut_slice()
    }
}
impl<T: Archive> AsRef<Archived<T>> for ArchivedValue<T> {
    /// View the buffer as the archived form of `T`.
    fn as_ref(&self) -> &Archived<T> {
        // SAFETY: relies on `data` containing a valid rkyv archive of `T`
        // (aligned via AlignedVec). No validation is performed here — a buffer
        // that was not produced by rkyv-serializing a `T` is undefined
        // behavior. NOTE(review): construction via `build()` does not enforce
        // this invariant; confirm all producers write valid archives.
        unsafe { rkyv::archived_root::<T>(self.as_slice()) }
    }
}
//
// Debug implementation shows wrapping SendState
//
impl<T: Archive> Debug for ArchivedValue<T>
where
    <T as Archive>::Archived: Debug,
{
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_tuple("SendState").field(self.as_ref()).finish()
    }
}

//
// Display implementation hides wrapping SendState
//
impl<T: Archive> Display for ArchivedValue<T>
where
    <T as Archive>::Archived: Display,
{
    // Delegates directly to the archived value's own Display.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        Display::fmt(self.as_ref(), f)
    }
}
/// Adapter trait handling de-/serialization
///
/// Values must be read from raw, unaligned byte buffers provided by LMDB.
pub trait Adapter {
    /// The durable, owned value type this adapter produces/consumes.
    type Item;

    /// Decode data from a short-lived byte buffer into a durable format
    fn decode(data: &[u8]) -> Self::Item;

    /// Number of bytes `encode_into` will write for `item`.
    fn encoded_len(item: &Self::Item) -> usize;

    /// Serialize `item` into `buf`, which must be exactly `encoded_len` bytes.
    fn encode_into(item: &Self::Item, buf: &mut [u8]);
}

/// [`Adapter`] for rkyv-archived values: decoding copies the bytes into an
/// aligned buffer, encoding copies them back out verbatim.
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
pub struct AlignedAdapter<V>(PhantomData<V>);
impl<V> Adapter for AlignedAdapter<V> {
    type Item = ArchivedValue<V>;

    fn decode(data: &[u8]) -> Self::Item {
        ArchivedValue::build(data)
    }

    fn encoded_len(item: &Self::Item) -> usize {
        item.as_slice().len()
    }

    fn encode_into(item: &Self::Item, buf: &mut [u8]) {
        // Panics if `buf` is not exactly `encoded_len(item)` bytes.
        buf.copy_from_slice(item.as_slice())
    }
}
#[derive(Debug, Clone)]
#[repr(transparent)]
/// `Typed` database, allowing storing a typed value
///
/// Values must be serialized into and deserialized from raw byte buffers.
/// This is handled by a stateless [Adapter] given by the type parameter `A`
pub struct DB<A> {
    // Untyped LMDB handle this wrapper adds typing to.
    db: RawDB,
    // Carries the adapter type only; no adapter value is ever stored.
    _marker: PhantomData<A>,
}
impl<A> DB<A> {
pub fn new(db: RawDB) -> Self {
Self {
db,
_marker: PhantomData,
}
}
}
impl<A: Adapter> DB<A> {
    /// Look up `key`, decoding the stored bytes via the adapter.
    ///
    /// Returns `Ok(None)` when the key is absent.
    pub fn get<T: Transaction>(
        &self,
        txn: &T,
        key: &impl AsRef<[u8]>,
    ) -> Result<Option<A::Item>, db::Error> {
        Ok(self.db.get(txn, key)?.map(A::decode))
    }

    /// Store `value` under `key`.
    ///
    /// Reserves an LMDB buffer of exactly the encoded size and serializes
    /// directly into it, avoiding an intermediate allocation.
    pub fn put(
        &self,
        txn: &mut RwTransaction,
        key: &impl AsRef<[u8]>,
        value: &A::Item,
        flags: WriteFlags,
    ) -> Result<(), db::Error> {
        let len = A::encoded_len(value);
        let buf = self.db.reserve(txn, key, len, flags)?;
        // LMDB must hand back exactly the requested size; anything else would
        // make `encode_into` write out of bounds or truncate.
        assert_eq!(buf.len(), len, "Reserved buffer is not of requested size!");
        A::encode_into(value, buf);
        Ok(())
    }

    /// Delete `key` (and its value) from the database.
    pub fn del(&self, txn: &mut RwTransaction, key: &impl AsRef<[u8]>) -> Result<(), db::Error> {
        Ok(self.db.del::<_, &[u8]>(txn, key, None)?)
    }

    /// Remove every entry from the database.
    pub fn clear(&self, txn: &mut RwTransaction) -> Result<(), db::Error> {
        Ok(self.db.clear(txn)?)
    }

    /// Iterate all `(key, value)` pairs, decoding each value.
    ///
    /// Entries whose cursor read fails are silently skipped.
    pub fn get_all<'txn, T: Transaction>(
        &self,
        txn: &'txn T,
    ) -> Result<impl IntoIterator<Item = (&'txn [u8], A::Item)>, db::Error> {
        let mut cursor = self.db.open_ro_cursor(txn)?;
        let it = cursor.iter_start();
        Ok(it.filter_map(|buf| buf.ok().map(|(kbuf, vbuf)| (kbuf, A::decode(vbuf)))))
    }
}

97
bffhd/error.rs Normal file
View File

@ -0,0 +1,97 @@
use miette::{Diagnostic, Severity};
use std::error;
use std::fmt::{Display, Formatter};
use std::io;
use thiserror::Error;
/// Static diagnostic metadata attached to an [`Error`] at the type level.
///
/// Implementors provide the miette-style code/help/url for a class of errors.
pub trait Description {
    /// Optional human-readable summary; falls back to the source's text.
    const DESCRIPTION: Option<&'static str> = None;
    /// Mandatory machine-readable error code (e.g. `"signals::new"`).
    const CODE: &'static str;
    /// Optional hint shown to the user on how to resolve the problem.
    const HELP: Option<&'static str> = None;
    /// Optional link to further documentation.
    const URL: Option<&'static str> = None;
}
/// Convenience helper: wrap a [`Source`] error with the metadata of `D`.
pub fn wrap<D: Description>(error: Source) -> Error {
    Error::new::<D>(error)
}
#[derive(Debug, Error, Diagnostic)]
/// Underlying causes an [`Error`] can wrap.
pub enum Source {
    // NOTE: the "occured" typo below is in a user-visible error string;
    // fixing it would change runtime output, so it is left untouched here.
    #[error("io error occured")]
    Io(
        #[source]
        #[from]
        io::Error,
    ),
}
#[derive(Debug)]
/// A [`Source`] error enriched with the static metadata of a [`Description`].
pub struct Error {
    // The fields below mirror the associated consts of the `Description`
    // that created this error (see `Error::new`).
    description: Option<&'static str>,
    code: &'static str,
    severity: Option<Severity>,
    help: Option<&'static str>,
    url: Option<&'static str>,
    // The wrapped underlying error.
    source: Source,
}
impl Display for Error {
    /// Delegates to the wrapped source; the static metadata is exposed via
    /// the [`miette::Diagnostic`] impl instead.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        Display::fmt(&self.source, f)
    }
}
impl error::Error for Error {
    /// Expose the wrapped [`Source`] as the cause chain.
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        Some(&self.source)
    }

    /// Prefer the static description, falling back to the source's.
    fn description(&self) -> &str {
        match self.description {
            Some(desc) => desc,
            None => self.source.description(),
        }
    }
}
impl Error {
pub fn new<D: Description>(source: Source) -> Self {
Self {
description: D::DESCRIPTION,
code: D::CODE,
severity: source.severity(),
help: D::HELP,
url: D::URL,
source,
}
}
}
impl miette::Diagnostic for Error {
    /// The machine-readable code from the originating [`Description`].
    fn code<'a>(&'a self) -> Option<Box<dyn Display + 'a>> {
        Some(Box::new(self.code))
    }

    fn severity(&self) -> Option<Severity> {
        self.severity
    }

    /// Static help text, if the `Description` provided any.
    fn help<'a>(&'a self) -> Option<Box<dyn Display + 'a>> {
        self.help.map(|h| Box::new(h) as Box<dyn Display + 'a>)
    }

    /// Static documentation URL, if the `Description` provided any.
    fn url<'a>(&'a self) -> Option<Box<dyn Display + 'a>> {
        self.url.map(|u| Box::new(u) as Box<dyn Display + 'a>)
    }

    fn diagnostic_source(&self) -> Option<&dyn Diagnostic> {
        Some(&self.source)
    }
}

109
bffhd/initiators/dummy.rs Normal file
View File

@ -0,0 +1,109 @@
use miette::{miette, Diagnostic};
use thiserror::Error;
use super::Initiator;
use crate::initiators::InitiatorCallbacks;
use crate::resources::modules::fabaccess::Status;
use crate::session::SessionHandle;
use async_io::Timer;
use futures_util::future::BoxFuture;
use futures_util::ready;
use std::collections::HashMap;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
/// Test initiator that flips a resource between `Free` and `InUse` forever.
pub struct Dummy {
    // Channel back into bffh used to push status updates.
    callbacks: InitiatorCallbacks,
    // Session of the configured user; updates are attributed to it.
    session: SessionHandle,
    state: DummyState,
}

/// Poll-state machine of the [`Dummy`] initiator.
enum DummyState {
    /// Not yet started; the next poll seeds a sleep with `Status::Free`.
    Empty,
    /// Waiting for the timer; holds the status to apply when it fires.
    Sleeping(Timer, Option<Status>),
    /// Update in flight; the future resolves to the *next* status.
    Updating(BoxFuture<'static, Status>),
}
impl Dummy {
    /// Fixed two-second pause between status flips.
    fn timer() -> Timer {
        Timer::after(Duration::from_secs(2))
    }

    /// Apply `status` to the resource and compute the status to apply next
    /// (Free -> InUse(configured user) -> Free -> …).
    fn flip(&self, status: Status) -> BoxFuture<'static, Status> {
        let session = self.session.clone();
        let mut callbacks = self.callbacks.clone();
        Box::pin(async move {
            let next = match &status {
                Status::InUse(_) => Status::Free,
                Status::Free => Status::InUse(session.get_user_ref()),
                _ => Status::Free,
            };
            callbacks.try_update(session, status).await;
            next
        })
    }
}
#[derive(Debug, Error, Diagnostic)]
/// Error type reserved for [`Dummy`]; it currently has no failure modes.
pub enum DummyError {}
impl Future for Dummy {
    type Output = ();

    /// Drives the Empty -> Sleeping -> Updating -> Sleeping… state machine.
    ///
    /// Never returns `Ready`; it parks (`Pending`) inside `ready!` whenever
    /// the timer or the update future is not finished yet.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let span = tracing::debug_span!("Dummy initiator poll");
        let _guard = span.enter();
        tracing::trace!("polling Dummy initiator");
        loop {
            match &mut self.state {
                DummyState::Empty => {
                    tracing::trace!("Dummy initiator is empty, initializing…");
                    self.state = DummyState::Sleeping(Self::timer(), Some(Status::Free));
                }
                DummyState::Sleeping(timer, next) => {
                    tracing::trace!("Sleep timer exists, polling it.");
                    let _: Instant = ready!(Pin::new(timer).poll(cx));
                    tracing::trace!("Timer has fired, poking out an update!");
                    // `next` is always `Some` here: it is only taken right
                    // before the state is replaced with `Updating`.
                    let status = next.take().unwrap();
                    let f = self.flip(status);
                    self.state = DummyState::Updating(f);
                }
                DummyState::Updating(f) => {
                    tracing::trace!("Update future exists, polling it .");
                    let next = ready!(Pin::new(f).poll(cx));
                    tracing::trace!("Update future completed, sleeping!");
                    self.state = DummyState::Sleeping(Self::timer(), Some(next));
                }
            }
        }
    }
}
impl Initiator for Dummy {
    /// Build a `Dummy` from its config; requires a `uid` parameter naming an
    /// existing user on whose behalf status updates will be made.
    fn new(params: &HashMap<String, String>, callbacks: InitiatorCallbacks) -> miette::Result<Self>
    where
        Self: Sized,
    {
        let uid = match params.get("uid") {
            Some(uid) => uid,
            None => return Err(miette!("Dummy initiator configured without an UID")),
        };
        let session = match callbacks.open_session(uid) {
            Some(session) => session,
            None => {
                return Err(miette!(
                    "The configured user for the dummy initiator does not exist"
                ))
            }
        };
        Ok(Self {
            callbacks,
            session,
            state: DummyState::Empty,
        })
    }
}

173
bffhd/initiators/mod.rs Normal file
View File

@ -0,0 +1,173 @@
use crate::initiators::dummy::Dummy;
use crate::initiators::process::Process;
use crate::resources::modules::fabaccess::Status;
use crate::session::SessionHandle;
use crate::{
AuthenticationHandle, Config, Resource, ResourcesHandle, SessionManager,
};
use executor::prelude::Executor;
use futures_util::ready;
use std::collections::HashMap;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use tracing::Span;
mod dummy;
mod process;
/// A task that autonomously drives a resource's state (from a timer, a
/// process, etc.) — the counterpart to actors, which react to state changes.
pub trait Initiator: Future<Output = ()> {
    /// Construct the initiator from its configured string parameters.
    fn new(params: &HashMap<String, String>, callbacks: InitiatorCallbacks) -> miette::Result<Self>
    where
        Self: Sized;

    /// Defaults to the `Future` impl; exists so boxed initiators can be polled.
    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> {
        <Self as Future>::poll(self, cx)
    }
}
#[derive(Clone)]
/// Handles an initiator needs to talk back into bffh.
pub struct InitiatorCallbacks {
    // Tracing span of the owning initiator; used when opening sessions.
    span: Span,
    // The resource this initiator drives.
    resource: Resource,
    sessions: SessionManager,
}
impl InitiatorCallbacks {
    pub fn new(span: Span, resource: Resource, sessions: SessionManager) -> Self {
        Self {
            span,
            resource,
            sessions,
        }
    }

    /// Request a permission-checked state update on behalf of `session`.
    pub async fn try_update(&mut self, session: SessionHandle, status: Status) {
        self.resource.try_update(session, status).await
    }

    /// Set the resource state directly, bypassing permission checks.
    pub fn set_status(&mut self, status: Status) {
        self.resource.set_status(status)
    }

    /// Open a session for `uid`; `None` if that user does not exist.
    pub fn open_session(&self, uid: &str) -> Option<SessionHandle> {
        self.sessions.try_open(&self.span, uid)
    }
}
/// Wrapper task that polls a boxed [`Initiator`] and logs its lifecycle.
pub struct InitiatorDriver {
    span: Span,
    name: String,
    initiator: Box<dyn Initiator + Unpin + Send>,
}
impl InitiatorDriver {
    /// Construct initiator `I` from `params` and wrap it for spawning.
    pub fn new<I>(
        span: Span,
        name: String,
        params: &HashMap<String, String>,
        resource: Resource,
        sessions: SessionManager,
    ) -> miette::Result<Self>
    where
        I: 'static + Initiator + Unpin + Send,
    {
        let callbacks = InitiatorCallbacks::new(span.clone(), resource, sessions);
        let inner = I::new(params, callbacks)?;
        Ok(Self {
            span,
            name,
            initiator: Box::new(inner),
        })
    }
}
impl Future for InitiatorDriver {
    type Output = ();

    /// Polls the wrapped initiator; becomes ready only if the initiator
    /// finishes (which is unexpected and logged as a warning).
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Fix: the span was previously bound to `_guard` without calling
        // `.enter()`, so it was never actually entered and the log events
        // below did not carry it.
        let span = tracing::info_span!("initiator poll", initiator=%self.name);
        let _guard = span.enter();
        tracing::trace!(initiator=%self.name, "polling initiator");
        ready!(Pin::new(&mut self.initiator).poll(cx));
        tracing::warn!(initiator=%self.name, "initiator module ran to completion!");
        Poll::Ready(())
    }
}
/// Spawn one driver task per configured initiator.
///
/// Initiators whose machine or module cannot be resolved are logged and
/// skipped; this function itself currently always returns `Ok`.
pub fn load(
    executor: Executor,
    config: &Config,
    resources: ResourcesHandle,
    sessions: SessionManager,
    _authentication: AuthenticationHandle,
) -> miette::Result<()> {
    let span = tracing::info_span!("loading initiators");
    let _guard = span.enter();

    // Resolve the `initiator name -> machine id` table into actual resources,
    // dropping (and logging) entries that point at unknown machines.
    let mut initiator_map: HashMap<String, Resource> = config
        .init_connections
        .iter()
        .filter_map(|(k, v)| {
            if let Some(resource) = resources.get_by_id(v) {
                Some((k.clone(), resource.clone()))
            } else {
                tracing::error!(initiator=%k, machine=%v,
                    "Machine configured for initiator not found!");
                None
            }
        })
        .collect();

    for (name, cfg) in config.initiators.iter() {
        if let Some(resource) = initiator_map.remove(name) {
            if let Some(driver) = load_single(name, &cfg.module, &cfg.params, resource, &sessions) {
                tracing::debug!(module_name=%cfg.module, %name, "starting initiator task");
                executor.spawn(driver);
            } else {
                tracing::error!(module_name=%cfg.module, %name, "Initiator module could not be configured");
            }
        } else {
            tracing::warn!(actor=%name, ?config, "Initiator has no machine configured. Skipping!");
        }
    }

    Ok(())
}
/// Instantiate a single initiator by module name.
///
/// Returns `None` (after logging) for unknown modules or configuration
/// errors.
fn load_single(
    name: &String,
    module_name: &String,
    params: &HashMap<String, String>,
    resource: Resource,
    sessions: &SessionManager,
) -> Option<InitiatorDriver> {
    let span = tracing::info_span!(
        "initiator",
        name = %name,
        module = %module_name,
    );
    tracing::info!(%name, %module_name, ?params, "Loading initiator");
    let driver = match module_name.as_ref() {
        "Dummy" => {
            InitiatorDriver::new::<Dummy>(span, name.clone(), params, resource, sessions.clone())
        }
        "Process" => {
            InitiatorDriver::new::<Process>(span, name.clone(), params, resource, sessions.clone())
        }
        _ => return None,
    };
    match driver {
        Ok(driver) => Some(driver),
        Err(error) => {
            tracing::error!(%error, "failed to configure initiator");
            None
        }
    }
}

233
bffhd/initiators/process.rs Normal file
View File

@ -0,0 +1,233 @@
use super::Initiator;
use super::InitiatorCallbacks;
use crate::resources::modules::fabaccess::Status;
use crate::utils::linebuffer::LineBuffer;
use async_process::{Child, ChildStderr, ChildStdout, Command, Stdio};
use futures_lite::AsyncRead;
use miette::{miette, IntoDiagnostic};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
#[derive(Debug, Serialize, Deserialize)]
/// JSON messages accepted from the child process, one per stdout line.
pub enum InputMessage {
    /// `{"state": …}` — set the resource to the given status.
    #[serde(rename = "state")]
    SetState(Status),
}

// Reserved for messages sent *to* the child; not used anywhere visible here.
#[derive(Serialize, Deserialize)]
pub struct OutputLine {}

/// Initiator that spawns an external program and drives the resource from
/// JSON lines the program prints on stdout.
pub struct Process {
    pub cmd: String,
    pub args: Vec<String>,
    // `None` until `spawn` succeeds; holds the running child plus its pipes.
    state: Option<ProcessState>,
    // Accumulates partial stdout reads until complete lines are available.
    buffer: LineBuffer,
    // Same, for stderr (which is only logged).
    err_buffer: LineBuffer,
    callbacks: InitiatorCallbacks,
}
impl Process {
    /// Spawn the configured command with piped stdout/stderr and store the
    /// resulting handles in `self.state`.
    fn spawn(&mut self) -> io::Result<()> {
        let mut child = Command::new(&self.cmd)
            .args(&self.args)
            .stdin(Stdio::null())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?;
        let stdout = child
            .stdout
            .take()
            .expect("Child just spawned with piped stdout has no stdout");
        let stderr = child
            .stderr
            .take()
            .expect("Child just spawned with piped stderr has no stderr");
        self.state = Some(ProcessState::new(stdout, stderr, child));
        Ok(())
    }
}
/// Handles to a running child process and its output pipes.
struct ProcessState {
    pub stdout: ChildStdout,
    pub stderr: ChildStderr,
    // Set once stderr reading has failed; stops further stderr polling.
    pub stderr_closed: bool,
    pub child: Child,
}
impl ProcessState {
    pub fn new(stdout: ChildStdout, stderr: ChildStderr, child: Child) -> Self {
        Self {
            stdout,
            stderr,
            stderr_closed: false,
            child,
        }
    }

    /// Split `buffer` into newline-terminated lines, feed each complete line
    /// to [`Self::process_line`], and return the number of bytes consumed
    /// (including newline terminators) so the caller can drain them.
    ///
    /// A trailing partial line (no `\n` yet) is left in the buffer untouched.
    fn try_process(&mut self, buffer: &[u8], callbacks: &mut InitiatorCallbacks) -> usize {
        tracing::trace!("trying to process current buffer");
        let mut consumed = 0;
        while let Some(idx) = buffer[consumed..].iter().position(|b| *b == b'\n') {
            // `idx` is relative to `consumed`. Empty lines are handed to
            // `process_line`, which ignores them.
            let line = &buffer[consumed..(consumed + idx)];
            self.process_line(line, callbacks);
            // Fix: advance past the line *and* its newline. The previous code
            // did `end = idx`, assigning a relative offset as an absolute
            // position, which re-processed earlier lines and could loop
            // forever on multi-line input.
            consumed += idx + 1;
        }
        consumed
    }

    /// Parse one stdout line as an [`InputMessage`] and apply it. Invalid
    /// UTF-8, whitespace-only lines and malformed JSON are logged and ignored.
    fn process_line(&mut self, line: &[u8], callbacks: &mut InitiatorCallbacks) {
        if !line.is_empty() {
            let res = std::str::from_utf8(line);
            if let Err(error) = &res {
                tracing::warn!(%error, "Initiator sent line with invalid UTF-8");
                return;
            }
            let string = res.unwrap().trim();
            // Ignore whitespace-only lines
            if !string.is_empty() {
                // Parse the trimmed text (serde_json tolerates surrounding
                // whitespace either way; trimming keeps the two checks aligned).
                match serde_json::from_str::<InputMessage>(string) {
                    Ok(state) => {
                        tracing::trace!(?state, "got new state for process initiator");
                        let InputMessage::SetState(status) = state;
                        callbacks.set_status(status);
                    }
                    Err(error) => {
                        tracing::warn!(%error, "process initiator did not send a valid line")
                    }
                }
            }
        }
    }
}
impl Future for Process {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if let Process {
state: Some(state),
buffer,
err_buffer,
callbacks,
..
} = self.get_mut()
{
match state.child.try_status() {
Err(error) => {
tracing::error!(%error, "checking child exit code returned an error");
return Poll::Ready(());
}
Ok(Some(exitcode)) => {
tracing::warn!(%exitcode, "child process exited");
return Poll::Ready(());
}
Ok(None) => {
tracing::trace!("process initiator checking on process");
let stdout = &mut state.stdout;
loop {
let buf = buffer.get_mut_write(512);
match AsyncRead::poll_read(Pin::new(stdout), cx, buf) {
Poll::Pending => break,
Poll::Ready(Ok(read)) => {
buffer.advance_valid(read);
continue;
}
Poll::Ready(Err(error)) => {
tracing::warn!(%error, "reading from child stdout errored");
return Poll::Ready(());
}
}
}
let processed = state.try_process(buffer, callbacks);
buffer.consume(processed);
if !state.stderr_closed {
let stderr = &mut state.stderr;
loop {
let buf = err_buffer.get_mut_write(512);
match AsyncRead::poll_read(Pin::new(stderr), cx, buf) {
Poll::Pending => break,
Poll::Ready(Ok(read)) => {
err_buffer.advance_valid(read);
continue;
}
Poll::Ready(Err(error)) => {
tracing::warn!(%error, "reading from child stderr errored");
state.stderr_closed = true;
break;
}
}
}
}
{
let mut consumed = 0;
while let Some(idx) = buffer[consumed..].iter().position(|b| *b == b'\n') {
if idx == 0 {
consumed += 1;
continue;
}
let line = &buffer[consumed..(consumed + idx)];
match std::str::from_utf8(line) {
Ok(line) => tracing::debug!(line, "initiator STDERR"),
Err(error) => tracing::debug!(%error,
"invalid UTF-8 on initiator STDERR"),
}
consumed = idx;
}
err_buffer.consume(consumed);
}
return Poll::Pending;
}
}
} else {
tracing::warn!("process initiator has no process attached!");
}
Poll::Ready(())
}
}
impl Initiator for Process {
    /// Build and immediately spawn the configured process.
    ///
    /// `cmd` is required; `args` is an optional whitespace-separated list.
    fn new(params: &HashMap<String, String>, callbacks: InitiatorCallbacks) -> miette::Result<Self>
    where
        Self: Sized,
    {
        let cmd = match params.get("cmd") {
            Some(cmd) => cmd.clone(),
            None => return Err(miette!("Process initiator requires a `cmd` parameter.")),
        };
        let args = match params.get("args") {
            Some(argv) => argv.split_whitespace().map(|s| s.to_string()).collect(),
            None => Vec::new(),
        };
        let mut this = Self {
            cmd,
            args,
            state: None,
            buffer: LineBuffer::new(),
            err_buffer: LineBuffer::new(),
            callbacks,
        };
        this.spawn().into_diagnostic()?;
        Ok(this)
    }
}

70
bffhd/keylog.rs Normal file
View File

@ -0,0 +1,70 @@
use std::fmt::Formatter;
use std::fs::{File, OpenOptions};
use std::io::Write;
use std::path::Path;
use std::sync::Mutex;
use std::{fmt, io};
/// Mutable innards of `KeyLogFile`: the open log file plus a scratch buffer
/// that is reused between writes.
struct KeyLogFileInner {
    file: File,
    buf: Vec<u8>,
}

impl fmt::Debug for KeyLogFileInner {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Only the file handle is interesting; the scratch buffer is noise.
        fmt::Debug::fmt(&self.file, f)
    }
}

impl KeyLogFileInner {
    /// Open (or create) the log file at `path` in append mode.
    fn new(path: impl AsRef<Path>) -> io::Result<Self> {
        let file = OpenOptions::new().append(true).create(true).open(path)?;
        Ok(KeyLogFileInner {
            file,
            buf: Vec::new(),
        })
    }

    /// Append one NSS-keylog-format line: `<label> <hex random> <hex secret>`.
    fn try_write(&mut self, label: &str, client_random: &[u8], secret: &[u8]) -> io::Result<()> {
        self.buf.clear();
        write!(self.buf, "{} ", label)?;
        for byte in client_random.iter() {
            write!(self.buf, "{:02x}", byte)?;
        }
        write!(self.buf, " ")?;
        for byte in secret.iter() {
            write!(self.buf, "{:02x}", byte)?;
        }
        writeln!(self.buf)?;
        self.file.write_all(&self.buf)
    }
}
#[derive(Debug)]
/// [`KeyLog`] implementation that opens a file at the given path
pub struct KeyLogFile(Mutex<KeyLogFileInner>);

impl KeyLogFile {
    /// Makes a new `KeyLogFile`. The environment variable is
    /// inspected and the named file is opened during this call.
    pub fn new(path: impl AsRef<Path>) -> io::Result<Self> {
        let inner = KeyLogFileInner::new(path)?;
        Ok(KeyLogFile(Mutex::new(inner)))
    }
}

impl rustls::KeyLog for KeyLogFile {
    fn log(&self, label: &str, client_random: &[u8], secret: &[u8]) {
        // Serialize writers; a poisoned lock is a programming error (panic).
        let mut inner = self.0.lock().unwrap();
        if let Err(e) = inner.try_write(label, client_random, secret) {
            tracing::warn!("error writing to key log file: {}", e);
        }
    }
}

287
bffhd/lib.rs Normal file
View File

@ -0,0 +1,287 @@
#![warn(unused_imports, unused_import_braces)]
//#![warn(missing_debug_implementations)]
//#![warn(missing_docs)]
//#![warn(missing_crate_level_docs)]
//! Difluoroborane
//!
//! This is the capnp component of the FabAccess project.
//! The entry point of bffhd can be found in [bin/bffhd/main.rs](../bin/bffhd/main.rs)
use miette::{Diagnostic, IntoDiagnostic};
use thiserror::Error;
pub mod config;
/// Internal Databases build on top of LMDB, a mmap()'ed B-tree DB optimized for reads
pub mod db;
/// Shared error type
pub mod error;
pub mod authentication;
pub mod authorization;
pub mod users;
/// Resources
pub mod resources;
pub mod actors;
pub mod initiators;
pub mod sensors;
pub mod capnp;
pub mod utils;
// Store build information in the `env` module.
shadow_rs::shadow!(env);
mod audit;
mod keylog;
mod logging;
mod session;
mod tls;
use std::sync::Arc;
use futures_util::{FutureExt, StreamExt};
use once_cell::sync::OnceCell;
use crate::audit::AuditLog;
use crate::authentication::AuthenticationHandle;
use crate::authorization::roles::Roles;
use crate::capnp::APIServer;
use crate::config::Config;
use crate::resources::modules::fabaccess::MachineState;
use crate::resources::search::ResourcesHandle;
use crate::resources::state::db::StateDB;
use crate::resources::Resource;
use crate::session::SessionManager;
use crate::tls::TlsConfig;
use crate::users::db::UserDB;
use crate::users::Users;
use executor::pool::Executor;
use lightproc::recoverable_handle::RecoverableHandle;
use signal_hook::consts::signal::*;
use tracing::Span;
use std::collections::HashMap;
/// Top-level server state: configuration, executor, databases and resources.
pub struct Difluoroborane {
    config: Config,
    executor: Executor<'static>,
    pub statedb: StateDB,
    pub users: Users,
    pub roles: Roles,
    pub resources: ResourcesHandle,
    // Root tracing span all server activity is nested under.
    span: Span,
}

// Global handles, set exactly once during `Difluoroborane::new`.
pub static RESOURCES: OnceCell<ResourcesHandle> = OnceCell::new();
pub static CONFIG: OnceCell<Config> = OnceCell::new();

// Diagnostic metadata for signal-handler setup failures.
struct SignalHandlerErr;
impl error::Description for SignalHandlerErr {
    const CODE: &'static str = "signals::new";
}
#[derive(Debug, Error, Diagnostic)]
// TODO 0.5: #[non_exhaustive]
/// Top-level fatal errors `Difluoroborane::new`/`run` can return.
pub enum BFFHError {
    #[error("DB operation failed")]
    DBError(
        #[from]
        #[source]
        db::Error,
    ),
    #[error("failed to initialize global user store")]
    UsersError(
        #[from]
        #[source]
        users::Error,
    ),
    #[error("failed to initialize state database")]
    StateDBError(
        #[from]
        #[source]
        resources::state::db::StateDBError,
    ),
    #[error("audit log failed")]
    AuditLogError(
        #[from]
        #[source]
        audit::Error,
    ),
    #[error("Failed to initialize signal handler")]
    SignalsError(#[source] std::io::Error),
    #[error("error in actor subsystem")]
    ActorError(
        #[from]
        #[source]
        actors::ActorError,
    ),
    #[error("failed to initialize TLS config")]
    TlsSetup(
        #[from]
        #[source]
        tls::Error,
    ),
    #[error("API handler failed")]
    ApiError(
        #[from]
        #[source]
        capnp::Error,
    ),
}
#[derive(serde::Serialize, serde::Deserialize)]
/// On-disk (TOML) snapshot of both databases, used by `dump_db`/`load_db`.
struct DatabaseDump {
    users: HashMap<String, users::db::UserData>,
    state: HashMap<String, resources::state::State>,
}
impl Difluoroborane {
    // Placeholder; global one-time setup currently happens in `new`.
    pub fn setup() {}

    /// Initialize the full server state from `config`: logging/console,
    /// executor, state & user databases, roles, audit log and resources.
    /// Also fills the `RESOURCES`/`CONFIG` globals (panics if called twice
    /// in one process).
    pub fn new(config: Config) -> Result<Self, BFFHError> {
        let mut server = logging::init(&config.logging);
        let span = tracing::info_span!(
            target: "bffh",
            "bffh"
        );
        let span2 = span.clone();
        let _guard = span2.enter();
        tracing::info!(version = env::VERSION, "Starting BFFH");

        let executor = Executor::new();

        // The diagnostics console runs as its own background task.
        if let Some(aggregator) = server.aggregator.take() {
            executor.spawn(aggregator.run());
        }
        tracing::info!("Server is being spawned");
        let handle = executor.spawn(server.serve());
        // Log however the console server ends (ok / error / panic).
        executor.spawn(handle.map(|result| match result {
            Some(Ok(())) => {
                tracing::info!("console server finished without error");
            }
            Some(Err(error)) => {
                tracing::info!(%error, "console server finished with error");
            }
            None => {
                tracing::info!("console server finished with panic");
            }
        }));

        // State and user databases share one LMDB environment.
        let env = StateDB::open_env(&config.db_path)?;

        let statedb = StateDB::create_with_env(env.clone())?;

        let users = Users::new(env.clone())?;

        let roles = Roles::new(config.roles.clone());

        let _audit_log = AuditLog::new(&config)?;

        // One resource per configured machine, each restoring (or seeding)
        // its state from the state DB.
        let resources = ResourcesHandle::new(config.machines.iter().map(|(id, desc)| {
            Resource::new(Arc::new(resources::Inner::new(
                id.to_string(),
                statedb.clone(),
                desc.clone(),
            )))
        }));
        RESOURCES.set(resources.clone()).unwrap();
        CONFIG.set(config.clone()).unwrap();

        Ok(Self {
            config,
            executor,
            statedb,
            users,
            roles,
            resources,
            span,
        })
    }

    /// Serialize users + state into a TOML file at `file` (for backups).
    pub fn dump_db(&mut self, file: &str) -> Result<(), miette::Error> {
        let users = self.users.dump_map()?;
        let state = self.statedb.dump_map()?;
        let dump = DatabaseDump{users, state};
        let data = toml::ser::to_vec(&dump).map_err(|e| miette::Error::msg(format!("Serializing database dump failed: {}", e)))?;
        std::fs::write(file, &data).map_err(|e| miette::Error::msg(format!("writing database dump failed: {}", e)))?;
        Ok(())
    }

    /// Restore users + state from a TOML dump produced by [`Self::dump_db`].
    pub fn load_db(&mut self, file: &str) -> Result<(), miette::Error> {
        let data = std::fs::read(file).into_diagnostic()?;
        let dump: DatabaseDump = toml::de::from_slice(&data).into_diagnostic()?;
        self.users.load_map(&dump.users)?;
        self.statedb.load_map(&dump.state)?;
        Ok(())
    }

    /// Run the server: spawn initiators, actors and the API listener, then
    /// block until SIGINT/SIGQUIT/SIGTERM arrives and shut the API down.
    pub fn run(&mut self) -> Result<(), BFFHError> {
        let _guard = self.span.enter();
        let mut signals = signal_hook_async_std::Signals::new(&[SIGINT, SIGQUIT, SIGTERM])
            .map_err(BFFHError::SignalsError)?;

        let sessionmanager = SessionManager::new(self.users.clone(), self.roles.clone());
        let authentication = AuthenticationHandle::new(self.users.clone());

        initiators::load(
            self.executor.clone(),
            &self.config,
            self.resources.clone(),
            sessionmanager.clone(),
            authentication.clone(),
        ).expect("initializing initiators failed");
        // TODO 0.5: error handling. Add variant to BFFHError
        actors::load(self.executor.clone(), &self.config, self.resources.clone())?;

        let tlsconfig = TlsConfig::new(self.config.tlskeylog.as_ref(), !self.config.is_quiet())?;
        let acceptor = tlsconfig.make_tls_acceptor(&self.config.tlsconfig)?;

        let apiserver = self.executor.run(APIServer::bind(
            self.executor.clone(),
            &self.config.listens,
            acceptor,
            sessionmanager,
            authentication,
        ))?;

        let (mut tx, rx) = async_oneshot::oneshot();
        self.executor.spawn(apiserver.handle_until(rx));

        // Block this thread until a real signal arrives (`None` stream items
        // are spurious wakeups and ignored).
        let f = async {
            let mut sig;
            while {
                sig = signals.next().await;
                sig.is_none()
            } {}
            tracing::info!(signal = %sig.unwrap(), "Received signal");
            _ = tx.send(()); // ignore result, as an Err means that the executor we want to stop has already stopped
        };

        self.executor.run(f);

        Ok(())
    }
}
/// Collects task handles so they can all be cancelled on shutdown.
///
/// NOTE(review): not referenced from the visible code in this file —
/// presumably kept for future use; confirm before removing.
struct ShutdownHandler {
    tasks: Vec<RecoverableHandle<()>>,
}

impl ShutdownHandler {
    pub fn new(tasks: Vec<RecoverableHandle<()>>) -> Self {
        Self { tasks }
    }

    /// Cancel every tracked task, draining the list in the process.
    pub fn shutdown(&mut self) {
        self.tasks.drain(..).for_each(|handle| handle.cancel());
    }
}

74
bffhd/logging.rs Normal file
View File

@ -0,0 +1,74 @@
use serde::{Deserialize, Serialize};
use std::path::Path;
use tracing_subscriber::fmt::format::Format;
use tracing_subscriber::prelude::*;
use tracing_subscriber::EnvFilter;
#[derive(Debug, Clone, Serialize, Deserialize)]
/// `[logging]` section of the bffh configuration file.
pub struct LogConfig {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    /// Log filter string in the tracing format `target[span{field=value}]=level`.
    /// lvalue is optional and multiple filters can be combined with comma.
    /// e.g. `warn,difluoroborane::actors=debug` will only print `WARN` and `ERROR` unless the
    /// message is logged in a span below `difluoroborane::actors` (i.e. by an actor task) in
    /// which case `DEBUG` and `INFO` will also be printed.
    pub filter: Option<String>,
    /// Output format: `"full"` (default), `"pretty"` or `"compact"`.
    pub format: String,
}

impl Default for LogConfig {
    /// No filter (falls back to the `BFFH_LOG` env var) and the full format.
    fn default() -> Self {
        LogConfig {
            filter: None,
            format: String::from("full"),
        }
    }
}
/// Where log output is sent.
///
/// NOTE(review): `LogOutput` and `LogConfig2` are not referenced by `init`
/// below — presumably groundwork for configurable outputs; confirm before
/// removing.
pub enum LogOutput<'a> {
    Journald,
    Stdout,
    File(&'a Path),
}

/// Prospective richer logging configuration (output + filter + format).
pub struct LogConfig2<'a, F> {
    output: LogOutput<'a>,
    filter_str: Option<&'a str>,
    format: Format<F>,
}
/// Install the global tracing subscriber (console layer + fmt layer) from
/// `config` and return the console server, which still needs to be spawned.
///
/// Filter precedence: explicit `config.filter`, else the `BFFH_LOG`
/// environment variable.
pub fn init(config: &LogConfig) -> console::Server {
    let subscriber = tracing_subscriber::registry();

    let (console_layer, server) = console::ConsoleLayer::new();
    let subscriber = subscriber.with(console_layer);

    let filter = if let Some(ref filter) = config.filter {
        EnvFilter::new(filter.as_str())
    } else {
        EnvFilter::from_env("BFFH_LOG")
    };

    let format = config.format.to_lowercase();
    let fmt_layer = tracing_subscriber::fmt::layer();
    // Any unknown format string silently falls back to the default ("full").
    match format.as_ref() {
        "pretty" => {
            let fmt_layer = fmt_layer.pretty().with_filter(filter);
            subscriber.with(fmt_layer).init();
        }
        "compact" => {
            let fmt_layer = fmt_layer.compact().with_filter(filter);
            subscriber.with(fmt_layer).init();
        }
        _ => {
            let fmt_layer = fmt_layer.with_filter(filter);
            subscriber.with(fmt_layer).init();
        }
    }
    tracing::info!(format = format.as_str(), "Logging initialized");
    server
}

44
bffhd/resources/claim.rs Normal file
View File

@ -0,0 +1,44 @@
use std::sync::Arc;
use async_channel::Sender;
use lmdb::Environment;
#[derive(Clone, Debug)]
/// Database of currently valid claims, interests and notify, as far as applicable
// NOTE(review): this module appears to be work in progress — `ClaimDB` only
// holds an environment and `Update` (below) is not imported here; confirm
// this file is actually compiled before relying on it.
pub struct ClaimDB {
    env: Arc<Environment>,
}

pub type UserID = String;
pub type ResourceID = String;

/// One user's relationship to one resource.
pub struct ClaimEntry {
    subject: UserID,
    target: ResourceID,
    level: Level,
}

/// Strength of a claim entry, strongest first.
enum Level {
    Claim(Claim),
    Interest(Interest),
    Notify(Notify),
}

#[derive(Debug)]
/// A claim on a resources grants permission to update state
///
/// This permission is not necessarily exclusive, depending on the resources in question.
pub struct Claim {
    /// Sending end that can be used to send state updates to a resources.
    pub tx: Sender<Update>,
}

#[derive(Debug)]
/// An interest on a resources indicates that an user wants a resources to be in a specific state
pub struct Interest {
}

#[derive(Debug)]
/// A notify indicates that an user wants to be informed about changes in a resources' state
pub struct Notify {
}

19
bffhd/resources/db.rs Normal file
View File

@ -0,0 +1,19 @@
use rkyv::{Archive, Deserialize, Serialize};

#[derive(
    Clone,
    Debug,
    PartialEq,
    Eq,
    Archive,
    Serialize,
    Deserialize,
    serde::Serialize,
    serde::Deserialize,
)]
/// Stored metadata record for a resource (rkyv- and serde-serializable).
pub struct Resource {
    uuid: u128,
    id: String,
    // NOTE(review): presumably indices into a separate string table for the
    // display name and description — confirm against the writer of this DB.
    name_idx: u64,
    description_idx: u64,
}

268
bffhd/resources/mod.rs Normal file
View File

@ -0,0 +1,268 @@
use futures_signals::signal::{Mutable, Signal};
use rkyv::Infallible;
use std::ops::Deref;
use std::sync::Arc;
use crate::audit::AUDIT;
use crate::authorization::permissions::PrivilegesBuf;
use crate::config::MachineDescription;
use crate::db::ArchivedValue;
use crate::resources::modules::fabaccess::{ArchivedStatus, MachineState, Status};
use crate::resources::state::db::StateDB;
use crate::resources::state::State;
use crate::session::SessionHandle;
use crate::users::UserRef;
use rkyv::option::ArchivedOption;
use rkyv::ser::serializers::AllocSerializer;
use rkyv::ser::Serializer;
use rkyv::{Archived, Deserialize};
pub mod db;
pub mod search;
pub mod state;
pub mod modules;
/// Marker type: the session lacked permission for the attempted transition.
pub struct PermissionDenied;

#[derive(Debug)]
pub(crate) struct Inner {
    // Machine id (config key); also the LMDB key for its state.
    id: String,
    db: StateDB,
    // Broadcasts every state change to interested watchers.
    signal: Mutable<ArchivedValue<State>>,
    desc: MachineDescription,
}
impl Inner {
    /// Load the persisted state for `id`, or persist a default `free` state
    /// if none exists yet.
    pub fn new(id: String, db: StateDB, desc: MachineDescription) -> Self {
        let state = if let Some(previous) = db.get(id.as_bytes()).unwrap() {
            tracing::info!(%id, ?previous, "Found previous state");
            previous
        } else {
            let state = MachineState::free(None);
            tracing::info!(%id, ?state, "No previous state found, setting default");
            let update = state.to_state();

            let mut serializer = AllocSerializer::<1024>::default();
            serializer
                .serialize_value(&update)
                .expect("failed to serialize new default state");
            let val = ArchivedValue::new(serializer.into_serializer().into_inner());
            db.put(&id.as_bytes(), &val).unwrap();
            val
        };
        let signal = Mutable::new(state);

        Self {
            id,
            db,
            signal,
            desc,
        }
    }

    /// Signal yielding the current state and every subsequent change.
    pub fn signal(&self) -> impl Signal<Item = ArchivedValue<State>> {
        Box::pin(self.signal.signal_cloned())
    }

    // Authoritative read from LMDB; panics if the state row is missing,
    // which `new` guarantees cannot happen.
    fn get_state(&self) -> ArchivedValue<State> {
        self.db
            .get(self.id.as_bytes())
            .expect("lmdb error")
            .expect("state should never be None")
    }

    fn get_state_ref(&self) -> impl Deref<Target = ArchivedValue<State>> + '_ {
        self.signal.lock_ref()
    }

    /// Persist `state`, write an audit log entry and notify watchers.
    fn set_state(&self, state: ArchivedValue<State>) {
        let span = tracing::debug_span!("set_state", id = %self.id, ?state);
        let _guard = span.enter();
        tracing::debug!("Updating state");
        tracing::trace!("Updating DB");
        self.db.put(&self.id.as_bytes(), &state).unwrap();
        tracing::trace!("Updated DB, sending update signal");

        // An audit failure is logged but does not abort the state change.
        let res = AUDIT
            .get()
            .unwrap()
            .log(self.id.as_str(), &format!("{}", state));
        if let Err(e) = res {
            tracing::error!("Writing to the audit log failed for {} {}: {e}", self.id.as_str(), state);
        }

        self.signal.set(state);
        tracing::trace!("Sent update signal");
    }
}
#[derive(Clone, Debug)]
/// Cheaply clonable handle to one machine/resource.
pub struct Resource {
    inner: Arc<Inner>,
}
impl Resource {
pub(crate) fn new(inner: Arc<Inner>) -> Self {
Self { inner }
}
pub fn get_state(&self) -> ArchivedValue<State> {
self.inner.get_state()
}
pub fn get_state_ref(&self) -> impl Deref<Target = ArchivedValue<State>> + '_ {
self.inner.get_state_ref()
}
pub fn get_id(&self) -> &str {
&self.inner.id
}
pub fn get_name(&self) -> &str {
self.inner.desc.name.as_str()
}
pub fn get_signal(&self) -> impl Signal<Item = ArchivedValue<State>> {
self.inner.signal()
}
pub fn get_required_privs(&self) -> &PrivilegesBuf {
&self.inner.desc.privs
}
pub fn get_description(&self) -> &MachineDescription {
&self.inner.desc
}
pub fn get_current_user(&self) -> Option<UserRef> {
let state = self.get_state_ref();
let state: &Archived<State> = state.as_ref();
match &state.inner.state {
ArchivedStatus::Blocked(user)
| ArchivedStatus::InUse(user)
| ArchivedStatus::Reserved(user)
| ArchivedStatus::ToCheck(user) => {
let user = Deserialize::<UserRef, _>::deserialize(user, &mut Infallible).unwrap();
Some(user)
}
_ => None,
}
}
pub fn get_previous_user(&self) -> Option<UserRef> {
let state = self.get_state_ref();
let state: &Archived<State> = state.as_ref();
if let ArchivedOption::Some(user) = &state.inner.previous {
let user = Deserialize::<UserRef, _>::deserialize(user, &mut Infallible).unwrap();
Some(user)
} else {
None
}
}
fn set_state(&self, state: MachineState) {
let mut serializer = AllocSerializer::<1024>::default();
serializer.serialize_value(&state).expect("serializing a MachineState shoud be infallible");
let archived = ArchivedValue::new(serializer.into_serializer().into_inner());
self.inner.set_state(archived)
}
pub fn set_status(&self, state: Status) {
let old = self.inner.get_state();
let oldref: &Archived<State> = old.as_ref();
let previous: &Archived<Option<UserRef>> = &oldref.inner.previous;
let previous =
Deserialize::<Option<UserRef>, _>::deserialize(previous, &mut rkyv::Infallible)
.expect("Infallible deserializer failed");
let new = MachineState { state, previous };
self.set_state(new);
}
/// Attempt a state transition requested by `session`, enforcing permissions.
///
/// Permission model, checked in order:
/// 1. `manage` permission: any transition is allowed.
/// 2. `write` permission: a decision tree of self-service transitions
///    (take/reserve a free machine, return one's own usage, use one's own
///    reservation).
/// 3. Default permissions everybody has: return or un-reserve what the
///    requesting user themselves holds.
///
/// If no rule matches, the request is silently ignored — the caller gets no
/// error feedback. NOTE(review): callers may want a `Result` here; confirm.
pub async fn try_update(&self, session: SessionHandle, new: Status) {
    let old = self.get_state();
    let old: &Archived<State> = old.as_ref();
    let user = session.get_user_ref();
    if session.has_manage(self) // Default allow for managers
        || (session.has_write(self) // Decision tree for writers
            && match (&old.inner.state, &new) {
                // Going from available to used by the person requesting is okay.
                (ArchivedStatus::Free, Status::InUse(who))
                    // Check that the person requesting does not request for somebody else.
                    // *That* is manage privilege.
                    if who == &user => true,
                // Reserving things for ourself is okay.
                (ArchivedStatus::Free, Status::Reserved(whom))
                    if &user == whom => true,
                // Returning things we've been using is okay. This includes both if
                // they're being freed or marked as to be checked.
                (ArchivedStatus::InUse(who), Status::Free | Status::ToCheck(_))
                    if who == &user => true,
                // Un-reserving things we reserved is okay
                (ArchivedStatus::Reserved(whom), Status::Free)
                    if whom == &user => true,
                // Using things that we've reserved is okay. But the person requesting
                // that has to be the person that reserved the machine. Otherwise
                // somebody could make a machine reserved by a different user as used by
                // that different user but use it themself.
                (ArchivedStatus::Reserved(whom), Status::InUse(who))
                    if whom == &user && who == whom => true,
                // Default is deny.
                _ => false
            })
        // Default permissions everybody has
        || match (&old.inner.state, &new) {
            // Returning things we've been using is okay. This includes both if
            // they're being freed or marked as to be checked.
            (ArchivedStatus::InUse(who), Status::Free | Status::ToCheck(_)) if who == &user => true,
            // Un-reserving things we reserved is okay
            (ArchivedStatus::Reserved(whom), Status::Free) if whom == &user => true,
            // Default is deny.
            _ => false,
        }
    {
        self.set_status(new);
    }
}
/// Release the machine if — and only if — the session's user is the one
/// currently using it; the releasing user is recorded as `previous`.
pub async fn give_back(&self, session: SessionHandle) {
    let state = self.get_state();
    let archived: &Archived<State> = state.as_ref();
    let inner: &Archived<MachineState> = &archived.inner;
    match &inner.state {
        ArchivedStatus::InUse(user) => {
            let current = session.get_user_ref();
            if user == &current {
                self.set_state(MachineState::free(Some(current)));
            }
        }
        _ => {}
    }
}
/// Unconditionally set the status, bypassing all permission checks.
/// Callers are responsible for their own authorization.
pub async fn force_set(&self, new: Status) {
    self.set_status(new);
}
/// A resource is visible to a session that has disclose permission on it, or
/// whose user currently holds it in some way.
pub fn visible(&self, session: &SessionHandle) -> bool {
    if session.has_disclose(self) {
        return true;
    }
    self.is_owned_by(session.get_user_ref())
}
/// True if `owner` is the user bound to the current status (in use, to-check,
/// blocked or reserved by them); `Free`/`Disabled` carry no owner.
pub fn is_owned_by(&self, owner: UserRef) -> bool {
    matches!(
        &self.get_state().as_ref().inner.state,
        ArchivedStatus::InUse(user)
        | ArchivedStatus::ToCheck(user)
        | ArchivedStatus::Blocked(user)
        | ArchivedStatus::Reserved(user) if user == &owner
    )
}
}

View File

@ -0,0 +1,142 @@
use crate::config::deser_option;
use crate::utils::oid::ObjectIdentifier;
use once_cell::sync::Lazy;
use rkyv::{Archive, Archived, Deserialize, Infallible};
use std::fmt;
use std::str::FromStr;
//use crate::oidvalue;
use crate::resources::state::State;
use crate::users::UserRef;
/// Status of a Machine
#[derive(
    Clone,
    PartialEq,
    Eq,
    Debug,
    Archive,
    rkyv::Serialize,
    rkyv::Deserialize,
    serde::Serialize,
    serde::Deserialize,
)]
// The rkyv-archived form (ArchivedStatus) also derives Debug/PartialEq so it
// can be inspected and compared without deserializing first.
#[archive_attr(derive(Debug, PartialEq))]
pub enum Status {
    /// Not currently used by anybody
    Free,
    /// Used by somebody
    InUse(UserRef),
    /// Was used by somebody and now needs to be checked for cleanliness
    ToCheck(UserRef),
    /// Not used by anybody but also can not be used. E.g. down for maintenance
    Blocked(UserRef),
    /// Disabled for some other reason
    Disabled,
    /// Reserved
    Reserved(UserRef),
}
#[derive(
    Clone,
    PartialEq,
    Eq,
    Debug,
    Archive,
    rkyv::Serialize,
    rkyv::Deserialize,
    serde::Serialize,
    serde::Deserialize,
)]
#[archive_attr(derive(Debug, PartialEq))]
/// The status of the machine
pub struct MachineState {
    /// The machine's current status.
    pub state: Status,
    /// The user who last held the machine, if any; omitted from serialization
    /// when absent.
    #[serde(
        default,
        skip_serializing_if = "Option::is_none",
        deserialize_with = "deser_option"
    )]
    pub previous: Option<UserRef>,
}
impl fmt::Display for ArchivedMachineState {
    /// Render as a lowercase tag, followed by the user for user-bearing states
    /// (e.g. `free`, `inuse alice`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (tag, user) = match &self.state {
            ArchivedStatus::Free => ("free", None),
            ArchivedStatus::InUse(user) => ("inuse", Some(user)),
            ArchivedStatus::ToCheck(user) => ("tocheck", Some(user)),
            ArchivedStatus::Blocked(user) => ("blocked", Some(user)),
            ArchivedStatus::Disabled => ("disabled", None),
            ArchivedStatus::Reserved(user) => ("reserved", Some(user)),
        };
        match user {
            Some(user) => write!(f, "{} {}", tag, user),
            None => f.write_str(tag),
        }
    }
}
impl MachineState {
    /// A fresh state: free, with no previous user.
    pub fn new() -> Self {
        Self {
            previous: None,
            state: Status::Free,
        }
    }
    /// Deserialize a `MachineState` out of an archived `State` read from the DB.
    pub fn from(dbstate: &Archived<State>) -> Self {
        let archived: &Archived<MachineState> = &dbstate.inner;
        Deserialize::deserialize(archived, &mut Infallible).unwrap()
    }
    /// Wrap a clone of this machine state into a storable `State`.
    pub fn to_state(&self) -> State {
        let inner = self.clone();
        State { inner }
    }
    /// Free, remembering who held the machine last.
    pub fn free(previous: Option<UserRef>) -> Self {
        Self {
            previous,
            state: Status::Free,
        }
    }
    /// In use by `user`.
    pub fn used(user: UserRef, previous: Option<UserRef>) -> Self {
        Self {
            previous,
            state: Status::InUse(user),
        }
    }
    /// Blocked by `user` (e.g. down for maintenance).
    pub fn blocked(user: UserRef, previous: Option<UserRef>) -> Self {
        Self {
            previous,
            state: Status::Blocked(user),
        }
    }
    /// Disabled for some other reason.
    pub fn disabled(previous: Option<UserRef>) -> Self {
        Self {
            previous,
            state: Status::Disabled,
        }
    }
    /// Reserved by `user`.
    pub fn reserved(user: UserRef, previous: Option<UserRef>) -> Self {
        Self {
            previous,
            state: Status::Reserved(user),
        }
    }
    /// Awaiting a cleanliness check by `user`, who is also recorded as previous.
    pub fn check(user: UserRef) -> Self {
        Self {
            previous: Some(user.clone()),
            state: Status::ToCheck(user),
        }
    }
}
// OID identifying the fabaccess machine-state *type*.
pub static OID_TYPE: Lazy<ObjectIdentifier> =
    Lazy::new(|| ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.1.14").unwrap());
// OID identifying the fabaccess machine-state *value*; used as the single map
// key when (de)serializing `State` (see resources::state).
pub static OID_VALUE: Lazy<ObjectIdentifier> =
    Lazy::new(|| ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.2.4").unwrap());
//oidvalue!(OID_TYPE, MachineState, ArchivedMachineState);

View File

@ -0,0 +1,3 @@
/// The FabAccess machine model (machine status state handling).
pub mod fabaccess;
/// Marker trait for machine models; currently an empty extension point for
/// alternative models besides `fabaccess`.
pub trait MachineModel {}

59
bffhd/resources/search.rs Normal file
View File

@ -0,0 +1,59 @@
use crate::resources::Resource;
use std::collections::HashMap;
use std::sync::Arc;
/// Index of all configured resources, keyed by resource id.
#[derive(Debug)]
struct Inner {
    // Maps `resource.inner.id` → the resource itself.
    id: HashMap<String, Resource>,
}
impl Inner {
    /// Build the id → resource index from an iterator of resources.
    ///
    /// # Panics
    /// Panics if two resources share the same id — a duplicate would silently
    /// shadow an earlier resource, so we fail loudly with the offending id
    /// (the original bare `assert!` gave no diagnostic at all).
    pub fn new(resources: impl IntoIterator<Item = Resource>) -> Self {
        let mut id = HashMap::new();
        for resource in resources {
            let key = resource.inner.id.clone();
            let old = id.insert(key.clone(), resource);
            assert!(old.is_none(), "duplicate resource id: {}", key);
        }
        Self { id }
    }
}
/// Cheaply clonable, shared, read-only handle to the resource index.
#[derive(Clone, Debug)]
pub struct ResourcesHandle {
    inner: Arc<Inner>,
}
impl ResourcesHandle {
    /// Wrap the given resources into a shared handle.
    ///
    /// # Panics
    /// Panics if two resources share an id (see `Inner::new`).
    pub fn new(resources: impl IntoIterator<Item = Resource>) -> Self {
        let inner = Arc::new(Inner::new(resources));
        Self { inner }
    }
    /// Iterate over every known resource, in arbitrary order.
    pub fn list_all(&self) -> impl IntoIterator<Item = &Resource> {
        self.inner.id.values()
    }
    /// Look up a resource by its plain id.
    pub fn get_by_id(&self, id: &str) -> Option<&Resource> {
        self.inner.id.get(id)
    }
    /// Look up a resource by a URN of the form `urn:fabaccess:resource:<id>`.
    ///
    /// Returns `None` for malformed URNs or unknown ids; components after the
    /// id are ignored, as in the original parser.
    pub fn get_by_urn(&self, urn: &str) -> Option<&Resource> {
        let mut parts = urn.split_terminator(':');
        if parts.next() != Some("urn") {
            return None;
        }
        if parts.next() != Some("fabaccess") {
            return None;
        }
        if parts.next() != Some("resource") {
            return None;
        }
        let id = parts.next()?;
        self.get_by_id(id)
    }
}

133
bffhd/resources/state/db.rs Normal file
View File

@ -0,0 +1,133 @@
use rkyv::ser::Serializer;
use rkyv::ser::serializers::AllocSerializer;
use thiserror::Error;
use crate::db;
use crate::db::{AlignedAdapter, ArchivedValue, RawDB, DB};
use lmdb::{DatabaseFlags, Environment, EnvironmentFlags, Transaction, WriteFlags};
use miette::Diagnostic;
use std::fmt::Debug;
use std::{path::Path, sync::Arc};
use crate::resources::state::State;
/// Handle to the persistent resource-state database (LMDB-backed).
#[derive(Debug, Clone)]
pub struct StateDB {
    // Shared LMDB environment; also used to begin transactions.
    env: Arc<Environment>,
    // The named "state" database, storing rkyv-archived `State` values.
    db: DB<AlignedAdapter<State>>,
}
/// Errors that can occur opening or creating the state database.
#[derive(Clone, Debug, PartialEq, Eq, Error, Diagnostic)]
pub enum StateDBError {
    /// The LMDB environment itself could not be opened.
    #[error("opening the state db environment failed")]
    #[diagnostic(
        code(bffh::db::state::open_env),
        help("does the parent directory for state_db exist?")
    )]
    OpenEnv(#[source] db::Error),
    /// The named "state" database inside the environment could not be opened.
    #[error("opening the state db failed")]
    #[diagnostic(code(bffh::db::state::open))]
    Open(#[source] db::Error),
    /// The named "state" database could not be created.
    #[error("creating the state db failed")]
    #[diagnostic(code(bffh::db::state::create))]
    Create(#[source] db::Error),
}
impl StateDB {
    /// Open the LMDB environment at `path` (the DB file itself, not a directory).
    ///
    /// Flags: `WRITE_MAP` (writes go through a writable mmap), `NO_SUB_DIR`
    /// (path names the file), `NO_TLS` (read txns not tied to a thread),
    /// `NO_READAHEAD`.
    pub fn open_env<P: AsRef<Path>>(path: P) -> Result<Arc<Environment>, StateDBError> {
        Environment::new()
            .set_flags(
                EnvironmentFlags::WRITE_MAP
                    | EnvironmentFlags::NO_SUB_DIR
                    | EnvironmentFlags::NO_TLS
                    | EnvironmentFlags::NO_READAHEAD,
            )
            .set_max_dbs(8)
            .open(path.as_ref())
            .map(Arc::new)
            .map_err(|e| StateDBError::OpenEnv(e.into()))
    }
    /// Wrap an already-opened raw database handle.
    fn new(env: Arc<Environment>, db: RawDB) -> Self {
        let db = DB::new(db);
        Self { env, db }
    }
    /// Open the existing named database "state" in `env`.
    pub fn open_with_env(env: Arc<Environment>) -> Result<Self, StateDBError> {
        let db = RawDB::open(&env, Some("state"))
            .map_err(|e| StateDBError::Open(e.into()))?;
        Ok(Self::new(env, db))
    }
    /// Open the environment at `path` and the existing "state" database in it.
    pub fn open<P: AsRef<Path>>(path: P) -> Result<Self, StateDBError> {
        let env = Self::open_env(path)?;
        Self::open_with_env(env)
    }
    /// Create the named database "state" in `env`.
    pub fn create_with_env(env: Arc<Environment>) -> Result<Self, StateDBError> {
        let flags = DatabaseFlags::empty();
        let db = RawDB::create(&env, Some("state"), flags)
            .map_err(|e| StateDBError::Create(e.into()))?;
        Ok(Self::new(env, db))
    }
    /// Open the environment at `path` and create the "state" database in it.
    pub fn create<P: AsRef<Path>>(path: P) -> Result<Self, StateDBError> {
        let env = Self::open_env(path)?;
        Self::create_with_env(env)
    }
    /// Begin a read-only transaction on the backing environment.
    pub fn begin_ro_txn(&self) -> Result<impl Transaction + '_, db::Error> {
        self.env.begin_ro_txn().map_err(db::Error::from)
    }
    /// Read the archived state stored under `key`, in a fresh read transaction.
    pub fn get(&self, key: impl AsRef<[u8]>) -> Result<Option<ArchivedValue<State>>, db::Error> {
        let txn = self.env.begin_ro_txn()?;
        self.db.get(&txn, &key.as_ref())
    }
    /// Iterate all `(key, state)` pairs within the caller-supplied transaction.
    pub fn get_all<'txn, T: Transaction>(
        &self,
        txn: &'txn T,
    ) -> Result<impl IntoIterator<Item = (&'txn [u8], ArchivedValue<State>)>, db::Error> {
        self.db.get_all(txn)
    }
    /// Store `val` under `key` in a single write transaction and commit it.
    pub fn put(&self, key: &impl AsRef<[u8]>, val: &ArchivedValue<State>) -> Result<(), db::Error> {
        let mut txn = self.env.begin_rw_txn()?;
        let flags = WriteFlags::empty();
        self.db.put(&mut txn, key, val, flags)?;
        Ok(txn.commit()?)
    }
    /// Bulk-import `map` (id → state), serializing each value with rkyv, in one
    /// write transaction. Existing keys are overwritten.
    pub fn load_map(&self, map: &std::collections::HashMap<String, State>) -> miette::Result<()> {
        use miette::IntoDiagnostic;
        let mut txn = self.env.begin_rw_txn().into_diagnostic()?;
        let flags = WriteFlags::empty();
        for (key, val) in map {
            // 1024-byte scratch space; the serializer grows as needed.
            let mut serializer = AllocSerializer::<1024>::default();
            serializer.serialize_value(val).into_diagnostic()?;
            let serialized = ArchivedValue::new(serializer.into_serializer().into_inner());
            self.db.put(&mut txn, &key.as_bytes(), &serialized, flags)?;
        }
        txn.commit().into_diagnostic()?;
        Ok(())
    }
    /// Export the full database as an id → state map, deserializing every value.
    ///
    /// Fails if any key is not valid UTF-8.
    pub fn dump_map(&self) -> miette::Result<std::collections::HashMap<String, State>> {
        let mut map = std::collections::HashMap::new();
        for (key, val) in self.get_all(&self.begin_ro_txn()?)? {
            let key_str = core::str::from_utf8(&key).map_err(|_e| miette::Error::msg("state key not UTF8"))?.to_string();
            let val_state: State = rkyv::Deserialize::deserialize(val.as_ref(), &mut rkyv::Infallible).unwrap();
            map.insert(key_str, val_state);
        }
        Ok(map)
    }
}
#[cfg(test)]
mod tests {
    // Placeholder: no tests yet. TODO: add round-trip tests (put/get,
    // load_map/dump_map). The previously present `use std::ops::Deref;` was
    // unused and produced an unused-import warning in test builds, so it was
    // removed; the glob import stays for upcoming tests (globs do not warn).
    use super::*;
}

View File

@ -0,0 +1,86 @@
use std::fmt::{Debug, Display, Formatter};
use std::fmt;
use std::ops::Deref;
use rkyv::{out_field, Archive, Deserialize, Serialize};
use serde::de::{Error, MapAccess, Unexpected};
use serde::ser::SerializeMap;
use serde::Deserializer;
use crate::resources::modules::fabaccess::OID_VALUE;
use crate::MachineState;
use crate::utils::oid::ObjectIdentifier;
pub mod db;
pub mod value;
/// A resource's full persisted state; currently just wraps `MachineState`.
#[derive(Archive, Serialize, Deserialize, Clone, PartialEq, Eq)]
#[archive_attr(derive(Debug))]
pub struct State {
    pub inner: MachineState,
}
impl fmt::Debug for State {
    // Renders as `State { "<OID_VALUE>": <inner> }`, mirroring the serde
    // map format used by Serialize/Deserialize below.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut sf = f.debug_struct("State");
        //for Entry { oid, val } in self.inner.iter() {
        let k: String = OID_VALUE.deref().into();
        sf.field(k.as_ref(), &self.inner);
        //}
        sf.finish()
    }
}
impl fmt::Display for ArchivedState {
    // Delegates to the inner archived machine state's Display.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        Display::fmt(&self.inner, f)
    }
}
// Serializes as a single-entry map `{ OID_VALUE: inner }`; the exact shape
// `StateVisitor` accepts on deserialization.
impl serde::Serialize for State {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let mut ser = serializer.serialize_map(Some(1))?;
        ser.serialize_entry(OID_VALUE.deref(), &self.inner)?;
        ser.end()
    }
}
impl<'de> serde::Deserialize<'de> for State {
    // Counterpart to the Serialize impl; see StateVisitor for the accepted shape.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_map(StateVisitor)
    }
}
/// serde visitor for `State`: expects a single-entry map `{ OID_VALUE: MachineState }`.
struct StateVisitor;
impl<'de> serde::de::Visitor<'de> for StateVisitor {
    type Value = State;
    fn expecting(&self, formatter: &mut Formatter) -> fmt::Result {
        write!(formatter, "a map from OIDs to value objects")
    }
    fn visit_map<A: MapAccess<'de>>(self, mut map: A) -> Result<Self::Value, A::Error> {
        // The key must be exactly the fabaccess state OID; anything else is rejected.
        let oid: ObjectIdentifier = map.next_key()?.ok_or(A::Error::missing_field("oid"))?;
        if oid != *OID_VALUE.deref() {
            return Err(A::Error::invalid_value(
                Unexpected::Other("Unknown OID"),
                &"OID of fabaccess state",
            ));
        }
        let val: MachineState = map.next_value()?;
        Ok(State { inner: val })
    }
}
#[cfg(test)]
pub mod tests {
    // Placeholder: no tests yet. Glob imports kept for future test code
    // (unused glob imports do not warn).
    use super::value::*;
    use super::*;
}

View File

@ -0,0 +1,798 @@
use std::hash::Hash;
use ptr_meta::{DynMetadata, Pointee};
use rkyv::{
out_field, Archive, ArchivePointee, ArchiveUnsized, Archived, ArchivedMetadata, RelPtr,
Serialize, SerializeUnsized,
};
use rkyv_dyn::{DynError, DynSerializer};
use crate::utils::oid::ObjectIdentifier;
// Not using linkme because dynamically loaded modules
use inventory;
use rkyv::ser::{ScratchSpace, Serializer};
use std::collections::HashMap;
use std::ops::Deref;
use std::sync::atomic::{AtomicUsize, Ordering};
use rkyv::vec::ArchivedVec;
/// Transparent wrapper around `Box<T>` used as the owned (builder-side)
/// representation of a state value; `#[repr(transparent)]` keeps it
/// layout-identical to the inner box.
#[repr(transparent)]
struct MetaBox<T: ?Sized>(Box<T>);
impl<T: ?Sized> From<Box<T>> for MetaBox<T> {
    fn from(b: Box<T>) -> Self {
        Self(b)
    }
}
/// Archived counterpart of `MetaBox`: a relative pointer into the archive.
#[repr(transparent)]
struct ArchivedMetaBox<T: ArchivePointee + ?Sized>(RelPtr<T>);
impl<T: ArchivePointee + ?Sized> ArchivedMetaBox<T> {
    /// Resolve the relative pointer to a reference into the archive.
    // SAFETY: assumes the RelPtr was produced by rkyv over valid archived data,
    // so it points at a live, correctly-typed T — TODO confirm at call sites.
    #[inline]
    pub fn get(&self) -> &T {
        unsafe { &*self.0.as_ptr() }
    }
}
impl<T: ArchivePointee + ?Sized> AsRef<T> for ArchivedMetaBox<T> {
    fn as_ref(&self) -> &T {
        self.get()
    }
}
impl<T: ArchivePointee + ?Sized> Deref for ArchivedMetaBox<T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &Self::Target {
        self.get()
    }
}
// State built as
/// Prototype: builder-side state as a vec of owned value trait objects.
struct NewStateBuilder {
    inner: Vec<MetaBox<dyn SerializeStateValue>>,
}
// turns into
/// Prototype: archived state as a vec of relative pointers to archived values.
struct NewState {
    inner: ArchivedVec<ArchivedMetaBox<dyn ArchivedStateValue>>,
}
impl NewState {
    /// Look up the state part whose type OID matches `T` (linear scan over the
    /// usually-tiny value list). Not implemented yet; the sketch is kept below.
    pub fn get_value<T: TypeOid>(&self) -> Option<&T> {
        /*
        let target_oid = T::type_oid();
        let values = self.inner.as_slice();
        for v in values {
            let oid: &Archived<ObjectIdentifier> = &v.metadata().type_oid;
            if &target_oid.deref() == &oid.deref() {
                let value = unsafe { &*v.as_ptr().cast() };
                return Some(value);
            }
        }
        None
        */
        unimplemented!()
    }
}
// for usage.
// The important part is that both `SerializeValue` and `Value` tell us their OIDs. State will
// usually consist of only a very small number of parts, most of the time just one, so linear
// search will be the best.
// `dyn Value` is Archived using custom sauce Metadata that will store the OID of the state
// value, allowing us to cast the object (reasonably) safely. Thus we can also add a
// method `get_part<T: Value>(&self) -> Option<&T>`
// ArchivedBox is just a RelPtr into the object; so we'd use an `ArchivedValue<NewDesignState>`.
// We can freely modify the memory of the value, so caching vtables is possible & sensible?
// For dumping / loading values using serde we have to be able to serialize a `dyn Value` and to
// deserialize a `dyn SerializeValue`.
// This means, for every type T that's a value we must have:
// - impl SerializeValue for T, which probably implies impl Value for T?
// - impl Value for Archived<T>
// - impl serde::Deserialize for T
// - impl serde::Serialize for Archived<T>
// - impl rkyv::Archive, rkyv::Serialize for T
#[ptr_meta::pointee]
/// Trait that values in the State Builder have to implement
///
/// It requires serde::Deserialize and rkyv::SerializeUnsized to be implemented.
///
/// It is assumed that there is a 1:1 mapping between a SerializeStateValue and a StateValue
/// implementation. Every `T` implementing the former has exactly *one* `Archived<T>` implementing
/// the latter.
///
/// The archived version of any implementation must implement [ArchivedStateValue](trait@ArchivedStateValue).
pub trait SerializeStateValue: SerializeDynOid {}
#[ptr_meta::pointee]
/// Trait that (rkyv'ed) values in the State Object have to implement.
///
/// It requires serde::Serialize to be implemented.
///
/// It must be Sync since the State is sent as a signal to all connected actors by reference.
/// It must be Send since the DB thread and the signal thread may be different.
pub trait ArchivedStateValue: Send + Sync {}
/// Serializing a trait object by storing an OID alongside it
///
/// This trait is a dependency for [SerializeStateValue](trait@SerializeStateValue). It is by
/// default implemented for all `T where T: for<'a> Serialize<dyn DynSerializer + 'a>, T::Archived: TypeOid`.
pub trait SerializeDynOid {
    /// Return the OID associated with the **Archived** type, i.e. `Archived<Self>`.
    ///
    /// This OID will be serialized alongside the trait object and is used to retrieve the
    /// correct vtable when loading the state from DB.
    fn archived_type_oid(&self) -> &'static ObjectIdentifier;
    /// Serialize this type into a [`DynSerializer`](trait@DynSerializer), returning the
    /// position of the serialized value.
    fn serialize_dynoid(&self, serializer: &mut dyn DynSerializer) -> Result<usize, DynError>;
}
/// Types with an associated OID
///
/// This trait is required by the default implementation of [SerializeDynOid](trait@SerializeDynOid),
/// providing the OID that is serialized alongside the state object to be able to correctly cast
/// it when accessing state from the DB.
pub trait TypeOid {
    /// The globally unique OID of this type.
    fn type_oid() -> &'static ObjectIdentifier;
    /// Human-readable type name, used for diagnostics.
    fn type_name() -> &'static str;
}
// Blanket impl: any T that rkyv can serialize through a dynamic serializer and
// whose archived form carries a registered OID gets SerializeDynOid for free.
impl<T> SerializeDynOid for T
where
    T: for<'a> Serialize<dyn DynSerializer + 'a>,
    T::Archived: TypeOid,
{
    fn archived_type_oid(&self) -> &'static ObjectIdentifier {
        Archived::<T>::type_oid()
    }
    fn serialize_dynoid(&self, serializer: &mut dyn DynSerializer) -> Result<usize, DynError> {
        serializer.serialize_value(self)
    }
}
// Fat-pointer metadata for archived trait objects is recovered from the stored
// OID via the global registry (see ArchivedStateValueMetadata::pointer_metadata).
impl ArchivePointee for dyn ArchivedStateValue {
    type ArchivedMetadata = ArchivedStateValueMetadata;
    fn pointer_metadata(archived: &Self::ArchivedMetadata) -> <Self as Pointee>::Metadata {
        archived.pointer_metadata()
    }
}
impl ArchiveUnsized for dyn SerializeStateValue {
    type Archived = dyn ArchivedStateValue;
    type MetadataResolver = <ObjectIdentifier as Archive>::Resolver;
    // Writes the archived metadata: the type OID plus a zeroed vtable cache.
    unsafe fn resolve_metadata(
        &self,
        pos: usize,
        resolver: Self::MetadataResolver,
        out: *mut ArchivedMetadata<Self>, // => ArchivedStateValueMetadata
    ) {
        let (oid_pos, oid) = out_field!(out.type_oid);
        let type_oid = self.archived_type_oid();
        type_oid.resolve(pos + oid_pos, resolver, oid);
        let (_vtable_cache_pos, vtable_cache) = out_field!(out.vtable_cache);
        // NOTE(review): this assigns through a pointer into possibly-uninitialized
        // output memory; acceptable for AtomicUsize (no Drop), but `ptr::write`
        // would state the intent more clearly.
        *vtable_cache = AtomicUsize::default();
    }
}
impl<S: ScratchSpace + Serializer + ?Sized> SerializeUnsized<S> for dyn SerializeStateValue {
    fn serialize_unsized(&self, mut serializer: &mut S) -> Result<usize, S::Error> {
        // The dyn-level serializer erases S; errors come back boxed and are
        // downcast to the concrete serializer error here.
        self.serialize_dynoid(&mut serializer)
            .map_err(|e| *e.downcast::<S::Error>().unwrap())
    }
    fn serialize_metadata(&self, serializer: &mut S) -> Result<Self::MetadataResolver, S::Error> {
        let oid = self.archived_type_oid();
        oid.serialize(serializer)
    }
}
/// Archived metadata for `dyn ArchivedStateValue` fat pointers: the type OID
/// plus a lazily-filled vtable cache (0 = not yet resolved).
#[derive(Debug)]
pub struct ArchivedStateValueMetadata {
    pub type_oid: Archived<ObjectIdentifier>,
    vtable_cache: AtomicUsize,
}
impl ArchivedStateValueMetadata {
    // TODO: `usize as *const VTable` is not sane.
    /// Return the vtable pointer (as `usize`) for the archived value's concrete
    /// type, memoized in `vtable_cache`. `0` doubles as the "not yet cached"
    /// sentinel; no valid vtable lives at address zero.
    ///
    /// # Panics
    /// Panics if the stored type OID was never registered via an `ImplEntry`.
    pub fn vtable(&self) -> usize {
        let cached = self.vtable_cache.load(Ordering::Relaxed);
        if cached != 0 {
            return cached;
        }
        // Cache miss: consult the global registry. `unwrap_or_else` defers the
        // panic-message formatting to the failure path — the previous
        // `expect(&format!(…))` allocated the message on every miss
        // (clippy: expect_fun_call).
        let val = IMPL_REGISTRY
            .get(ImplId::from_type_oid(&self.type_oid))
            .unwrap_or_else(|| panic!("Unregistered type oid {:?}", self.type_oid))
            .vtable;
        self.vtable_cache.store(val, Ordering::Relaxed);
        val
    }
    /// Reconstruct fat-pointer metadata for `dyn ArchivedStateValue`.
    ///
    /// Transmuting an arbitrary `usize` into `DynMetadata` is only sound
    /// because `vtable()` yields a pointer registered by
    /// `statevalue_registeredimpl!` for exactly this trait object type.
    pub fn pointer_metadata(&self) -> DynMetadata<dyn ArchivedStateValue> {
        unsafe { core::mem::transmute(self.vtable()) }
    }
}
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
/// OID of an [ArchivedStateValue](trait@ArchivedStateValue) implementation.
///
/// Used by the global type registry of all implementations to look up the vtables of state values
/// when accessing it from DB and when (de-)serializing it using serde.
struct ImplId<'a> {
    type_oid: &'a [u8],
}
impl<'a> ImplId<'a> {
    /// Borrow an id from raw OID bytes (e.g. straight out of an archive).
    fn from_type_oid(type_oid: &'a [u8]) -> Self {
        Self { type_oid }
    }
}
impl ImplId<'static> {
    /// Id for a registered type. Relies on `ObjectIdentifier` deref-coercing to
    /// `&[u8]` — presumably its encoded byte form; TODO confirm in utils::oid.
    fn new<T: TypeOid>() -> Self {
        Self {
            type_oid: &T::type_oid(),
        }
    }
}
/// Per-implementation registry payload: vtable pointer, type name, and the
/// registration site (for conflict diagnostics).
#[derive(Copy, Clone, Debug)]
struct ImplData<'a> {
    pub vtable: usize,
    pub name: &'a str,
    pub info: ImplDebugInfo,
}
/// Source location of a registration, captured by the `debug_info!` macro.
#[derive(Copy, Clone, Debug)]
pub struct ImplDebugInfo {
    pub file: &'static str,
    pub line: u32,
    pub column: u32,
}
#[derive(Debug)]
/// State Value Implementation Entry
///
/// To register a state implementation you must call [inventory::collect](macro@inventory::collect)
/// macro for an Entry constructed for your type on top level. Your type will have to have
/// implementations of [TypeOid](trait@TypeOid) and [RegisteredImpl](trait@RegisteredImpl)
/// Alternatively you can use the
/// [statevalue_register](macro@crate::statevalue_register) macro with your OID as first and type
/// as second parameter like so:
///
pub struct ImplEntry<'a> {
    id: ImplId<'a>,
    data: ImplData<'a>,
}
// Gather every ImplEntry submitted across the crate; iterated once to build
// IMPL_REGISTRY.
inventory::collect!(ImplEntry<'static>);
impl ImplEntry<'_> {
    /// Construct the registry entry for `T`: OID, vtable, name, debug location.
    pub fn new<T: TypeOid + RegisteredImpl>() -> Self {
        Self {
            id: ImplId::new::<T>(),
            data: ImplData {
                vtable: <T as RegisteredImpl>::vtable(),
                name: <T as TypeOid>::type_name(),
                info: <T as RegisteredImpl>::debug_info(),
            },
        }
    }
}
/// Map from type OID to implementation data, built once from inventory entries.
#[derive(Debug)]
struct ImplRegistry {
    oid_to_data: HashMap<ImplId<'static>, ImplData<'static>>,
}
impl ImplRegistry {
    /// Create an empty registry.
    fn new() -> Self {
        Self {
            oid_to_data: HashMap::new(),
        }
    }
    /// Register one inventory entry.
    ///
    /// OID collisions are reported on stderr (with the source locations of
    /// both implementations) but do not abort; the later registration wins.
    fn add_entry(&mut self, entry: &'static ImplEntry) {
        let old_val = self.oid_to_data.insert(entry.id, entry.data);
        if let Some(old) = old_val {
            eprintln!("Value impl oid conflict for {:?}", entry.id.type_oid);
            eprintln!(
                "Existing impl registered at {}:{}:{}",
                old.info.file, old.info.line, old.info.column
            );
            eprintln!(
                "New impl registered at {}:{}:{}",
                entry.data.info.file, entry.data.info.line, entry.data.info.column
            );
        }
    }
    /// Look up the implementation data registered for a type OID, if any.
    fn get(&self, type_oid: ImplId) -> Option<ImplData> {
        // `ImplData` is `Copy`; `.copied()` is the idiomatic form of `.map(|d| *d)`.
        self.oid_to_data.get(&type_oid).copied()
    }
}
lazy_static::lazy_static! {
    // FIXME: Dynamic modules *will* break this.
    // Built on first access from every inventory-submitted ImplEntry.
    // (std::sync::LazyLock could replace lazy_static on newer toolchains.)
    static ref IMPL_REGISTRY: ImplRegistry = {
        let mut reg = ImplRegistry::new();
        for entry in inventory::iter::<ImplEntry> {
            reg.add_entry(entry);
        }
        reg
    };
}
/// Implemented (normally via `statevalue_registeredimpl!`) by archived types
/// whose vtable is stored in the global registry.
///
/// # Safety
/// `vtable()` must return the genuine vtable pointer for
/// `Self as dyn ArchivedStateValue`; anything else produces bogus fat-pointer
/// metadata and undefined behavior when an archived value is accessed.
pub unsafe trait RegisteredImpl {
    fn vtable() -> usize;
    fn debug_info() -> ImplDebugInfo;
}
#[doc(hidden)]
#[macro_use]
pub mod macros {
    // Captures the registration site (file/line/column) for conflict diagnostics.
    #[macro_export]
    macro_rules! debug_info {
        () => {
            $crate::resources::state::value::ImplDebugInfo {
                file: ::core::file!(),
                line: ::core::line!(),
                column: ::core::column!(),
            }
        };
    }
    // Implements TypeOid for $z, using OID static $x and display name $y.
    #[macro_export]
    macro_rules! statevalue_typeoid {
        ( $x:ident, $y:ty, $z:ty ) => {
            impl $crate::resources::state::value::TypeOid for $z {
                fn type_oid() -> &'static $crate::utils::oid::ObjectIdentifier {
                    &$x
                }
                fn type_name() -> &'static str {
                    stringify!($y)
                }
            }
        };
    }
    // Implements RegisteredImpl for $z by extracting the trait-object vtable
    // from a null raw pointer cast (the pointer is never dereferenced).
    #[macro_export]
    macro_rules! statevalue_registeredimpl {
        ( $z:ty ) => {
            unsafe impl $crate::resources::state::value::RegisteredImpl for $z {
                fn vtable() -> usize {
                    unsafe {
                        ::core::mem::transmute(ptr_meta::metadata(::core::ptr::null::<$z>()
                            as *const dyn $crate::resources::state::value::ArchivedStateValue))
                    }
                }
                fn debug_info() -> $crate::resources::state::value::ImplDebugInfo {
                    $crate::debug_info!()
                }
            }
        };
    }
    // Registers $y (with archived form $z; the two-arg form uses $y for both)
    // as a state value implementation.
    // NOTE(review): the two-argument arm forwards to `oidvalue!`, which appears
    // to be commented out elsewhere in this file — confirm it still exists.
    #[macro_export]
    macro_rules! statevalue_register {
        ( $x:ident, $y:ty ) => {
            $crate::oidvalue! {$x, $y, $y}
        };
        ( $x:ident, $y:ty, $z:ty ) => {
            $crate::statevalue_typeoid! { $x, $y, $z }
            $crate::statevalue_registeredimpl! { $z }
            ::inventory::submit! {$crate::resources::state::value::ImplEntry::new::<$z>()}
        };
    }
}
/*
/// Adding a custom type to BFFH state management:
///
/// 1. Implement `serde`'s [`Serialize`](serde::Serialize) and [`Deserialize`](serde::Deserialize)
/// - `derive()`d instances work just fine, but keep stability over releases in mind.
/// 2. Implement rkyv's [`Serialize`](rkyv::Serialize).
/// 3. Implement TypeOid on your Archived type (i.e. `<T as Archive>::Archived`)
/// 4. Implement this
pub trait Value: Any + fmt::Debug + erased_serde::Serialize + Sync {
/// Initialize `&mut self` from `deserializer`
///
/// At the point this is called &mut self is of undefined value but guaranteed to be well
/// aligned and non-null. Any read access into &mut self before all of &self is brought into
/// a valid state is however undefined behaviour.
/// To this end you *must* initialize `self` **completely**. Serde will do the right thing if
/// you directly deserialize the type you're implementing `Value` for, but for manual
/// implementations this is important to keep in mind.
fn deserialize_init<'de>(
&mut self,
deserializer: &mut dyn erased_serde::Deserializer<'de>,
) -> Result<(), erased_serde::Error>;
/// Implement `PartialEq` dynamically.
///
/// This should return `true` iff the Value is of the same type and `self` == `other` for
/// non-dynamic types would return `true`.
/// It is safe to always return `false`.
fn dyn_eq(&self, other: &dyn Value) -> bool;
fn as_value(&self) -> &dyn Value;
fn as_any(&self) -> &dyn Any;
}
erased_serde::serialize_trait_object!(Value);
erased_serde::serialize_trait_object!(SerializeValue);
erased_serde::serialize_trait_object!(DeserializeValue);
impl<T> Value for T
where
T: Any
+ fmt::Debug
+ PartialEq
+ Sync
+ erased_serde::Serialize
+ for<'de> serde::Deserialize<'de>,
{
fn deserialize_init<'de>(
&mut self,
deserializer: &mut dyn erased_serde::Deserializer<'de>,
) -> Result<(), erased_serde::Error> {
*self = erased_serde::deserialize(deserializer)?;
Ok(())
}
fn dyn_eq(&self, other: &dyn Value) -> bool {
other
.as_any()
.downcast_ref()
.map_or(false, |other: &T| other == self)
}
fn as_value(&self) -> &dyn Value {
self
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl PartialEq for dyn Value {
fn eq(&self, other: &Self) -> bool {
self.dyn_eq(other)
}
}
#[repr(transparent)]
pub(super) struct DynVal<'a>(pub &'a dyn SerializeValue);
impl<'a> serde::Serialize for DynVal<'a> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let mut ser = serializer.serialize_map(Some(1))?;
let oid = self.0.archived_type_oid();
ser.serialize_entry(oid, self.0)?;
ser.end()
}
}
#[repr(transparent)]
pub(super) struct DynOwnedVal(pub Box<dyn SerializeValue>);
impl<'de> serde::Deserialize<'de> for DynOwnedVal {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
deserializer.deserialize_map(DynValVisitor)
}
}
struct DynValVisitor;
impl<'de> serde::de::Visitor<'de> for DynValVisitor {
type Value = DynOwnedVal;
fn expecting(&self, formatter: &mut Formatter) -> fmt::Result {
write!(formatter, "an one entry map from OID to some value object")
}
fn visit_map<A: serde::de::MapAccess<'de>>(self, mut map: A) -> Result<Self::Value, A::Error> {
// Bad magic code. Problem we have to solve: We only know how to parse whatever comes
// after the OID after having looked at the OID. We have zero static type info available
// during deserialization. So:
// Get OID first. That's easy, we know it's the key, we know how to read it.
let oid: ObjectIdentifier = map.next_key()?.ok_or(A::Error::missing_field("oid"))?;
// Get the Value vtable for that OID. Or fail because we don't know that OID, either works.
let valimpl = IMPL_REGISTRY.get(ImplId::from_type_oid(&oid)).ok_or(
serde::de::Error::invalid_value(
serde::de::Unexpected::Other("unknown oid"),
&"oid an implementation was registered for",
),
)?;
// Casting random usize you find on the side of the road as vtable on unchecked pointers.
// What could possibly go wrong? >:D
let valbox: MaybeUninit<Box<dyn SerializeValue>> = unsafe {
// "recreate" vtable as fat ptr metadata (we literally just cast an `usize` but the
// only way to put this usize into that spot is by having a valid vtable cast so it's
// probably almost safe)
let meta = valimpl.pointer_metadata();
// Don't bother checking here. The only way this could be bad is if the vtable above
// is bad an in that case a segfault here would be *much better* than whatever is
// going to happen afterwards.
let layout = Layout::from_size_align_unchecked(meta.size_of(), meta.align_of());
// Hello yes I would like a Box the old fashioned way.
// Oh you're asking why we're allocating stuff here and never ever bother zeroing or
// validate in any other way if this is sane?
// Well...
let ptr: *mut () = std::alloc::alloc(layout).cast::<()>();
let b = Box::from_raw(ptr_meta::from_raw_parts_mut(ptr, meta));
// We make this a MaybeUninit so `Drop` is never called on the uninitialized value
MaybeUninit::new(b)
};
// ... The only way we can make Value a trait object by having it deserialize *into
// it's own uninitialized representation*. Yeah don't worry, this isn't the worst part of
// the game yet. >:D
let seed = InitIntoSelf(valbox);
let val = map.next_value_seed(seed)?;
Ok(DynOwnedVal(val))
}
}
struct InitIntoSelf(MaybeUninit<Box<dyn SerializeValue>>);
impl<'de> serde::de::DeserializeSeed<'de> for InitIntoSelf {
type Value = Box<dyn SerializeValue>;
fn deserialize<D>(mut self, deserializer: D) -> Result<Self::Value, D::Error>
where
D: serde::Deserializer<'de>,
{
let mut deser = <dyn erased_serde::Deserializer>::erase(deserializer);
// Unsafe as hell but if we never read from this reference before initializing it's not
// undefined behaviour.
let selfptr = unsafe { &mut *self.0.as_mut_ptr() };
// Hey, better initialize late than never.
selfptr
.deserialize_init(&mut deser)
.map_err(|e| D::Error::custom(e))?;
// Assuming `deserialize_init` didn't error and did its job this is now safe.
unsafe { Ok(self.0.assume_init()) }
}
}
pub trait TypeOid {
fn type_oid() -> &'static ObjectIdentifier;
fn type_name() -> &'static str;
}
impl<S: ScratchSpace + Serializer + ?Sized> SerializeUnsized<S> for dyn SerializeValue {
fn serialize_unsized(&self, mut serializer: &mut S) -> Result<usize, S::Error> {
self.serialize_dynoid(&mut serializer)
.map_err(|e| *e.downcast::<S::Error>().unwrap())
}
fn serialize_metadata(&self, serializer: &mut S) -> Result<Self::MetadataResolver, S::Error> {
let oid = self.archived_type_oid();
oid.serialize(serializer)
}
}
impl<T> SerializeDynOid for T
where
T: for<'a> Serialize<dyn DynSerializer + 'a>,
T::Archived: TypeOid,
{
fn serialize_dynoid(&self, serializer: &mut dyn DynSerializer) -> Result<usize, DynError> {
serializer.serialize_value(self)
}
fn archived_type_oid(&self) -> &'static ObjectIdentifier {
Archived::<T>::type_oid()
}
}
pub trait DeserializeDynOid {
unsafe fn deserialize_dynoid(
&self,
deserializer: &mut dyn DynDeserializer,
alloc: &mut dyn FnMut(Layout) -> *mut u8,
) -> Result<*mut (), DynError>;
fn deserialize_dynoid_metadata(
&self,
deserializer: &mut dyn DynDeserializer,
) -> Result<<dyn SerializeValue as Pointee>::Metadata, DynError>;
}
#[ptr_meta::pointee]
pub trait SerializeValue: Value + SerializeDynOid {
}
impl<T: Archive + Value + SerializeDynOid + Clone> SerializeValue for T
where
T::Archived: RegisteredImpl,
{
}
impl PartialEq for dyn SerializeValue {
fn eq(&self, other: &Self) -> bool {
self.dyn_eq(other.as_value())
}
}
impl Clone for Box<dyn SerializeValue> {
fn clone(&self) -> Self {
self.dyn_clone()
}
}
#[ptr_meta::pointee]
pub trait DeserializeValue: DeserializeDynOid {}
impl<T: DeserializeDynOid> DeserializeValue for T {}
impl ArchivePointee for dyn DeserializeValue {
type ArchivedMetadata = ArchivedValueMetadata;
fn pointer_metadata(archived: &Self::ArchivedMetadata) -> <Self as Pointee>::Metadata {
archived.pointer_metadata()
}
}
impl<D: Fallible + ?Sized> DeserializeUnsized<dyn SerializeValue, D> for dyn DeserializeValue {
unsafe fn deserialize_unsized(
&self,
mut deserializer: &mut D,
mut alloc: impl FnMut(Layout) -> *mut u8,
) -> Result<*mut (), D::Error> {
self.deserialize_dynoid(&mut deserializer, &mut alloc)
.map_err(|e| *e.downcast().unwrap())
}
fn deserialize_metadata(
&self,
mut deserializer: &mut D,
) -> Result<<dyn SerializeValue as Pointee>::Metadata, D::Error> {
self.deserialize_dynoid_metadata(&mut deserializer)
.map_err(|e| *e.downcast().unwrap())
}
}
impl ArchiveUnsized for dyn SerializeValue {
type Archived = dyn DeserializeValue;
type MetadataResolver = <ObjectIdentifier as Archive>::Resolver;
unsafe fn resolve_metadata(
&self,
pos: usize,
resolver: Self::MetadataResolver,
out: *mut ArchivedMetadata<Self>,
) {
let (oid_pos, oid) = out_field!(out.type_oid);
let type_oid = self.archived_type_oid();
type_oid.resolve(pos + oid_pos, resolver, oid);
}
}
lazy_static::lazy_static! {
pub static ref OID_BOOL: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.1.1").unwrap()
};
pub static ref OID_U8: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.1.2").unwrap()
};
pub static ref OID_U16: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.1.3").unwrap()
};
pub static ref OID_U32: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.1.4").unwrap()
};
pub static ref OID_U64: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.1.5").unwrap()
};
pub static ref OID_U128: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.1.6").unwrap()
};
pub static ref OID_I8: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.1.7").unwrap()
};
pub static ref OID_I16: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.1.8").unwrap()
};
pub static ref OID_I32: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.1.9").unwrap()
};
pub static ref OID_I64: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.1.10").unwrap()
};
pub static ref OID_I128: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.1.11").unwrap()
};
pub static ref OID_VEC3U8: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.1.13").unwrap()
};
pub static ref OID_POWERED: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.2.1").unwrap()
};
pub static ref OID_INTENSITY: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.2.2").unwrap()
};
pub static ref OID_COLOUR: ObjectIdentifier = {
ObjectIdentifier::from_str("1.3.6.1.4.1.48398.612.2.3").unwrap()
};
}
oidvalue!(OID_BOOL, bool);
oidvalue!(OID_U8, u8);
oidvalue!(OID_U16, u16);
oidvalue!(OID_U32, u32);
oidvalue!(OID_U64, u64);
oidvalue!(OID_U128, u128);
oidvalue!(OID_I8, i8);
oidvalue!(OID_I16, i16);
oidvalue!(OID_I32, i32);
oidvalue!(OID_I64, i64);
oidvalue!(OID_I128, i128);
#[derive(
serde::Serialize,
serde::Deserialize,
Debug,
Copy,
Clone,
PartialEq,
Eq,
Hash,
rkyv::Archive,
rkyv::Serialize,
rkyv::Deserialize,
)]
#[archive_attr(derive(Debug, PartialEq, serde::Serialize, serde::Deserialize))]
pub struct Vec3u8 {
pub a: u8,
pub b: u8,
pub c: u8,
}
oidvalue!(OID_VEC3U8, Vec3u8, ArchivedVec3u8);
#[cfg(test)]
mod tests {
use super::*;
use rand::distributions::Standard;
use rand::prelude::Distribution;
use rand::Rng;
impl Distribution<Vec3u8> for Standard {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Vec3u8 {
let a = self.sample(rng);
let b = self.sample(rng);
let c = self.sample(rng);
Vec3u8 { a, b, c }
}
}
}
*/

1
bffhd/sensors/mod.rs Normal file
View File

@ -0,0 +1 @@

105
bffhd/session/mod.rs Normal file
View File

@ -0,0 +1,105 @@
use crate::authorization::permissions::Permission;
use crate::authorization::roles::Roles;
use crate::resources::Resource;
use crate::users::db::User;
use crate::users::{db, UserRef};
use crate::Users;
use tracing::Span;
/// Factory for authenticated user sessions.
///
/// Holds cheaply cloneable handles to the user and role stores that every
/// opened session needs for permission checks.
#[derive(Clone)]
pub struct SessionManager {
    // Handle to the global user database.
    users: Users,
    // Role store used to evaluate permission checks.
    roles: Roles,
    // cache: SessionCache // todo
}
impl SessionManager {
    /// Construct a manager over the given user and role stores.
    pub fn new(users: Users, roles: Roles) -> Self {
        Self { users, roles }
    }

    /// Open a session for `uid`, or `None` if no such user exists.
    pub fn try_open(&self, parent: &Span, uid: impl AsRef<str>) -> Option<SessionHandle> {
        let user = self.users.get_user(uid.as_ref())?;
        Some(self.open(parent, user))
    }

    // TODO: make infallible
    /// Open a session for an already-looked-up user record.
    pub fn open(&self, parent: &Span, user: User) -> SessionHandle {
        let uid = user.id.as_str();
        let span = tracing::info_span!(
            target: "bffh::api",
            parent: parent,
            "session",
            uid,
        );
        tracing::trace!(parent: &span, uid, ?user, "opening session");

        let users = self.users.clone();
        let roles = self.roles.clone();
        SessionHandle {
            span,
            users,
            roles,
            user: UserRef::new(user.id),
        }
    }
}
/// A live session for a single user.
///
/// Carries its own tracing span plus store handles so permission checks can
/// be made without going back through the manager that opened it.
#[derive(Clone)]
pub struct SessionHandle {
    pub span: Span,
    pub users: Users,
    pub roles: Roles,
    // The user this session was opened for; exposed via `get_user_ref`.
    user: UserRef,
}
impl SessionHandle {
    /// Returns a cloned reference to the user this session belongs to.
    pub fn get_user_ref(&self) -> UserRef {
        self.user.clone()
    }

    /// Looks up the full user record for this session.
    ///
    /// # Panics
    /// Panics if the user was deleted after the session was opened.
    pub fn get_user(&self) -> db::User {
        self.users
            .get_user(self.user.get_username())
            .expect("Failed to get user self")
    }

    /// Shared permission check used by all `has_*` methods: looks up the
    /// session user and asks the role store whether they hold `perm`.
    /// Returns `false` when the user no longer exists.
    fn permitted(&self, perm: impl AsRef<Permission>) -> bool {
        if let Some(user) = self.users.get_user(self.user.get_username()) {
            self.roles.is_permitted(&user.userdata, perm)
        } else {
            false
        }
    }

    /// May the session user see that `resource` exists?
    pub fn has_disclose(&self, resource: &Resource) -> bool {
        self.permitted(&resource.get_required_privs().disclose)
    }

    /// May the session user read the state of `resource`?
    pub fn has_read(&self, resource: &Resource) -> bool {
        self.permitted(&resource.get_required_privs().read)
    }

    /// May the session user change the state of `resource`?
    pub fn has_write(&self, resource: &Resource) -> bool {
        self.permitted(&resource.get_required_privs().write)
    }

    /// May the session user manage `resource`?
    pub fn has_manage(&self, resource: &Resource) -> bool {
        self.permitted(&resource.get_required_privs().manage)
    }

    /// Does the session user hold the arbitrary permission `perm`?
    pub fn has_perm(&self, perm: impl AsRef<Permission>) -> bool {
        self.permitted(perm)
    }
}

161
bffhd/tls.rs Normal file
View File

@ -0,0 +1,161 @@
use std::fs::File;
use std::io;
use std::io::BufReader;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use crate::capnp::TlsListen;
use futures_rustls::TlsAcceptor;
use miette::Diagnostic;
use rustls::version::{TLS12, TLS13};
use rustls::{Certificate, PrivateKey, ServerConfig, SupportedCipherSuite};
use thiserror::Error;
use tracing::Level;
use crate::keylog::KeyLogFile;
use crate::tls::Error::KeyLogOpen;
fn lookup_cipher_suite(name: &str) -> Option<SupportedCipherSuite> {
match name {
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" => {
Some(rustls::cipher_suite::TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)
}
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" => {
Some(rustls::cipher_suite::TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384)
}
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256" => {
Some(rustls::cipher_suite::TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256)
}
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" => {
Some(rustls::cipher_suite::TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256)
}
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" => {
Some(rustls::cipher_suite::TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384)
}
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256" => {
Some(rustls::cipher_suite::TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256)
}
"TLS13_AES_128_GCM_SHA256" => Some(rustls::cipher_suite::TLS13_AES_128_GCM_SHA256),
"TLS13_AES_256_GCM_SHA384" => Some(rustls::cipher_suite::TLS13_AES_256_GCM_SHA384),
"TLS13_CHACHA20_POLY1305_SHA256" => {
Some(rustls::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256)
}
_ => None,
}
}
/// Process-wide TLS settings (currently only the optional key log).
#[derive(Debug, Clone)]
pub struct TlsConfig {
    // When set, TLS session secrets are written to this file for debugging
    // (presumably SSLKEYLOGFILE-style — confirm KeyLogFile's format).
    keylog: Option<Arc<KeyLogFile>>,
}
/// Errors that can occur while building TLS state or acceptors.
#[derive(Debug, Error, Diagnostic)]
pub enum Error {
    #[error("failed to open certificate file at path {0}")]
    OpenCertFile(PathBuf, #[source] io::Error),
    #[error("failed to open private key file at path {0}")]
    OpenKeyFile(PathBuf, #[source] io::Error),
    // NOTE(review): also used for failures parsing the configured cert file,
    // not only system certs — the variant name is slightly misleading.
    #[error("failed to read system certs")]
    SystemCertsFile(#[source] io::Error),
    #[error("failed to read from key file")]
    ReadKeyFile(#[source] io::Error),
    #[error("private key file must contain a single PEM-encoded private key")]
    KeyFileFormat,
    #[error("invalid TLS version {0}")]
    TlsVersion(String),
    #[error("Initializing TLS context failed")]
    Builder(
        #[from]
        #[source]
        rustls::Error,
    ),
    #[error("failed to initialize key log")]
    KeyLogOpen(#[source] io::Error),
}
impl TlsConfig {
    /// Build TLS configuration state, optionally enabling TLS secret logging.
    ///
    /// When `keylogfile` is `Some`, session secrets are written to that path;
    /// `warn` controls whether a prominent warning is emitted about it.
    pub fn new(keylogfile: Option<impl AsRef<Path>>, warn: bool) -> Result<Self, Error> {
        let span = tracing::span!(Level::INFO, "tls");
        let _guard = span.enter();

        if warn {
            Self::warn_logging_secrets(keylogfile.as_ref());
        }

        // `map`/`transpose` replaces the previous if/else, and `map(Arc::new)`
        // the redundant `|ok| Arc::new(ok)` closure (clippy::redundant_closure).
        let keylog = keylogfile
            .map(|path| KeyLogFile::new(path).map(Arc::new).map_err(KeyLogOpen))
            .transpose()?;
        Ok(Self { keylog })
    }

    /// Warn loudly when key logging is on; otherwise log a quiet debug note.
    fn warn_logging_secrets(path: Option<impl AsRef<Path>>) {
        if let Some(path) = path {
            let path = path.as_ref().display();
            tracing::warn!(keylog = true, path = %path,
                "TLS secret logging is ENABLED! TLS secrets and keys will be written to {}",
                path);
        } else {
            tracing::debug!(keylog = false, "TLS secret logging is disabled.");
        }
    }

    /// Construct a `TlsAcceptor` from the certificate/key paths and optional
    /// minimum-version setting in `config`.
    ///
    /// # Errors
    /// Returns an [`Error`] if the certificate or key file cannot be opened,
    /// parsed, or if the configured TLS version is unknown.
    pub fn make_tls_acceptor(&self, config: &TlsListen) -> Result<TlsAcceptor, Error> {
        let span = tracing::debug_span!("tls");
        let _guard = span.enter();

        let path = config.certfile.as_path();
        tracing::debug!(path = %path.display(), "reading certificates");
        let mut certfp =
            BufReader::new(File::open(path).map_err(|e| Error::OpenCertFile(path.into(), e))?);
        let certs = rustls_pemfile::certs(&mut certfp)
            .map_err(Error::SystemCertsFile)?
            .into_iter()
            .map(Certificate)
            .collect();

        let path = config.keyfile.as_path();
        tracing::debug!(path = %path.display(), "reading private key");
        let mut keyfp =
            BufReader::new(File::open(path).map_err(|err| Error::OpenKeyFile(path.into(), err))?);
        // Only the first PEM item is considered; it must be a PKCS#8 or RSA key.
        let key = match rustls_pemfile::read_one(&mut keyfp).map_err(Error::ReadKeyFile)? {
            Some(rustls_pemfile::Item::PKCS8Key(key) | rustls_pemfile::Item::RSAKey(key)) => {
                PrivateKey(key)
            }
            _ => {
                tracing::error!("private key file invalid");
                return Err(Error::KeyFileFormat);
            }
        };

        let tls_builder = ServerConfig::builder()
            .with_safe_default_cipher_suites()
            .with_safe_default_kx_groups();
        // `tls_min_version` pins exactly one protocol version; absent, rustls'
        // safe defaults (TLS 1.2 + 1.3) are used.
        let tls_builder = if let Some(ref min) = config.tls_min_version {
            let v = min.to_lowercase();
            match v.as_str() {
                "tls12" => tls_builder.with_protocol_versions(&[&TLS12]),
                "tls13" => tls_builder.with_protocol_versions(&[&TLS13]),
                _ => return Err(Error::TlsVersion(v)),
            }
        } else {
            tls_builder.with_safe_default_protocol_versions()
        }?;
        let mut tls_config = tls_builder
            .with_no_client_auth()
            .with_single_cert(certs, key)?;

        if let Some(keylog) = &self.keylog {
            // Arc<KeyLogFile> coerces to Arc<dyn rustls::KeyLog>.
            tls_config.key_log = keylog.clone();
        }
        Ok(Arc::new(tls_config).into())
    }
}

202
bffhd/users/db.rs Normal file
View File

@ -0,0 +1,202 @@
use lmdb::{DatabaseFlags, Environment, RwTransaction, Transaction, WriteFlags};
use rkyv::Infallible;
use std::collections::HashMap;
use std::sync::Arc;
use crate::db;
use crate::db::{AlignedAdapter, ArchivedValue, RawDB, DB};
use rkyv::ser::serializers::AllocSerializer;
use rkyv::ser::Serializer;
use rkyv::Deserialize;
pub use crate::db::Error;
#[derive(
    Clone,
    PartialEq,
    Eq,
    Debug,
    rkyv::Archive,
    rkyv::Serialize,
    rkyv::Deserialize,
    serde::Serialize,
    serde::Deserialize,
)]
/// A user record as stored in the user database.
pub struct User {
    /// Unique user id; also the database key under which the record is stored.
    pub id: String,
    /// Authorization data (roles, optional password hash, extra key/values).
    pub userdata: UserData,
}
/// Hash a plaintext password with argon2 using a freshly generated random
/// 16-byte salt and the default argon2 configuration.
fn hash_pw(pw: &[u8]) -> argon2::Result<String> {
    let salt: [u8; 16] = rand::random();
    argon2::hash_encoded(pw, &salt, &argon2::Config::default())
}
impl User {
    /// Verify a plaintext password against the stored argon2 hash.
    ///
    /// Returns `Ok(false)` when the user has no password set.
    pub fn check_password(&self, pwd: &[u8]) -> Result<bool, argon2::Error> {
        if let Some(ref encoded) = self.userdata.passwd {
            argon2::verify_encoded(encoded, pwd)
        } else {
            Ok(false)
        }
    }

    /// Create a new user, hashing the given plaintext password.
    ///
    /// # Panics
    /// Panics if argon2 hashing fails.
    pub fn new_with_plain_pw(username: &str, password: impl AsRef<[u8]>) -> Self {
        // `unwrap_or_else` avoids allocating the panic message on the happy
        // path (clippy::expect_fun_call) and includes the actual error.
        let hash = hash_pw(password.as_ref())
            .unwrap_or_else(|e| panic!("Failed to hash password for {}: {}", username, e));
        tracing::debug!("Hashed pw for {} to {}", username, hash);
        User {
            id: username.to_string(),
            userdata: UserData {
                passwd: Some(hash),
                ..Default::default()
            },
        }
    }

    /// Replace this user's password with a hash of `password`.
    ///
    /// # Panics
    /// Panics if argon2 hashing fails.
    pub fn set_pw(&mut self, password: impl AsRef<[u8]>) {
        self.userdata.passwd = Some(hash_pw(password.as_ref()).unwrap_or_else(|e| {
            panic!("failed to update hashed password for {}: {}", &self.id, e)
        }));
    }
}
#[derive(
    Clone,
    PartialEq,
    Eq,
    Debug,
    Default,
    rkyv::Archive,
    rkyv::Serialize,
    rkyv::Deserialize,
    serde::Serialize,
    serde::Deserialize,
)]
/// Data on an user to base decisions on
///
/// This of course includes authorization data, i.e. that users set roles
pub struct UserData {
    /// A Person has N ≥ 0 roles.
    /// Persons are only ever given roles, not permissions directly
    pub roles: Vec<String>,
    /// Argon2-encoded password hash, if a password is set.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    pub passwd: Option<String>,
    /// Additional data storage
    #[serde(flatten, skip_serializing_if = "HashMap::is_empty")]
    pub kv: HashMap<String, String>,
}
impl UserData {
    /// Fresh user data with the given roles, no password, no extra fields.
    pub fn new(roles: Vec<String>) -> Self {
        Self::new_with_kv(roles, HashMap::new())
    }

    /// Fresh user data with the given roles and additional key/value storage.
    pub fn new_with_kv(roles: Vec<String>, kv: HashMap<String, String>) -> Self {
        Self {
            roles,
            kv,
            passwd: None,
        }
    }
}
/// LMDB-backed user database: maps user ids to rkyv-serialized [`User`] records.
#[derive(Clone, Debug)]
pub struct UserDB {
    // Shared LMDB environment the database lives in.
    env: Arc<Environment>,
    // Typed wrapper over the raw LMDB database handle.
    db: DB<AlignedAdapter<User>>,
}
impl UserDB {
    // TODO: Make an userdb-specific Transaction newtype to make this safe
    /// Begin a read-write transaction on the backing LMDB environment.
    ///
    /// # Safety
    /// The returned transaction must only be used with databases from this
    /// same environment.
    pub unsafe fn get_rw_txn(&self) -> Result<RwTransaction, db::Error> {
        // The returned transaction is only valid for *this* environment.
        Ok(self.env.begin_rw_txn()?)
    }
    /// Wrap an already-opened raw database handle.
    ///
    /// # Safety
    /// `db` must belong to `env` and contain rkyv-serialized `User` values.
    pub unsafe fn new(env: Arc<Environment>, db: RawDB) -> Self {
        let db = DB::new(db);
        Self { env, db }
    }
    /// Open the existing "user" database in `env`.
    ///
    /// # Safety
    /// See [`UserDB::new`].
    pub unsafe fn open(env: Arc<Environment>) -> Result<Self, db::Error> {
        let db = RawDB::open(&env, Some("user"))?;
        Ok(Self::new(env, db))
    }
    /// Create (or open) the "user" database in `env`.
    ///
    /// # Safety
    /// See [`UserDB::new`].
    pub unsafe fn create(env: Arc<Environment>) -> Result<Self, db::Error> {
        let flags = DatabaseFlags::empty();
        let db = RawDB::create(&env, Some("user"), flags)?;
        Ok(Self::new(env, db))
    }
    /// Fetch the archived record for `uid`, if present.
    pub fn get(&self, uid: &str) -> Result<Option<ArchivedValue<User>>, db::Error> {
        let txn = self.env.begin_ro_txn()?;
        self.db.get(&txn, &uid.as_bytes())
    }
    /// Serialize and store `user` under `uid` in its own transaction.
    pub fn put(&self, uid: &str, user: &User) -> Result<(), db::Error> {
        let mut serializer = AllocSerializer::<1024>::default();
        serializer.serialize_value(user).expect("rkyv error");
        let v = serializer.into_serializer().into_inner();
        let value = ArchivedValue::new(v);

        let mut txn = self.env.begin_rw_txn()?;
        let flags = WriteFlags::empty();
        self.db.put(&mut txn, &uid.as_bytes(), &value, flags)?;
        txn.commit()?;
        Ok(())
    }
    /// Serialize and store `user` under `uid` inside a caller-provided
    /// transaction (not committed here).
    pub fn put_txn(
        &self,
        txn: &mut RwTransaction,
        uid: &str,
        user: &User,
    ) -> Result<(), db::Error> {
        let mut serializer = AllocSerializer::<1024>::default();
        serializer.serialize_value(user).expect("rkyv error");
        let v = serializer.into_serializer().into_inner();
        let value = ArchivedValue::new(v);

        let flags = WriteFlags::empty();
        self.db.put(txn, &uid.as_bytes(), &value, flags)?;
        Ok(())
    }
    /// Delete the record for `uid` in its own transaction.
    pub fn delete(&self, uid: &str) -> Result<(), db::Error> {
        let mut txn = self.env.begin_rw_txn()?;
        self.db.del(&mut txn, &uid)?;
        txn.commit()?;
        Ok(())
    }
    /// Remove all records, inside a caller-provided transaction.
    pub fn clear_txn(&self, txn: &mut RwTransaction) -> Result<(), db::Error> {
        // TODO: why was the result ignored here?
        self.db.clear(txn)
    }
    /// Read and deserialize every stored user, keyed by id.
    pub fn get_all(&self) -> Result<HashMap<String, UserData>, db::Error> {
        let txn = self.env.begin_ro_txn()?;
        let iter = self.db.get_all(&txn)?;
        let mut out = HashMap::new();
        for (uid, user) in iter {
            // SAFETY(review): assumes keys were stored as valid UTF-8 (see
            // `put`, which writes `uid.as_bytes()`); invalid UTF-8 here would
            // be UB — consider `from_utf8_lossy` instead.
            let uid = unsafe { std::str::from_utf8_unchecked(uid).to_string() };
            let user: User =
                Deserialize::<User, _>::deserialize(user.as_ref(), &mut Infallible).unwrap();
            out.insert(uid, user.userdata);
        }
        Ok(out)
    }
}

235
bffhd/users/mod.rs Normal file
View File

@ -0,0 +1,235 @@
use std::fs;
use lmdb::{Environment, Transaction};
use once_cell::sync::OnceCell;
use rkyv::{Archive, Deserialize, Infallible, Serialize};
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
use std::io::Write;
use miette::{Diagnostic, IntoDiagnostic, SourceSpan};
use std::path::Path;
use std::sync::Arc;
use thiserror::Error;
pub mod db;
use crate::users::db::UserData;
use crate::UserDB;
#[derive(
    Clone,
    PartialEq,
    Eq,
    Debug,
    Archive,
    Serialize,
    Deserialize,
    serde::Serialize,
    serde::Deserialize,
)]
#[archive_attr(derive(Debug, PartialEq))]
/// Lightweight reference to a user: just the user id, without the full record.
pub struct UserRef {
    /// The referenced user's unique id.
    pub id: String,
}
/// Compare a live `UserRef` against its rkyv-archived form by id.
impl PartialEq<ArchivedUserRef> for UserRef {
    fn eq(&self, other: &ArchivedUserRef) -> bool {
        self.id == other.id
    }
}
/// Symmetric comparison (archived vs. live).
impl PartialEq<UserRef> for ArchivedUserRef {
    fn eq(&self, other: &UserRef) -> bool {
        self.id == other.id
    }
}
/// Display an archived user reference as its plain user id.
impl Display for ArchivedUserRef {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.id.as_str())
    }
}
impl UserRef {
pub fn new(id: String) -> Self {
UserRef { id }
}
pub fn get_username(&self) -> &str {
self.id.as_str()
}
}
// Process-wide singleton user database; initialized once in `Users::new`.
static USERDB: OnceCell<UserDB> = OnceCell::new();
/// Cheap (`Copy`) handle to the global user database.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct Users {
    userdb: &'static UserDB,
}
/// Transparent wrapper around the database error so it carries miette diagnostics.
#[derive(Clone, Debug, PartialEq, Eq, Error, Diagnostic)]
#[error(transparent)]
#[repr(transparent)]
pub struct Error(#[from] pub db::Error);
impl Users {
    /// Create a `Users` handle backed by the process-wide user database.
    ///
    /// The underlying `UserDB` is created at most once per process; later
    /// calls reuse the global instance.
    pub fn new(env: Arc<Environment>) -> Result<Self, Error> {
        let span = tracing::debug_span!("users", ?env, "Creating Users handle");
        let _guard = span.enter();

        let userdb = USERDB.get_or_try_init(|| {
            tracing::debug!("Global resource not yet initialized, initializing…");
            unsafe { UserDB::create(env) }
        })?;
        Ok(Self { userdb })
    }

    /// Access the underlying database handle directly.
    pub(crate) fn into_inner(self) -> &'static UserDB {
        self.userdb
    }

    /// Look up and deserialize a user record by id.
    pub fn get_user(&self, uid: &str) -> Option<db::User> {
        tracing::trace!(uid, "Looking up user");
        self.userdb.get(uid).unwrap().map(|user| {
            Deserialize::<db::User, _>::deserialize(user.as_ref(), &mut Infallible).unwrap()
        })
    }

    /// Store (create or overwrite) a user record.
    pub fn put_user(&self, uid: &str, user: &db::User) -> Result<(), crate::db::Error> {
        tracing::trace!(uid, ?user, "Updating user");
        self.userdb.put(uid, user)
    }

    /// Delete a user record by id.
    pub fn del_user(&self, uid: &str) -> Result<(), crate::db::Error> {
        tracing::trace!(uid, "Deleting user");
        self.userdb.delete(uid)
    }

    /// Replace the entire user database with the contents of a TOML file.
    ///
    /// Plaintext passwords in the file are hashed; entries already in argon2
    /// format are stored as-is.
    pub fn load_file(&self, path_str: &str) -> miette::Result<()> {
        let path: &Path = Path::new(path_str);
        if path.is_dir() {
            #[derive(Debug, Error, Diagnostic)]
            #[error("load takes a file, not a directory")]
            #[diagnostic(
                code(load::file),
                url("https://gitlab.com/fabinfra/fabaccess/bffh/-/issues/55")
            )]
            struct LoadIsDirError {
                #[source_code]
                src: String,
                #[label("path provided")]
                dir_path: SourceSpan,
                #[help]
                help: String,
            }
            // Return the diagnostic directly; the previous `Err(..)?` followed
            // by `return Ok(())` left an unreachable statement behind.
            return Err(LoadIsDirError {
                src: format!("--load {}", path_str),
                // `str::len` already is the byte length.
                dir_path: (7, path_str.len()).into(),
                help: format!(
                    "Provide a path to a file instead, e.g. {}/users.toml",
                    path_str
                ),
            }
            .into());
        }
        let f = std::fs::read(path).into_diagnostic()?;
        let map: HashMap<String, UserData> = toml::from_slice(&f).into_diagnostic()?;

        let mut txn = unsafe { self.userdb.get_rw_txn()? };
        self.userdb.clear_txn(&mut txn)?;
        for (uid, mut userdata) in map {
            userdata.passwd = userdata.passwd.map(|pw| {
                if !pw.starts_with("$argon2") {
                    let config = argon2::Config::default();
                    let salt: [u8; 16] = rand::random();
                    // `unwrap_or_else` avoids building the panic message on
                    // the happy path (clippy::expect_fun_call).
                    let hash = argon2::hash_encoded(pw.as_bytes(), &salt, &config)
                        .unwrap_or_else(|e| {
                            panic!("Failed to hash password for {}: {}", uid, e)
                        });
                    tracing::debug!("Hashed pw for {} to {}", uid, hash);
                    hash
                } else {
                    pw
                }
            });
            let user = db::User {
                id: uid.clone(),
                userdata,
            };
            tracing::trace!(%uid, ?user, "Storing user object");
            if let Err(e) = self.userdb.put_txn(&mut txn, uid.as_str(), &user) {
                tracing::warn!(error=?e, "failed to add user")
            }
        }
        txn.commit().map_err(crate::db::Error::from)?;
        Ok(())
    }

    /// Replace the entire user database with an in-memory dump
    /// (as produced by [`Users::dump_map`]).
    pub fn load_map(&mut self, dump: &HashMap<String, UserData>) -> miette::Result<()> {
        let mut txn = unsafe { self.userdb.get_rw_txn() }?;
        self.userdb.clear_txn(&mut txn)?;
        for (uid, data) in dump {
            let user = db::User {
                id: uid.clone(),
                userdata: data.clone(),
            };
            tracing::trace!(%uid, ?user, "Storing user object");
            if let Err(e) = self.userdb.put_txn(&mut txn, uid.as_str(), &user) {
                tracing::warn!(error=?e, "failed to add user")
            }
        }
        txn.commit().map_err(crate::db::Error::from)?;
        Ok(())
    }

    /// Dump all users (keyed by id) from the database.
    pub fn dump_map(&self) -> miette::Result<HashMap<String, UserData>> {
        Ok(self.userdb.get_all()?)
    }

    /// Serialize all users to a TOML file at `path_str`.
    ///
    /// Refuses to overwrite an existing file unless `force` is set.
    pub fn dump_file(&self, path_str: &str, force: bool) -> miette::Result<usize> {
        let path = Path::new(path_str);
        if path.exists() {
            if !force {
                #[derive(Debug, Error, Diagnostic)]
                #[error("given file already exists, refusing to clobber")]
                #[diagnostic(code(dump::clobber))]
                struct DumpFileExists {
                    #[source_code]
                    src: String,
                    #[label("file provided")]
                    dir_path: SourceSpan,
                    #[help]
                    help: &'static str,
                }
                // NOTE(review): the source label says "--load" but this is the
                // dump path — likely should read "--dump"; confirm CLI flag.
                return Err(DumpFileExists {
                    src: format!("--load {}", path_str),
                    dir_path: (7, path_str.len()).into(),
                    help: "to force overwriting the file add `--force` as argument",
                }
                .into());
            } else {
                tracing::info!("output file already exists, overwriting due to `--force`");
            }
        }

        let mut file = fs::File::create(path).into_diagnostic()?;
        let users = self.dump_map()?;
        let encoded = toml::ser::to_vec(&users).into_diagnostic()?;
        file.write_all(&encoded[..]).into_diagnostic()?;
        // NOTE(review): this has always reported 0; callers expecting a count
        // may want `users.len()` — kept as-is to avoid changing observable
        // behavior.
        Ok(0)
    }
}

58
bffhd/utils/l10nstring.rs Normal file
View File

@ -0,0 +1,58 @@
use std::collections::HashMap;
use once_cell::sync::Lazy;
/// Static table of message translations, keyed first by message id, then by
/// language code.
struct Locales {
    map: HashMap<&'static str, HashMap<&'static str, &'static str>>,
}
impl Locales {
    /// Look up the translation of `msg` for `lang`, returning the canonical
    /// language key together with the translated content.
    pub fn get(&self, lang: &str, msg: &str) -> Option<(&'static str, &'static str)> {
        let translations = self.map.get(msg)?;
        let (key, value) = translations.get_key_value(lang)?;
        Some((*key, *value))
    }
    /// Languages `_msg` is available in (no translations are registered yet).
    pub fn available(&self, _msg: &str) -> &[&'static str] {
        &[]
    }
}
// Global translation table; currently empty — no translations are registered.
static LANG: Lazy<Locales> = Lazy::new(|| Locales {
    map: HashMap::new(),
});
// A message id bound to the global translation table; used by the (currently
// commented-out) capnp `l10n::Server` implementation below.
struct L10NString {
    msg: &'static str,
}
/*
impl l10n::Server for L10NString {
fn get(&mut self, params: l10n::GetParams, mut results: l10n::GetResults)
-> Promise<(), Error>
{
let lang = pry!(pry!(params.get()).get_lang());
if let Some((lang, content)) = LANG.get(lang, &self.msg) {
let mut builder = results.get();
builder.set_lang(lang);
builder.set_content(content);
}
Promise::ok(())
}
fn available(&mut self, _: l10n::AvailableParams, mut results: l10n::AvailableResults)
-> Promise<(), Error>
{
let langs = LANG.available(self.msg);
let builder = results.get();
let mut lb = builder.init_langs(langs.len() as u32);
for (n, lang) in langs.into_iter().enumerate() {
lb.reborrow().set(n as u32, *lang);
}
Promise::ok(())
}
}
*/

60
bffhd/utils/linebuffer.rs Normal file
View File

@ -0,0 +1,60 @@
use std::ops::{Deref, DerefMut};
/// Growable byte buffer that tracks how many leading bytes hold valid data;
/// intended for incremental, line-oriented protocol reading.
pub struct LineBuffer {
    buffer: Vec<u8>,
    // Number of bytes at the start of `buffer` that contain valid data.
    valid: usize,
}

impl LineBuffer {
    /// Create an empty buffer (no allocation until first write).
    pub fn new() -> Self {
        Self {
            buffer: Vec::new(),
            valid: 0,
        }
    }

    /// Grow the Vec so that `buffer.len() == buffer.capacity()`, zero-filling
    /// the new tail.
    ///
    /// The previous implementation used `Vec::set_len` over uninitialized
    /// memory, which is undefined behavior (it hands out uninitialized bytes
    /// through a `&mut [u8]`); zero-filling via `resize` is sound and cheap.
    fn fill_to_capacity(&mut self) {
        let cap = self.buffer.capacity();
        self.buffer.resize(cap, 0);
    }

    /// Get an (initialized but empty) writeable buffer of at least `atleast` bytes
    pub fn get_mut_write(&mut self, atleast: usize) -> &mut [u8] {
        let avail = self.buffer.len() - self.valid;
        if avail < atleast {
            self.buffer.reserve(atleast - avail);
            self.fill_to_capacity()
        }
        &mut self.buffer[self.valid..]
    }

    /// Mark `amount` additional bytes — previously written through
    /// `get_mut_write` — as valid.
    pub fn advance_valid(&mut self, amount: usize) {
        self.valid += amount
    }

    /// Mark `amount` bytes as 'consumed'
    ///
    /// This will move any remaining data to the start of the buffer for future processing
    ///
    /// # Panics
    /// Panics if `amount` exceeds the number of valid bytes.
    pub fn consume(&mut self, amount: usize) {
        assert!(amount <= self.valid);
        if amount < self.valid {
            self.buffer.copy_within(amount..self.valid, 0);
        }
        self.valid -= amount;
    }
}

/// Deref to the valid prefix only.
impl Deref for LineBuffer {
    type Target = [u8];
    fn deref(&self) -> &Self::Target {
        &self.buffer[0..self.valid]
    }
}

impl DerefMut for LineBuffer {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.buffer[0..self.valid]
    }
}

12
bffhd/utils/mod.rs Normal file
View File

@ -0,0 +1,12 @@
/// ITU Object Identifier implementation
pub mod oid;
/// Variable sized integer types
pub mod varint;
/// Localization strings
pub mod l10nstring;
pub mod uuid;
pub mod linebuffer;

853
bffhd/utils/oid.rs Normal file
View File

@ -0,0 +1,853 @@
//! oid crate by <https://github.com/UnnecessaryEngineering/oid> turned into vendored module
//!
//! [Object Identifiers] are a standard of the [ITU] used to reference objects, things, and
//! concepts in a globally unique way. This crate provides for data structures and methods
//! to build, parse, and format OIDs.
//!
//!
//! ## Parsing OID String Representation
//! ```ignore
//! use crate::oid::prelude::*;
//!
//! fn main() -> Result<(), ObjectIdentifierError> {
//! let oid = ObjectIdentifier::try_from("0.1.2.3")?;
//! Ok(())
//! }
//! ```
//!
//! ## Parsing OID Binary Representation
//! ```ignore
//! use prelude::*;
//!
//! fn main() -> Result<(), ObjectIdentifierError> {
//! let oid = ObjectIdentifier::try_from(vec![0x00, 0x01, 0x02, 0x03])?;
//! Ok(())
//! }
//! ```
//!
//! ## Encoding OID as String Representation
//! ```ignore
//! use prelude::*;
//!
//! fn main() -> Result<(), ObjectIdentifierError> {
//! let oid = ObjectIdentifier::try_from("0.1.2.3")?;
//! let oid: String = oid.into();
//! assert_eq!(oid, "0.1.2.3");
//! Ok(())
//! }
//! ```
//!
//! ## Encoding OID as Binary Representation
//! ```ignore
//! use oid::prelude::*;
//!
//! fn main() -> Result<(), ObjectIdentifierError> {
//! let oid = ObjectIdentifier::try_from(vec![0x00, 0x01, 0x02, 0x03])?;
//! let oid: Vec<u8> = oid.into();
//! assert_eq!(oid, vec![0x00, 0x01, 0x02, 0x03]);
//! Ok(())
//! }
//! ```
//!
//! [Object Identifiers]: https://en.wikipedia.org/wiki/Object_identifier
//! [ITU]: https://en.wikipedia.org/wiki/International_Telecommunications_Union
use crate::utils::varint::VarU128;
use rkyv::ser::Serializer;
use rkyv::vec::{ArchivedVec, VecResolver};
use rkyv::{Archive, Serialize};
use std::convert::TryFrom;
use std::convert::TryInto;
use std::fmt;
use std::fmt::Formatter;
use std::ops::Deref;
use std::str::FromStr;
type Node = u128;
type VarNode = VarU128;
/// Convenience module for quickly importing the public interface (e.g., `use oid::prelude::*`)
pub mod prelude {
    pub use super::ObjectIdentifier;
    pub use super::ObjectIdentifierError;
    // Re-exports the root variants (ItuT, Iso, JointIsoItuT) unqualified.
    pub use super::ObjectIdentifierRoot::*;
    pub use core::convert::{TryFrom, TryInto};
}
/// The three standard OID root arcs defined by the ITU/ISO.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
#[repr(u8)]
pub enum ObjectIdentifierRoot {
    ItuT = 0,
    Iso = 1,
    JointIsoItuT = 2,
}
impl Into<String> for ObjectIdentifierRoot {
fn into(self) -> String {
format!("{}", self as u8)
}
}
impl TryFrom<u8> for ObjectIdentifierRoot {
type Error = ObjectIdentifierError;
fn try_from(value: u8) -> Result<ObjectIdentifierRoot, Self::Error> {
match value {
0 => Ok(ObjectIdentifierRoot::ItuT),
1 => Ok(ObjectIdentifierRoot::Iso),
2 => Ok(ObjectIdentifierRoot::JointIsoItuT),
_ => Err(ObjectIdentifierError::IllegalRootNode),
}
}
}
/// Object Identifier Errors
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum ObjectIdentifierError {
    /// Failed to parse OID due to illegal root node (must be 0-2 decimal)
    IllegalRootNode,
    /// Failed to parse OID due to illegal first node (must be 0-39 decimal)
    IllegalFirstChildNode,
    /// Failed to parse OID due to illegal child node value (except first node)
    IllegalChildNodeValue,
}
/// Object Identifier (OID)
#[derive(Clone, Eq, PartialEq, Hash)]
#[repr(transparent)]
pub struct ObjectIdentifier {
    // Wire-format encoding: first byte packs root*40 + first child node,
    // remaining bytes are base-128 varints (MSB set = continuation).
    nodes: Box<[u8]>,
}
impl ObjectIdentifier {
    /// Wrap an already-encoded byte sequence without validating it.
    #[inline(always)]
    pub const fn new_unchecked(nodes: Box<[u8]>) -> Self {
        Self { nodes }
    }

    /// Validate and wrap an encoded byte sequence.
    ///
    /// Checks the root arc and that no multi-byte (varint) child node
    /// overflows the `Node` (u128) type.
    // NOTE(review): a trailing byte with the continuation bit set (truncated
    // varint) is not rejected here — confirm whether that is intentional.
    pub fn from_box(nodes: Box<[u8]>) -> Result<Self, ObjectIdentifierError> {
        if nodes.is_empty() {
            return Err(ObjectIdentifierError::IllegalRootNode);
        }
        ObjectIdentifierRoot::try_from(nodes[0] / 40)?;

        let mut parsing_big_int = false;
        let mut big_int: Node = 0;
        for i in 1..nodes.len() {
            if !parsing_big_int && nodes[i] < 128 {
                // Single-byte node: nothing to validate.
            } else {
                if big_int > 0 {
                    // Guard against shifting past Node::MAX.
                    if big_int >= Node::MAX >> 7 {
                        return Err(ObjectIdentifierError::IllegalChildNodeValue);
                    }
                    big_int <<= 7;
                };
                big_int |= (nodes[i] & !0x80) as Node;
                parsing_big_int = nodes[i] & 0x80 != 0;
            }
            if big_int > 0 && !parsing_big_int {
                big_int = 0;
            }
        }
        Ok(Self { nodes })
    }

    /// Encode an OID from its root, first child node (0-39) and remaining
    /// child node values.
    pub fn build<B: AsRef<[Node]>>(
        root: ObjectIdentifierRoot,
        first: u8,
        children: B,
    ) -> Result<Self, ObjectIdentifierError> {
        // The first child node must be 0-39 (see `ObjectIdentifierError` and
        // `parse_string_first_node`); the previous `> 40` check was an
        // off-by-one that admitted 40, producing encodings that decode to a
        // different root/first pair.
        if first > 39 {
            return Err(ObjectIdentifierError::IllegalFirstChildNode);
        }
        let children = children.as_ref();
        let mut vec = Vec::with_capacity(children.len() + 1);
        vec.push((root as u8) * 40 + first);
        for child in children {
            let var: VarNode = child.into();
            vec.extend_from_slice(var.as_bytes())
        }
        Ok(Self {
            nodes: vec.into_boxed_slice(),
        })
    }

    /// The root arc this OID belongs to.
    #[inline(always)]
    pub fn root(&self) -> Result<ObjectIdentifierRoot, ObjectIdentifierError> {
        ObjectIdentifierRoot::try_from(self.nodes[0] / 40)
    }

    /// The first child node (second arc), 0-39.
    #[inline(always)]
    pub const fn first_node(&self) -> u8 {
        self.nodes[0] % 40
    }

    /// The remaining child nodes in encoded (varint) form.
    #[inline(always)]
    pub fn child_nodes(&self) -> &[u8] {
        &self.nodes[1..]
    }

    /// The full encoded byte representation.
    #[inline(always)]
    pub const fn as_bytes(&self) -> &[u8] {
        &self.nodes
    }
}
/// Deref to the raw encoded bytes (root byte followed by varint children).
impl Deref for ObjectIdentifier {
    type Target = [u8];
    fn deref(&self) -> &Self::Target {
        &self.nodes
    }
}
impl FromStr for ObjectIdentifier {
    type Err = ObjectIdentifierError;

    /// Parse a dotted-decimal OID string such as `"1.3.6.1"`.
    fn from_str(value: &str) -> Result<Self, Self::Err> {
        let mut parts = value.split('.');

        let root = parts
            .next()
            .and_then(|n| n.parse::<u8>().ok())
            .and_then(|n| n.try_into().ok())
            .ok_or(ObjectIdentifierError::IllegalRootNode)?;

        let first = parts
            .next()
            .and_then(|n| parse_string_first_node(n).ok())
            .ok_or(ObjectIdentifierError::IllegalFirstChildNode)?;

        // Collect the remaining child nodes, failing on the first component
        // that does not parse as a number.
        let children = parts
            .map(|n| {
                n.parse()
                    .map_err(|_| ObjectIdentifierError::IllegalChildNodeValue)
            })
            .collect::<Result<Vec<_>, _>>()?;

        ObjectIdentifier::build(root, first, children)
    }
}
impl fmt::Display for ObjectIdentifier {
    /// Formats as dotted-decimal, e.g. `1.3.6.1`.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let show: String = self.into();
        write!(f, "{}", show)
    }
}
impl fmt::Debug for ObjectIdentifier {
    /// Debug intentionally matches `Display` — dotted-decimal is the most
    /// readable form of an OID.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let show: String = self.into();
        write!(f, "{}", show)
    }
}
/// rkyv-archived form of [`ObjectIdentifier`]: just the encoded byte vector.
#[repr(transparent)]
pub struct ArchivedObjectIdentifier {
    archived: ArchivedVec<u8>,
}
impl Deref for ArchivedObjectIdentifier {
    type Target = [u8];
    fn deref(&self) -> &Self::Target {
        self.archived.as_slice()
    }
}
impl fmt::Debug for ArchivedObjectIdentifier {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Decode to dotted-decimal for readability; fall back to an error
        // string rather than panicking on malformed archived data.
        write!(
            f,
            "{}",
            &convert_to_string(self.archived.as_slice())
                .unwrap_or_else(|e| format!("Invalid OID: {:?}", e))
        )
    }
}
impl Archive for ObjectIdentifier {
    type Archived = ArchivedObjectIdentifier;
    type Resolver = VecResolver;
    /// # Safety
    /// Caller must uphold rkyv's `resolve` contract (`pos`/`out` valid for
    /// this value's archived location).
    unsafe fn resolve(&self, pos: usize, resolver: Self::Resolver, out: *mut Self::Archived) {
        let (oid_pos, oid_out) = rkyv::out_field!(out.archived);
        ArchivedVec::resolve_from_slice(self.nodes.as_ref(), pos + oid_pos, resolver, oid_out);
    }
}
// Same impl for `&'static ObjectIdentifier` so static OIDs can be archived
// without cloning.
impl Archive for &'static ObjectIdentifier {
    type Archived = ArchivedObjectIdentifier;
    type Resolver = VecResolver;
    /// # Safety
    /// Same contract as the impl above.
    unsafe fn resolve(&self, pos: usize, resolver: Self::Resolver, out: *mut Self::Archived) {
        let (oid_pos, oid_out) = rkyv::out_field!(out.archived);
        ArchivedVec::resolve_from_slice(self.nodes.as_ref(), pos + oid_pos, resolver, oid_out);
    }
}
// Serialization delegates to the archived-Vec representation of the raw bytes.
impl<S: Serializer + ?Sized> Serialize<S> for ObjectIdentifier
where
    [u8]: rkyv::SerializeUnsized<S>,
{
    fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
        ArchivedVec::serialize_from_slice(self.nodes.as_ref(), serializer)
    }
}
/// Parse the first child node (second arc) of a dotted-decimal OID.
///
/// Valid values are 0-39; anything else (including non-numeric input) is an
/// `IllegalFirstChildNode` error.
fn parse_string_first_node(first_child_node: &str) -> Result<u8, ObjectIdentifierError> {
    match first_child_node.parse::<u8>() {
        Ok(n) if n <= 39 => Ok(n),
        _ => Err(ObjectIdentifierError::IllegalFirstChildNode),
    }
}
impl ObjectIdentifier {
fn from_string<S>(value: S) -> Result<ObjectIdentifier, ObjectIdentifierError>
where
S: AsRef<str>,
{
ObjectIdentifier::from_str(value.as_ref())
}
}
/// Decode the wire-format byte representation into dotted-decimal text.
///
/// # Panics
/// Asserts that `nodes` is non-empty.
fn convert_to_string(nodes: &[u8]) -> Result<String, ObjectIdentifierError> {
    assert!(nodes.len() > 0);
    // The first byte packs root*40 + first child node.
    let root = nodes[0] / 40;
    let mut out = root.to_string();
    out.push('.');
    let first = nodes[0] % 40;
    out.extend(first.to_string().chars());
    // Remaining bytes are base-128 varints; a set MSB means "continues".
    let mut parsing_big_int = false;
    let mut big_int: Node = 0;
    for i in 1..nodes.len() {
        if !parsing_big_int && nodes[i] < 128 {
            // less than 7 bit of node value
            out.push('.');
            let nr = nodes[i].to_string();
            out.extend(nr.chars());
        } else {
            if big_int > 0 {
                // Guard against shifting past Node::MAX (u128 overflow).
                if big_int >= Node::MAX >> 7 {
                    return Err(ObjectIdentifierError::IllegalChildNodeValue);
                }
                big_int <<= 7;
            };
            big_int += (nodes[i] & !0x80) as Node;
            parsing_big_int = nodes[i] & 0x80 != 0;
        }
        // Once the continuation bit clears, the accumulated multi-byte node
        // is complete and gets appended.
        if big_int > 0 && !parsing_big_int {
            out.push('.');
            out.extend(big_int.to_string().chars());
            big_int = 0;
        }
    }
    Ok(out)
}
/// Render as dotted-decimal text.
///
/// Implemented as `From` rather than `Into`; the standard blanket impl still
/// provides `Into` for existing callers (e.g. `self.into()` in `Display`).
impl From<&ObjectIdentifier> for String {
    fn from(oid: &ObjectIdentifier) -> String {
        convert_to_string(&oid.nodes).expect("Valid OID object couldn't be serialized.")
    }
}
impl From<ObjectIdentifier> for String {
    fn from(oid: ObjectIdentifier) -> String {
        (&oid).into()
    }
}
/// Borrow the raw encoded bytes.
impl<'a> From<&'a ObjectIdentifier> for &'a [u8] {
    fn from(oid: &'a ObjectIdentifier) -> &'a [u8] {
        &oid.nodes
    }
}
/// Consume the OID, yielding its encoded bytes.
impl From<ObjectIdentifier> for Vec<u8> {
    fn from(oid: ObjectIdentifier) -> Vec<u8> {
        oid.nodes.into_vec()
    }
}
impl TryFrom<&str> for ObjectIdentifier {
type Error = ObjectIdentifierError;
fn try_from(value: &str) -> Result<ObjectIdentifier, Self::Error> {
ObjectIdentifier::from_string(value)
}
}
impl TryFrom<String> for ObjectIdentifier {
type Error = ObjectIdentifierError;
fn try_from(value: String) -> Result<ObjectIdentifier, Self::Error> {
ObjectIdentifier::from_string(value)
}
}
impl TryFrom<&[u8]> for ObjectIdentifier {
    type Error = ObjectIdentifierError;

    /// Validates a BER-encoded byte slice as an OID, copying it into a box.
    fn try_from(nodes: &[u8]) -> Result<ObjectIdentifier, Self::Error> {
        let boxed: Box<[u8]> = nodes.into();
        Self::from_box(boxed)
    }
}
impl TryFrom<Vec<u8>> for ObjectIdentifier {
    type Error = ObjectIdentifierError;

    /// Validates an owned BER-encoded byte vector as an OID.
    fn try_from(value: Vec<u8>) -> Result<ObjectIdentifier, Self::Error> {
        let boxed: Box<[u8]> = value.into();
        Self::from_box(boxed)
    }
}
mod serde_support {
use super::*;
use core::fmt;
use serde::{de, ser};
struct OidVisitor;
impl<'de> de::Visitor<'de> for OidVisitor {
type Value = ObjectIdentifier;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a valid buffer representing an OID")
}
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: de::Error,
{
ObjectIdentifier::try_from(v).map_err(|err| {
E::invalid_value(
de::Unexpected::Other(match err {
ObjectIdentifierError::IllegalRootNode => "illegal root node",
ObjectIdentifierError::IllegalFirstChildNode => "illegal first child node",
ObjectIdentifierError::IllegalChildNodeValue => "illegal child node value",
}),
&"a valid buffer representing an OID",
)
})
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
ObjectIdentifier::try_from(v).map_err(|err| {
E::invalid_value(
de::Unexpected::Other(match err {
ObjectIdentifierError::IllegalRootNode => "illegal root node",
ObjectIdentifierError::IllegalFirstChildNode => "illegal first child node",
ObjectIdentifierError::IllegalChildNodeValue => "illegal child node value",
}),
&"a string representing an OID",
)
})
}
}
impl<'de> de::Deserialize<'de> for ObjectIdentifier {
fn deserialize<D>(deserializer: D) -> Result<ObjectIdentifier, D::Error>
where
D: de::Deserializer<'de>,
{
if deserializer.is_human_readable() {
deserializer.deserialize_str(OidVisitor)
} else {
deserializer.deserialize_bytes(OidVisitor)
}
}
}
impl ser::Serialize for ObjectIdentifier {
fn serialize<S>(
&self,
serializer: S,
) -> Result<<S as ser::Serializer>::Ok, <S as ser::Serializer>::Error>
where
S: ser::Serializer,
{
if serializer.is_human_readable() {
let encoded: String = self.into();
serializer.serialize_str(&encoded)
} else {
serializer.serialize_bytes(self.as_bytes())
}
}
}
impl ser::Serialize for ArchivedObjectIdentifier {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
if serializer.is_human_readable() {
let encoded: String =
convert_to_string(self.deref()).expect("Failed to convert valid OID to String");
serializer.serialize_str(&encoded)
} else {
serializer.serialize_bytes(self.deref())
}
}
}
}
#[cfg(test)]
pub(crate) mod tests {
    //! Round-trip and error-path tests for OID building, binary/string
    //! encoding and parsing.
    use super::*;
    use std::convert::TryInto;

    /// Builds a random `2.25.<x>...` OID with 1..=10 random child nodes.
    pub(crate) fn gen_random() -> ObjectIdentifier {
        let amt: u8 = rand::random::<u8>() % 10 + 1;
        let mut children = Vec::new();
        for _ in 0..amt {
            children.push(rand::random());
        }
        ObjectIdentifier::build(ObjectIdentifierRoot::JointIsoItuT, 25, children).unwrap()
    }

    // --- binary encoding -------------------------------------------------
    #[test]
    fn encode_binary_root_node_0() {
        let expected: Vec<u8> = vec![0];
        let oid = ObjectIdentifier::build(ObjectIdentifierRoot::ItuT, 0x00, vec![]).unwrap();
        let actual: Vec<u8> = oid.into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn encode_binary_root_node_1() {
        let expected: Vec<u8> = vec![40];
        let oid = ObjectIdentifier::build(ObjectIdentifierRoot::Iso, 0x00, vec![]).unwrap();
        let actual: Vec<u8> = oid.into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn encode_binary_root_node_2() {
        let expected: Vec<u8> = vec![80];
        let oid =
            ObjectIdentifier::build(ObjectIdentifierRoot::JointIsoItuT, 0x00, vec![]).unwrap();
        let actual: Vec<u8> = oid.into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn encode_binary_example_1() {
        let expected: Vec<u8> = vec![0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0D, 0x15];
        let oid = ObjectIdentifier::build(
            ObjectIdentifierRoot::ItuT,
            0x01,
            vec![1, 2, 3, 5, 8, 13, 21],
        )
        .unwrap();
        let actual: Vec<u8> = oid.into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn encode_binary_example_2() {
        let expected: Vec<u8> = vec![
            0x77, 0x2A, 0x93, 0x45, 0x83, 0xFF, 0x7F, 0x87, 0xFF, 0xFF, 0xFF, 0x7F, 0x89, 0x53,
            0x92, 0x30,
        ];
        let oid = ObjectIdentifier::build(
            ObjectIdentifierRoot::JointIsoItuT,
            39,
            vec![42, 2501, 65535, 2147483647, 1235, 2352],
        )
        .unwrap();
        let actual: Vec<u8> = (oid).into();
        assert_eq!(expected, actual);
    }

    // --- string encoding -------------------------------------------------
    #[test]
    fn encode_string_root_node_0() {
        let expected = "0.0";
        let oid = ObjectIdentifier::build(ObjectIdentifierRoot::ItuT, 0x00, vec![]).unwrap();
        let actual: String = (oid).into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn encode_string_root_node_1() {
        let expected = "1.0";
        let oid = ObjectIdentifier::build(ObjectIdentifierRoot::Iso, 0x00, vec![]).unwrap();
        let actual: String = (&oid).into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn encode_string_root_node_2() {
        let expected = "2.0";
        let oid =
            ObjectIdentifier::build(ObjectIdentifierRoot::JointIsoItuT, 0x00, vec![]).unwrap();
        let actual: String = (&oid).into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn encode_string_example_1() {
        let expected = "0.1.1.2.3.5.8.13.21";
        let oid = ObjectIdentifier::build(
            ObjectIdentifierRoot::ItuT,
            0x01,
            vec![1, 2, 3, 5, 8, 13, 21],
        )
        .unwrap();
        let actual: String = (&oid).into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn encode_string_example_2() {
        let expected = "2.39.42.2501.65535.2147483647.1235.2352";
        let oid = ObjectIdentifier::build(
            ObjectIdentifierRoot::JointIsoItuT,
            39,
            vec![42, 2501, 65535, 2147483647, 1235, 2352],
        )
        .unwrap();
        let actual: String = (&oid).into();
        assert_eq!(expected, actual);
    }

    // --- binary parsing --------------------------------------------------
    #[test]
    fn parse_binary_root_node_0() {
        let expected = ObjectIdentifier::build(ObjectIdentifierRoot::ItuT, 0x00, vec![]);
        let actual = vec![0x00].try_into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_binary_root_node_1() {
        let expected = ObjectIdentifier::build(ObjectIdentifierRoot::Iso, 0x00, vec![]);
        let actual = vec![40].try_into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_binary_root_node_2() {
        let expected = ObjectIdentifier::build(ObjectIdentifierRoot::JointIsoItuT, 0x00, vec![]);
        let actual = vec![80].try_into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_binary_example_1() {
        let expected = ObjectIdentifier::build(
            ObjectIdentifierRoot::ItuT,
            0x01,
            vec![1, 2, 3, 5, 8, 13, 21],
        );
        let actual = vec![0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0D, 0x15].try_into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_binary_example_2() {
        let expected = ObjectIdentifier::build(
            ObjectIdentifierRoot::JointIsoItuT,
            39,
            vec![42, 2501, 65535, 2147483647, 1235, 2352],
        );
        let actual = vec![
            0x77, 0x2A, 0x93, 0x45, 0x83, 0xFF, 0x7F, 0x87, 0xFF, 0xFF, 0xFF, 0x7F, 0x89, 0x53,
            0x92, 0x30,
        ]
        .try_into();
        assert_eq!(expected, actual);
    }

    // --- string parsing --------------------------------------------------
    #[test]
    fn parse_string_root_node_0() {
        let expected = ObjectIdentifier::build(ObjectIdentifierRoot::ItuT, 0x00, vec![]);
        let actual = "0.0".try_into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_string_root_node_1() {
        let expected = ObjectIdentifier::build(ObjectIdentifierRoot::Iso, 0x00, vec![]);
        let actual = "1.0".try_into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_string_root_node_2() {
        let expected = ObjectIdentifier::build(ObjectIdentifierRoot::JointIsoItuT, 0x00, vec![]);
        let actual = "2.0".try_into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_string_example_1() {
        let expected = ObjectIdentifier::build(
            ObjectIdentifierRoot::ItuT,
            0x01,
            vec![1, 2, 3, 5, 8, 13, 21],
        );
        let actual = "0.1.1.2.3.5.8.13.21".try_into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_string_example_2() {
        let expected = ObjectIdentifier::build(
            ObjectIdentifierRoot::JointIsoItuT,
            39,
            vec![42, 2501, 65535, 2147483647, 1235, 2352],
        );
        let actual = "2.39.42.2501.65535.2147483647.1235.2352".try_into();
        assert_eq!(expected, actual);
    }

    // --- error paths -----------------------------------------------------
    #[test]
    fn illegal_oid_root() {
        let expected = Err(ObjectIdentifierError::IllegalRootNode);
        // Inclusive upper bound so 255 is covered too (matches
        // `parse_string_root_node_3plus`); the exclusive range skipped it.
        for i in 3..=u8::MAX {
            let actual = ObjectIdentifierRoot::try_from(i);
            assert_eq!(expected, actual);
        }
    }
    #[test]
    fn illegal_first_node_too_large() {
        let expected = Err(ObjectIdentifierError::IllegalFirstChildNode);
        // Inclusive upper bound so 255 is covered too.
        for i in 40..=u8::MAX {
            let string_val = format!("{}.2.3.4", i);
            let mut nodes_iter = string_val.split(".");
            let actual = parse_string_first_node(nodes_iter.next().unwrap());
            assert_eq!(expected, actual);
        }
    }
    #[test]
    fn illegal_first_node_empty() {
        let expected = Err(ObjectIdentifierError::IllegalFirstChildNode);
        let string_val = String::new();
        let mut nodes_iter = string_val.split(".");
        let actual = parse_string_first_node(nodes_iter.next().unwrap());
        assert_eq!(expected, actual);
    }
    #[test]
    fn illegal_first_node_large() {
        let expected = Err(ObjectIdentifierError::IllegalFirstChildNode);
        let string_val = String::from("40");
        let mut nodes_iter = string_val.split(".");
        let actual = parse_string_first_node(nodes_iter.next().unwrap());
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_string_crap() {
        let expected: Result<ObjectIdentifier, ObjectIdentifierError> =
            Err(ObjectIdentifierError::IllegalRootNode);
        let actual = "wtf".try_into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_string_empty() {
        let expected: Result<ObjectIdentifier, ObjectIdentifierError> =
            Err(ObjectIdentifierError::IllegalRootNode);
        let actual = String::new().try_into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_binary_empty() {
        let expected: Result<ObjectIdentifier, ObjectIdentifierError> =
            Err(ObjectIdentifierError::IllegalRootNode);
        let actual = vec![].try_into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_binary_example_over_u128() {
        let expected: Result<ObjectIdentifier, ObjectIdentifierError> =
            Err(ObjectIdentifierError::IllegalChildNodeValue);
        let actual = vec![
            0x00, 0x89, 0x97, 0xBF, 0xA3, 0xB8, 0xE8, 0xB3, 0xE6, 0xFB, 0xF2, 0xEA, 0xC3, 0xCA,
            0xF2, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F,
        ]
        .try_into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_string_root_node_3plus() {
        for i in 3..=u8::MAX {
            let expected: Result<ObjectIdentifier, ObjectIdentifierError> =
                Err(ObjectIdentifierError::IllegalRootNode);
            let actual = format!("{}", i).try_into();
            assert_eq!(expected, actual);
        }
    }
    #[test]
    fn parse_string_example_over_u128() {
        let expected: Result<ObjectIdentifier, ObjectIdentifierError> =
            Err(ObjectIdentifierError::IllegalChildNodeValue);
        let actual = "1.1.349239782398732987223423423423423423423423423423434982342342342342342342324523453452345234523452345234523452345234537234987234".try_into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_string_example_first_node_over_39() {
        let expected: Result<ObjectIdentifier, ObjectIdentifierError> =
            Err(ObjectIdentifierError::IllegalFirstChildNode);
        let actual = "1.40.1.2.3".try_into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn parse_string_large_children_ok() {
        let expected = ObjectIdentifier::build(
            ObjectIdentifierRoot::JointIsoItuT,
            25,
            vec![
                190754093376743485973207716749546715206,
                255822649272987943607843257596365752308,
                15843412533224453995377625663329542022,
                6457999595881951503805148772927347934,
                19545192863105095042881850060069531734,
                195548685662657784196186957311035194990,
                233020488258340943072303499291936117654,
                193307160423854019916786016773068715190,
            ],
        )
        .unwrap();
        let actual = "2.25.190754093376743485973207716749546715206.\
            255822649272987943607843257596365752308.\
            15843412533224453995377625663329542022.\
            6457999595881951503805148772927347934.\
            19545192863105095042881850060069531734.\
            195548685662657784196186957311035194990.\
            233020488258340943072303499291936117654.\
            193307160423854019916786016773068715190"
            .try_into()
            .unwrap();
        assert_eq!(expected, actual);
    }
    #[test]
    fn encode_to_string() {
        let expected = String::from("1.2.3.4");
        let actual: String = ObjectIdentifier::build(ObjectIdentifierRoot::Iso, 2, vec![3, 4])
            .unwrap()
            .into();
        assert_eq!(expected, actual);
    }
    #[test]
    fn encode_to_bytes() {
        let expected = vec![0x2A, 0x03, 0x04];
        let actual: Vec<u8> = ObjectIdentifier::build(ObjectIdentifierRoot::Iso, 2, vec![3, 4])
            .unwrap()
            .into();
        assert_eq!(expected, actual);
    }
}

19
bffhd/utils/uuid.rs Normal file
View File

@ -0,0 +1,19 @@
use api::general_capnp::u_u_i_d::{Builder, Reader};
use uuid::Uuid;
/// Splits a `Uuid` into two `u64` halves and writes them into the Cap'n Proto
/// `UUID` builder (`uuid0` = first eight bytes, `uuid1` = last eight).
///
/// NOTE(review): uses `to_ne_bytes`/`from_ne_bytes`, i.e. native endianness.
/// This round-trips with `api_to_uuid` on the same host, but the on-wire
/// value differs between little- and big-endian machines — confirm all peers
/// agree on byte order.
pub fn uuid_to_api(uuid: Uuid, mut builder: Builder) {
    let [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p] = uuid.as_u128().to_ne_bytes();
    let lower = u64::from_ne_bytes([a, b, c, d, e, f, g, h]);
    let upper = u64::from_ne_bytes([i, j, k, l, m, n, o, p]);
    builder.set_uuid0(lower);
    builder.set_uuid1(upper);
}
/// Reassembles a `Uuid` from the two `u64` halves stored in a Cap'n Proto
/// `UUID` reader. Inverse of `uuid_to_api`.
///
/// NOTE(review): native-endian byte order, see the caveat on `uuid_to_api`.
pub fn api_to_uuid(reader: Reader) -> Uuid {
    let lower: u64 = reader.reborrow().get_uuid0();
    let upper: u64 = reader.get_uuid1();
    let [a, b, c, d, e, f, g, h] = lower.to_ne_bytes();
    let [i, j, k, l, m, n, o, p] = upper.to_ne_bytes();
    let num = u128::from_ne_bytes([a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p]);
    Uuid::from_u128(num)
}

165
bffhd/utils/varint.rs Normal file
View File

@ -0,0 +1,165 @@
use std::default::Default;
use std::ops::Deref;
/// A variable-length unsigned integer: base-128 septets, most significant
/// first, with the 0x80 continuation bit set on every byte except the last.
///
/// The encoding is right-aligned in the fixed `N`-byte buffer; `offset` is
/// the index of the first significant byte, so the encoded form is
/// `bytes[offset..]`.
#[derive(Debug)]
pub struct VarUInt<const N: usize> {
    offset: usize,
    bytes: [u8; N],
}
impl<const N: usize> VarUInt<N> {
    /// Wraps a prefilled buffer; `offset` marks the first significant byte.
    #[inline(always)]
    const fn new(bytes: [u8; N], offset: usize) -> Self {
        Self { bytes, offset }
    }
    /// The encoded bytes, without the unused leading part of the buffer.
    #[inline(always)]
    pub fn as_bytes(&self) -> &[u8] {
        &self.bytes[self.offset..]
    }
    /// Mutable view over the whole backing buffer (including unused prefix);
    /// used by the encoders, which fill it from the back.
    #[inline(always)]
    fn as_mut_bytes(&mut self) -> &mut [u8] {
        &mut self.bytes[..]
    }
    /// Consumes `self`, returning the full fixed-size backing buffer.
    #[inline(always)]
    pub const fn into_bytes(self) -> [u8; N] {
        self.bytes
    }
}
impl<const N: usize> Default for VarUInt<N> {
fn default() -> Self {
Self::new([0u8; N], N)
}
}
impl<const N: usize> Deref for VarUInt<N> {
    type Target = [u8];

    /// Dereferences to the significant encoded bytes only (same slice as
    /// `as_bytes()`).
    fn deref(&self) -> &Self::Target {
        &self.bytes[self.offset..]
    }
}
/// Generates a `From<$x>` body that encodes the integer as base-128 septets,
/// filling the buffer from the back (least significant septet last). Every
/// byte except the last of the encoded slice carries the 0x80 continuation
/// bit; `offset` is left pointing at the first significant byte.
macro_rules! convert_from {
    ( $x:ty ) => {
        fn from(inp: $x) -> Self {
            let mut num = inp;
            let mut this = Self::default();
            let bytes = this.as_mut_bytes();
            let mut more = 0u8;
            let mut idx: usize = bytes.len() - 1;
            // Emit 7 bits per byte, walking backwards through the buffer.
            while num > 0x7f {
                bytes[idx] = ((num & 0x7f) as u8 | more);
                num >>= 7;
                more = 0x80;
                idx -= 1;
            }
            // Final (most significant) septet; carries the continuation bit
            // only if at least one lower septet was written before it.
            bytes[idx] = (num as u8) | more;
            this.offset = idx;
            this
        }
    };
}
/// Generates an `into` body that decodes the base-128 bytes back into `$x`,
/// starting at the least significant (last) byte and shifting by 7 more for
/// each earlier byte. The 0x80 continuation bit is masked off every byte.
macro_rules! convert_into {
    ( $x:ty ) => {
        fn into(self) -> $x {
            let mut out = 0;
            // [0,1,2,3,4,5,6,7,8,9]
            // ^ 0
            // ^offset = 5
            // ^ len = 10
            // ^---------^ # of valid bytes = (len - offset)
            // for i in offset..len ⇒ all valid idx
            let bytes = self.as_bytes();
            let len = bytes.len();
            let mut shift = 0;
            // Walk from the end of the slice towards the front.
            for neg in 1..=len {
                let idx = len - neg;
                let val = (bytes[idx] & 0x7f) as $x;
                let shifted = val << shift;
                out |= shifted;
                shift += 7;
            }
            out
        }
    };
}
/// Declares the type alias `$nt` for a `VarUInt` sized to hold any `$num`
/// (`$req` = ceil(bit-width / 7) bytes) and wires up both conversion
/// directions via the `convert_from!`/`convert_into!` macros.
macro_rules! impl_convert_from_to {
    ( $num:ty, $req:literal, $nt:ident ) => {
        pub type $nt = VarUInt<$req>;
        impl From<$num> for VarUInt<$req> {
            convert_from! { $num }
        }
        impl Into<$num> for VarUInt<$req> {
            convert_into! { $num }
        }
    };
}
// One alias per primitive width; buffer size = ceil(bit-width / 7) bytes.
impl_convert_from_to!(u8, 2, VarU8);
impl_convert_from_to!(u16, 3, VarU16);
impl_convert_from_to!(u32, 5, VarU32);
impl_convert_from_to!(u64, 10, VarU64);
impl_convert_from_to!(u128, 19, VarU128);
// Pointer-width alias, matching the compilation target's `usize`.
#[allow(dead_code)]
#[cfg(target_pointer_width = "64")]
type VarUsize = VarU64;
#[cfg(target_pointer_width = "32")]
type VarUsize = VarU32;
#[cfg(target_pointer_width = "16")]
type VarUsize = VarU16;
impl<T, const N: usize> From<&T> for VarUInt<N>
where
    T: Copy,
    VarUInt<N>: From<T>,
{
    /// Encodes from a reference by copying the integer and delegating to the
    /// by-value conversion.
    fn from(t: &T) -> Self {
        Self::from(*t)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Round-trips `u64::MAX` and a two-byte value through encode/decode.
    #[test]
    fn test_varuint() {
        let inp = u64::MAX;
        let vi: VarU64 = inp.into();
        println!("Encoded {} into {:?}", inp, vi.as_bytes());
        let outp: u64 = vi.into();
        assert_eq!(inp, outp);
        let inp = 0x80;
        let vi: VarUInt<10> = inp.into();
        println!("Encoded {} into {:?}", inp, vi.as_bytes());
        let outp: u64 = vi.into();
        assert_eq!(inp, outp);
    }
    /// Checks exact encoded bytes: single-septet values are one byte, and
    /// multi-byte values carry the 0x80 continuation bit on the first byte.
    #[test]
    fn minimal() {
        let a = 5u8;
        assert_eq!(VarU8::from(a).as_bytes(), &[a]);
        let a = 200u8;
        // 200 -> septets [1, 72] -> bytes [0x81, 0x48].
        assert_eq!(VarU8::from(a).as_bytes(), &[129, 72]);
        let inp = 128;
        let vi: VarU32 = inp.into();
        let expected: &[u8] = &[129, 0];
        assert_eq!(vi.as_bytes(), expected)
    }
}

218
bin/bffhd/main.rs Normal file
View File

@ -0,0 +1,218 @@
use clap::{Arg, Command, ValueHint};
use difluoroborane::{config, Difluoroborane};
use std::str::FromStr;
use std::{env, io, io::Write, path::PathBuf};
use nix::NixPath;
/// Entry point of the bffhd daemon.
///
/// Parses the command line, then either performs a one-shot maintenance
/// action (print/check config, dump/load databases or users) and exits, or
/// reads the configuration and runs the server.
fn main() -> miette::Result<()> {
    // Argument parsing
    // values for the name, description and version are pulled from `Cargo.toml`.
    let matches = Command::new(clap::crate_name!())
        .version(clap::crate_version!())
        .long_version(&*format!("{version}\n\
            FabAccess {apiver}\n\
            \t[{build_kind} build built on {build_time}]\n\
            \t {rustc_version}\n\t {cargo_version}",
            version=difluoroborane::env::PKG_VERSION,
            apiver="0.3",
            rustc_version=difluoroborane::env::RUST_VERSION,
            cargo_version=difluoroborane::env::CARGO_VERSION,
            build_time=difluoroborane::env::BUILD_TIME_3339,
            build_kind=difluoroborane::env::BUILD_RUST_CHANNEL))
        .about(clap::crate_description!())
        .arg(Arg::new("config")
            .help("Path to the config file to use")
            .long("config")
            .short('c')
            .takes_value(true))
        .arg(Arg::new("verbosity")
            .help("Increase logging verbosity")
            .long("verbose")
            .short('v')
            .multiple_occurrences(true)
            .max_occurrences(3)
            .conflicts_with("quiet"))
        .arg(Arg::new("quiet")
            .help("Decrease logging verbosity")
            .long("quiet")
            .conflicts_with("verbosity"))
        .arg(Arg::new("log format")
            .help("Use an alternative log formatter. Available: Full, Compact, Pretty")
            .long("log-format")
            .takes_value(true)
            .ignore_case(true)
            .possible_values(["Full", "Compact", "Pretty"]))
        .arg(Arg::new("log level")
            .help("Set the desired log levels.")
            .long("log-level")
            .takes_value(true))
        .arg(
            Arg::new("print default")
                .help("Print a default config to stdout instead of running")
                .long("print-default"))
        .arg(
            Arg::new("check config")
                .help("Check config for validity")
                .long("check"))
        // The dump/load flags are one-shot maintenance modes and therefore
        // mutually exclusive with each other.
        .arg(
            Arg::new("dump-db")
                .help("Dump all internal databases")
                .long("dump-db")
                .alias("dump")
                .conflicts_with("dump-users")
                .conflicts_with("load-users")
                .conflicts_with("load-db")
                .takes_value(true)
                .value_name("FILE")
                .value_hint(ValueHint::AnyPath)
                .default_missing_value("bffh-db.toml")
        )
        .arg(
            Arg::new("dump-users")
                .help("Dump the users db to the given file as TOML")
                .long("dump-users")
                .takes_value(true)
                .value_name("FILE")
                .value_hint(ValueHint::AnyPath)
                .default_missing_value("users.toml")
                .conflicts_with("load-users")
                .conflicts_with("load-db")
                .conflicts_with("dump-db")
        )
        .arg(
            Arg::new("force")
                .help("force ops that may clobber")
                .long("force")
        )
        .arg(
            Arg::new("load-users")
                .help("Load users into the internal databases")
                .long("load-users")
                .alias("load")
                .takes_value(true)
                .conflicts_with("dump-db")
                .conflicts_with("load-db")
                .conflicts_with("dump-users")
        )
        .arg(
            Arg::new("load-db")
                .help("Load values into the internal databases")
                .long("load-db")
                .takes_value(true)
                .conflicts_with("dump-db")
                .conflicts_with("load-users")
                .conflicts_with("dump-users"))
        .arg(Arg::new("keylog")
            .help("log TLS keys into PATH. If no path is specified the value of the envvar SSLKEYLOGFILE is used.")
            .long("tls-key-log")
            .value_name("PATH")
            .takes_value(true)
            .max_values(1)
            .min_values(0)
            .default_missing_value(""))
        .try_get_matches();
    let matches = match matches {
        Ok(m) => m,
        Err(error) => error.exit(),
    };
    let configpath = matches
        .value_of("config")
        .unwrap_or("/etc/difluoroborane.dhall");
    // Check for the --print-default option first because we don't need to do anything else in that
    // case.
    if matches.is_present("print default") {
        let config = config::Config::default();
        let encoded = serde_dhall::serialize(&config).to_string().unwrap();
        // Direct writing to fd 1 is faster but also prevents any print-formatting that could
        // invalidate the generated TOML
        let stdout = io::stdout();
        let mut handle = stdout.lock();
        handle.write_all(encoded.as_bytes()).unwrap();
        // Early return to exit.
        return Ok(());
    } else if matches.is_present("check config") {
        match config::read(&PathBuf::from_str(configpath).unwrap()) {
            Ok(c) => {
                let formatted = format!("{:#?}", c);
                // Direct writing to fd 1 is faster but also prevents any print-formatting that could
                // invalidate the generated TOML
                let stdout = io::stdout();
                let mut handle = stdout.lock();
                handle.write_all(formatted.as_bytes()).unwrap();
                // Early return to exit.
                return Ok(());
            }
            Err(e) => {
                eprintln!("{}", e);
                std::process::exit(-1);
            }
        }
    }
    let mut config = config::read(&PathBuf::from_str(configpath).unwrap())?;
    // One-shot database/user maintenance modes: each constructs the daemon,
    // performs its action, and exits without starting the server.
    if matches.is_present("dump-db") {
        let mut bffh = Difluoroborane::new(config)?;
        let fname = matches.value_of("dump-db").unwrap();
        bffh.dump_db(fname)?;
        return Ok(());
    } else if matches.is_present("load-db") {
        let mut bffh = Difluoroborane::new(config)?;
        let fname = matches.value_of("load-db").unwrap();
        bffh.load_db(fname)?;
        return Ok(());
    } else if matches.is_present("dump-users") {
        let bffh = Difluoroborane::new(config)?;
        let number = bffh.users.dump_file(
            matches.value_of("dump-users").unwrap(),
            matches.is_present("force"),
        )?;
        tracing::info!("successfully dumped {} users", number);
        return Ok(());
    } else if matches.is_present("load-users") {
        let bffh = Difluoroborane::new(config)?;
        bffh.users.load_file(matches.value_of("load-users").unwrap())?;
        tracing::info!("loaded users from {}", matches.value_of("load-users").unwrap());
        return Ok(());
    } else {
        let keylog = matches.value_of("keylog");
        // When passed an empty string (i.e no value) take the value from the env
        let keylog = if let Some("") = keylog {
            let v = env::var_os("SSLKEYLOGFILE").map(PathBuf::from);
            if v.is_none() || v.as_ref().unwrap().is_empty() {
                eprintln!("--tls-key-log set but no path configured!");
                return Ok(());
            }
            v
        } else {
            keylog.map(PathBuf::from)
        };
        config.tlskeylog = keylog;
        // CLI flags override the verbosity/format settings from the config.
        config.verbosity = matches.occurrences_of("verbosity") as isize;
        if config.verbosity == 0 && matches.is_present("quiet") {
            config.verbosity = -1;
        }
        config.logging.format = matches.value_of("log format").unwrap_or("full").to_string();
        let mut bffh = Difluoroborane::new(config)?;
        bffh.run()?;
    }
    Ok(())
}

View File

@ -1,5 +1,4 @@
fn main() { fn main() {
::capnpc::CompilerCommand::new().file("schema/connection.capnp").run().unwrap(); // Extract build-time information using the `shadow-rs` crate
::capnpc::CompilerCommand::new().file("schema/api.capnp").run().unwrap(); shadow_rs::new().unwrap();
::capnpc::CompilerCommand::new().file("schema/auth.capnp").run().unwrap();
} }

8
cargo-cross-config Normal file
View File

@ -0,0 +1,8 @@
[target.armv7-unknown-linux-gnueabihf]
linker = "arm-linux-gnueabihf-gcc"
[target.arm-unknown-linux-gnueabihf]
linker = "arm-linux-gnueabi-gcc"
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"

View File

@ -1,34 +0,0 @@
strict digraph connection {
Establish [label="TCP/SCTP connection established"];
Closed [label="TCP/SCTP connection closed"];
Establish -> Open [label=open];
Open -> Closed [label=close];
Open -> SASL [label=auth];
SASL -> SASL [label=step];
// Authentication fails
SASL -> Closed [label=fails];
// Authentication succeeds
SASL -> Authenticated [label=successful];
Open -> STARTTLS [label=starttls];
// TLS wrapping succeeds
STARTTLS -> Encrypted [label=successful];
// TLS wrapping fails
STARTTLS -> Closed [label=fails];
Authenticated -> SASL_TLS [label=starttls];
SASL_TLS -> Closed [label=fails];
SASL_TLS -> AuthEnc [label=successful];
Encrypted -> TLS_SASL [label=auth];
TLS_SASL -> TLS_SASL [label=step];
TLS_SASL -> Closed [label=fails];
TLS_SASL -> AuthEnc [label=successful];
// Only authenticated connections may open RPC. For "unauth", use the `Anonymous` SASL method.
AuthEnc -> RPC [label=bootstrap];
Authenticated -> RPC [label=bootstrap];
}

View File

@ -1,42 +0,0 @@
# Stream initiation
In a session there are two parties: The initiating entity and the receiving
entity. This terminology does not refer to information flow but rather to the
side opening a connection respectively the one listening for connection
attempts.
In the currently envisioned use-case the initiating entity is a) a client
(i.e. interactive or batch/automated program) trying to interact in some way or
other with a server b) a server trying to exchange / request information
with/from another server (i.e. federating). The receiving entity however is
already a server.
Additionally the amount and type of clients is likely to be more diverse and
less up to date than the servers.
Conclusions I draw from this:
- Clients are more likely to implement an outdated version of the communication
protocol.
- The place for backwards-compatibility should be the servers.
- Thus the client (initiating entity) should send the expected API version
first, the server then using that as a basis to decide with which API
version to answer.
# Stream negotiation
Since the receiving entity for a connection is responsible for the machines it
controls it imposes conditions for connecting either as client or as federating
server. At least every initiating entity is required to authenticate itself to
the receiving entity before attempting further actions or requesting
information. But a receiving entity can require other features, such as
transport layer encryption.
To this end a receiving entity informs the initiating entity about features that
it requires from the initiating entity before taking any further action and
features that are voluntary to negotiate but may improve qualities of the stream
(such as message compression)
A varying set of conditions implies negotiation needs to take place. Since
features potentially require a strict order (e.g. Encryption before
Authentication) negotiation has to be a multi-stage process. Further
restrictions are imposed because some features may only be offered after others
have been established (e.g. SASL authentication only becoming available after
encryption, EXTERNAL mechanism only being available to local sockets or
connections providing a certificate)

View File

@ -1,11 +0,0 @@
# API-Testsetup
wirklich nur um das API zu testen. ATM implementiert: machines::* & machine::read, authenticate
1. Ein mosquitto o.ä MQTT Server starten
1. Datenbanken füllen: `cargo run -- -c examples/bffh.dhall --load=examples`
1. Daemon starten: `cargo run -- -c examples/bffh.dhall`
1. ???
1. PROFIT!
A dockerized version of this example can be found in the docker subdirectory

View File

@ -1,42 +0,0 @@
-- { actor_connections = [] : List { _1 : Text, _2 : Text }
{ actor_connections = [{ _1 = "Testmachine", _2 = "Actor" }]
, actors =
{ Actor = { module = "Shelly", params = {=} }
}
, init_connections = [] : List { _1 : Text, _2 : Text }
--, init_connections = [{ _1 = "Initiator", _2 = "Testmachine" }]
, initiators =
{ Initiator = { module = "Dummy", params = {=} }
}
, listens =
[ { address = "127.0.0.1", port = Some 59661 }
, { address = "::1", port = Some 59661 }
]
, machines =
{ Testmachine =
{ description = Some "A test machine"
, disclose = "lab.test.read"
, manage = "lab.test.admin"
, name = "Testmachine"
, read = "lab.test.read"
, write = "lab.test.write"
},
Another =
{ description = Some "Another test machine"
, disclose = "lab.test.read"
, manage = "lab.test.admin"
, name = "Another"
, read = "lab.test.read"
, write = "lab.test.write"
},
Yetmore =
{ description = Some "Yet more test machines"
, disclose = "lab.test.read"
, manage = "lab.test.admin"
, name = "Yetmore"
, read = "lab.test.read"
, write = "lab.test.write"
}
}
, mqtt_url = "tcp://localhost:1883"
}

View File

@ -1,5 +0,0 @@
# API-Testsetup, aber mit Docker
wirklich nur um das API zu testen. ATM implementiert: machines::* & machine::read, authenticate
* run `docker-compose up` in this directory

View File

@ -1,41 +0,0 @@
{ actor_connections = [] : List { _1 : Text, _2 : Text }
-- { actor_connections = [{ _1 = "Testmachine", _2 = "Actor" }]
, actors =
{ Actor = { module = "Shelly", params = {=} }
}
, init_connections = [] : List { _1 : Text, _2 : Text }
--, init_connections = [{ _1 = "Initiator", _2 = "Testmachine" }]
, initiators =
{ Initiator = { module = "Dummy", params = {=} }
}
, listens =
[ { address = "::", port = Some 59661 }
]
, machines =
{ Testmachine =
{ description = Some "A test machine"
, disclose = "lab.test.read"
, manage = "lab.test.admin"
, name = "Testmachine"
, read = "lab.test.read"
, write = "lab.test.write"
},
Another =
{ description = Some "Another test machine"
, disclose = "lab.test.read"
, manage = "lab.test.admin"
, name = "Another"
, read = "lab.test.read"
, write = "lab.test.write"
},
Yetmore =
{ description = Some "Yet more test machines"
, disclose = "lab.test.read"
, manage = "lab.test.admin"
, name = "Yetmore"
, read = "lab.test.read"
, write = "lab.test.write"
}
}
, mqtt_url = "tcp://mqtt:1883"
}

View File

@ -1 +0,0 @@
Testuser = "secret"

View File

@ -1,19 +0,0 @@
[anotherrole]
[testrole]
permissions = [
"lab.test.*"
]
[somerole]
parents = ["testparent/lmdb"]
permissions = [
"lab.some.admin"
]
[testparent]
permissions = [
"lab.some.write",
"lab.some.read",
"lab.some.disclose",
]

View File

@ -1,11 +0,0 @@
[Testuser]
# Define them in roles.toml as well
roles = ["somerole/lmdb", "testrole/lmdb"]
# If two or more users want to use the same machine at once the higher prio
# wins
priority = 0
# You can add whatever random data you want.
# It will get stored in the `kv` field in UserData.
noot = "noot!"

View File

@ -1,13 +0,0 @@
version: "3.8"
services:
bffh:
image: registry.gitlab.com/fabinfra/fabaccess/bffh:dev-latest
ports:
- "59661:59661"
volumes:
# generate a sample config.toml by running "docker run registry.gitlab.com/fabinfra/fabaccess/bffh:dev-latest --print-default > examples/config.toml" from the project root. You may have to delete the ipv6 listen section.
- "./config:/etc/bffh"
links:
- mqtt
mqtt:
image: eclipse-mosquitto:1.6.13

View File

@ -1,11 +0,0 @@
# Integration tests with Docker
## How it works
* spawns 2 instances of our bffh container and the required mqtt broker
* spawns an additional debian to run a shell
* the containers can reach each other by their hostname
## How to start
* run `docker-compose up --exit-code-from test-manager` in this directory
* this will kill all containers when the test-manager container exits

View File

@ -1,20 +0,0 @@
{ actor_connections = [{ _1 = "Testmachine", _2 = "Actor" }]
, actors =
{ Actor = { module = "Shelly", params = {=} }
}
, init_connections = [{ _1 = "Initiator", _2 = "Testmachine" }]
, initiators =
{ Initiator = { module = "Dummy", params = {=} }
}
, listens = [{ address = "::", port = Some 59661 }]
, machines =
{ Testmachine =
{ description = Some "A test machine"
, disclose = "lab.test.read"
, manage = "lab.test.admin"
, name = "Testmachine"
, read = "lab.test.read"
, write = "lab.test.write"
} }
, mqtt_url = "tcp://mqtt-a:1883"
}

View File

@ -1 +0,0 @@
Testuser = "secret"

View File

@ -1,20 +0,0 @@
[testrole]
name = "Testrole"
permissions = [
"lab.test.*"
]
[somerole]
name = "Somerole"
parents = ["testparent%lmdb"]
permissions = [
"lab.some.admin"
]
[testparent]
name = "Testparent"
permissions = [
"lab.some.write",
"lab.some.read",
"lab.some.disclose",
]

View File

@ -1,11 +0,0 @@
[Testuser]
# Define them in roles.toml as well
roles = []
# If two or more users want to use the same machine at once the higher prio
# wins
priority = 0
# You can add whatever random data you want.
# It will get stored in the `kv` field in UserData.
noot = "noot!"

View File

@ -1,20 +0,0 @@
{ actor_connections = [{ _1 = "Testmachine", _2 = "Actor" }]
, actors =
{ Actor = { module = "Shelly", params = {=} }
}
, init_connections = [{ _1 = "Initiator", _2 = "Testmachine" }]
, initiators =
{ Initiator = { module = "Dummy", params = {=} }
}
, listens = [{ address = "::", port = Some 59661 }]
, machines =
{ Testmachine =
{ description = Some "A test machine"
, disclose = "lab.test.read"
, manage = "lab.test.admin"
, name = "Testmachine"
, read = "lab.test.read"
, write = "lab.test.write"
} }
, mqtt_url = "tcp://mqtt-b:1883"
}

View File

@ -1 +0,0 @@
Testuser = "secret"

View File

@ -1,20 +0,0 @@
[testrole]
name = "Testrole"
permissions = [
"lab.test.*"
]
[somerole]
name = "Somerole"
parents = ["testparent%lmdb"]
permissions = [
"lab.some.admin"
]
[testparent]
name = "Testparent"
permissions = [
"lab.some.write",
"lab.some.read",
"lab.some.disclose",
]

View File

@ -1,11 +0,0 @@
[Testuser]
# Define them in roles.toml as well
roles = []
# If two or more users want to use the same machine at once the higher prio
# wins
priority = 0
# You can add whatever random data you want.
# It will get stored in the `kv` field in UserData.
noot = "noot!"

View File

@ -1,26 +0,0 @@
version: "3.8"
services:
bffh-a:
image: registry.gitlab.com/fabinfra/fabaccess/bffh:dev-latest
command: ["sh", "-c", "diflouroborane -c /etc/bffh/bffh.dhall --load=/etc/bffh; diflouroborane -c /etc/bffh/bffh.dhall"]
volumes:
# generate a sample config.toml by running "docker run registry.gitlab.com/fabinfra/fabaccess/bffh:dev-latest --print-default > examples/config.toml" from the project root. You may have to delete the ipv6 listen section.
- "./config_a:/etc/bffh"
links:
- mqtt-a
mqtt-a:
image: eclipse-mosquitto
bffh-b:
image: registry.gitlab.com/fabinfra/fabaccess/bffh:dev-latest
command: ["sh", "-c", "diflouroborane -c /etc/bffh/bffh.dhall --load=/etc/bffh; diflouroborane -c /etc/bffh/bffh.dhall"]
volumes:
# generate a sample config.toml by running "docker run registry.gitlab.com/fabinfra/fabaccess/bffh:dev-latest --print-default > examples/config.toml" from the project root. You may have to delete the ipv6 listen section.
- "./config_b:/etc/bffh"
links:
- mqtt-b
mqtt-b:
image: eclipse-mosquitto
test-manager:
image: debian
tty: true

View File

@ -1 +0,0 @@
Testuser = "secret"

View File

@ -1,19 +0,0 @@
[anotherrole]
[testrole]
permissions = [
"lab.test.*"
]
[somerole]
parents = ["testparent/lmdb"]
permissions = [
"lab.some.admin"
]
[testparent]
permissions = [
"lab.some.write",
"lab.some.read",
"lab.some.disclose",
]

View File

@ -1,11 +0,0 @@
[Testuser]
# Define them in roles.toml as well
roles = ["somerole/lmdb", "testrole/lmdb"]
# If two or more users want to use the same machine at once the higher prio
# wins
priority = 0
# You can add whatever random data you want.
# It will get stored in the `kv` field in UserData.
noot = "noot!"

4
i18n.toml Normal file
View File

@ -0,0 +1,4 @@
fallback-language = "en-GB"
[fluent]
assets_dir = "i18n"

11
modules/sdk/Cargo.toml Normal file
View File

@ -0,0 +1,11 @@
[package]
name = "sdk"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
sdk-proc = { path = "sdk_proc" }
futures-util = "0.3"
difluoroborane = { path = "../.." }

View File

@ -0,0 +1,22 @@
[package]
name = "sdk-proc"
version = "0.1.0"
edition = "2021"
autotests = false
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
proc-macro = true
[[test]]
name = "tests"
path = "tests/progress.rs"
[dev-dependencies]
trybuild = "1.0"
[dependencies]
proc-macro2 = "1.0"
syn = "1.0"
quote = "1.0"

View File

@ -0,0 +1,89 @@
use proc_macro::TokenStream;
use quote::{format_ident, quote};
use std::sync::Mutex;
use syn::parse::{Parse, ParseStream};
use syn::punctuated::Punctuated;
use syn::token::Brace;
use syn::{braced, parse_macro_input, Field, Ident, Token, Type, Visibility};
/// Custom keywords accepted as the argument of the `#[module(...)]`
/// attribute, e.g. `#[module(actor)]`.
mod keywords {
    syn::custom_keyword!(initiator);
    syn::custom_keyword!(actor);
    syn::custom_keyword!(sensor);
}
/// Kind of module declared via the `#[module(...)]` attribute argument.
///
/// `Nothing` corresponds to an empty argument list (`#[module]` /
/// `#[module()]`); the other variants correspond to the custom keywords
/// declared in [`keywords`].
enum ModuleAttrs {
    Nothing,
    Initiator,
    Actor,
    Sensor,
}
impl Parse for ModuleAttrs {
    /// Parses the argument of `#[module(...)]`: either nothing or exactly
    /// one of the keywords `initiator`, `actor`, `sensor`.
    ///
    /// # Errors
    /// Returns a parse error naming the accepted keywords when the
    /// argument is non-empty and not one of them.
    fn parse(input: ParseStream) -> syn::Result<Self> {
        if input.is_empty() {
            Ok(ModuleAttrs::Nothing)
        } else {
            let lookahead = input.lookahead1();
            if lookahead.peek(keywords::initiator) {
                // Consume the keyword token. `parse_macro_input!` requires
                // the attribute stream to be fully consumed; peeking alone
                // would leave the token behind and make every non-empty
                // `#[module(...)]` invocation fail with a trailing-token
                // error.
                input.parse::<keywords::initiator>()?;
                Ok(ModuleAttrs::Initiator)
            } else if lookahead.peek(keywords::actor) {
                input.parse::<keywords::actor>()?;
                Ok(ModuleAttrs::Actor)
            } else if lookahead.peek(keywords::sensor) {
                input.parse::<keywords::sensor>()?;
                Ok(ModuleAttrs::Sensor)
            } else {
                Err(input.error(
                    "Module type must be empty or one of \"initiator\", \"actor\", or \
                     \"sensor\"",
                ))
            }
        }
    }
}
/// The struct definition the `#[module]` attribute is applied to,
/// reduced to the parts the macro re-emits.
struct ModuleInput {
    /// Name of the annotated struct.
    pub ident: Ident,
    /// Named fields of the struct body (comma-separated).
    pub fields: Punctuated<Field, Token![,]>,
}
impl Parse for ModuleInput {
    /// Parses an item of the form `[pub] struct Name { field: Type, ... }`.
    ///
    /// A leading visibility qualifier is accepted and discarded. Anything
    /// that is not a brace-bodied struct is rejected.
    ///
    /// # Errors
    /// Fails for non-struct items and for unit structs.
    fn parse(input: ParseStream) -> syn::Result<Self> {
        // Optional visibility in front of `struct` — parsed only to skip it.
        if input.peek(Token![pub]) {
            let _vis: Visibility = input.parse()?;
        }

        if input.parse::<Token![struct]>().is_err() {
            return Err(input.error("Modules must be structs"));
        }

        let name: Ident = input.parse()?;

        if !input.peek(Brace) {
            return Err(input.error("Modules can't be unit structs"));
        }

        let body;
        braced!(body in input);
        let fields = body.parse_terminated(Field::parse_named)?;

        Ok(Self {
            ident: name,
            fields,
        })
    }
}
#[proc_macro_attribute]
pub fn module(attr: TokenStream, tokens: TokenStream) -> TokenStream {
    // Validate the attribute argument (empty / initiator / actor / sensor).
    // The parsed value is not acted on yet; the underscore binding keeps
    // the validation while silencing the unused-variable warning.
    let _attrs = parse_macro_input!(attr as ModuleAttrs);
    let item = parse_macro_input!(tokens as ModuleInput);

    let ident = item.ident;
    let fields = item.fields.iter();
    // Re-emit the struct definition with its fields; note the emitted
    // struct is always `pub`, regardless of the original visibility
    // (which `ModuleInput::parse` discards).
    let output = quote! {
        pub struct #ident {
            #(#fields),*
        }
    };
    output.into()
}

Some files were not shown because too many files have changed in this diff Show More