Remove sincerely obsolete code

Nadja Reitzenstein 2022-05-05 17:51:51 +02:00
parent be5a600abf
commit ce204b9bc1
5 changed files with 0 additions and 224 deletions


@@ -1,12 +0,0 @@
[package]
name = "dummy"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[lib]
crate-type = ["cdylib"]

[dependencies]
sdk = { path = "../sdk" }


@@ -1,35 +0,0 @@
use sdk::initiators::{Initiator, InitiatorError, ResourceID, UpdateSink};
use sdk::BoxFuture;

#[sdk::module]
struct Dummy {
    a: u32,
    b: u32,
    c: u32,
    d: u32,
}

impl Initiator for Dummy {
    fn start_for(
        &mut self,
        machine: ResourceID,
    ) -> BoxFuture<'static, Result<(), Box<dyn InitiatorError>>> {
        todo!()
    }

    fn run(
        &mut self,
        request: &mut UpdateSink,
    ) -> BoxFuture<'static, Result<(), Box<dyn InitiatorError>>> {
        todo!()
    }
}

#[cfg(test)]
mod tests {
    #[test]
    fn it_works() {
        let result = 2 + 2;
        assert_eq!(result, 4);
    }
}
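Both trait methods above are left as todo!() stubs. As a hedged sketch only (assuming sdk::BoxFuture<'static, T> is the usual pinned, boxed, Send future alias, i.e. Pin<Box<dyn Future<Output = T> + Send + 'static>>), a non-stub implementation could return an already-completed future:

// Hypothetical sketch, not part of the removed file. Assumes BoxFuture is a
// pinned, boxed, Send future alias as described above.
impl Initiator for Dummy {
    fn start_for(
        &mut self,
        _machine: ResourceID,
    ) -> BoxFuture<'static, Result<(), Box<dyn InitiatorError>>> {
        // Box::pin turns the async block into the boxed future the trait expects.
        Box::pin(async { Ok(()) })
    }

    fn run(
        &mut self,
        _request: &mut UpdateSink,
    ) -> BoxFuture<'static, Result<(), Box<dyn InitiatorError>>> {
        Box::pin(async { Ok(()) })
    }
}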


@@ -1,4 +0,0 @@
pub use diflouroborane::{
    initiators::{Initiator, InitiatorError, UpdateError, UpdateSink},
    resource::claim::ResourceID,
};


@@ -1,36 +0,0 @@
use executor::prelude::{spawn, ProcStack};
use executor::run::run;
use std::io::Write;
use std::thread;
use std::time::Duration;

#[cfg(feature = "tokio-runtime")]
mod tokio_tests {
    #[tokio::test]
    async fn test_run_blocking() {
        super::run_test()
    }
}

#[cfg(not(feature = "tokio-runtime"))]
mod no_tokio_tests {
    #[test]
    fn test_run_blocking() {
        super::run_test()
    }
}

fn run_test() {
    let handle = spawn(async {
        let duration = Duration::from_millis(1);
        thread::sleep(duration);
        //42
    });

    let output = run(handle, ProcStack {});
    println!("{:?}", output);
    // flush() returns an io::Result; don't discard it silently.
    std::io::stdout().flush().unwrap();

    assert!(output.is_some());
    std::thread::sleep(Duration::from_millis(200));
}
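The commented-out //42 above suggests the spawned future originally produced a value. As a hedged illustration only (assuming run() yields the task's output wrapped in an Option, as the is_some() assertion above implies), a value-returning variant of the test could look like this:

// Hypothetical variant, not part of the removed test file: the spawned future
// returns a value and run() is assumed to yield Some(value) once the task finishes.
fn run_test_with_value() {
    let handle = spawn(async {
        thread::sleep(Duration::from_millis(1));
        42
    });
    let output = run(handle, ProcStack {});
    assert_eq!(output, Some(42));
}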


@@ -1,137 +0,0 @@
use executor::blocking;
use executor::prelude::ProcStack;
use executor::run::run;
use futures_util::future::join_all;
use lightproc::recoverable_handle::RecoverableHandle;
use std::thread;
use std::time::Duration;
use std::time::Instant;

// Test for slow joins without task bursts during joins.
#[test]
#[ignore]
fn slow_join() {
    let thread_join_time_max = 11_000;
    let start = Instant::now();

    // Send an initial batch of a million short blocking tasks.
    let handles = (0..1_000_000)
        .map(|_| {
            blocking::spawn_blocking(async {
                let duration = Duration::from_millis(1);
                thread::sleep(duration);
            })
        })
        .collect::<Vec<RecoverableHandle<()>>>();
    run(join_all(handles), ProcStack {});

    // Let them join to see how the pool behaves under different workloads.
    let duration = Duration::from_millis(thread_join_time_max);
    thread::sleep(duration);

    // Spawn yet another batch of work on top of it.
    let handles = (0..10_000)
        .map(|_| {
            blocking::spawn_blocking(async {
                let duration = Duration::from_millis(100);
                thread::sleep(duration);
            })
        })
        .collect::<Vec<RecoverableHandle<()>>>();
    run(join_all(handles), ProcStack {});

    // Slow joins shouldn't cause an internal slowdown.
    let elapsed = start.elapsed().as_millis() - thread_join_time_max as u128;
    println!("Slow task join. Monotonic exec time: {:?} ms", elapsed);
    // The previous implementation lands around this threshold.
}

// Test for slow joins with a task burst.
#[test]
#[ignore]
fn slow_join_interrupted() {
    let thread_join_time_max = 2_000;
    let start = Instant::now();

    // Send an initial batch of a million short blocking tasks.
    let handles = (0..1_000_000)
        .map(|_| {
            blocking::spawn_blocking(async {
                let duration = Duration::from_millis(1);
                thread::sleep(duration);
            })
        })
        .collect::<Vec<RecoverableHandle<()>>>();
    run(join_all(handles), ProcStack {});

    // Let them join to see how the pool behaves under different workloads.
    // This time the join happens within the time window.
    let duration = Duration::from_millis(thread_join_time_max);
    thread::sleep(duration);

    // Spawn yet another batch of work on top of it.
    let handles = (0..10_000)
        .map(|_| {
            blocking::spawn_blocking(async {
                let duration = Duration::from_millis(100);
                thread::sleep(duration);
            })
        })
        .collect::<Vec<RecoverableHandle<()>>>();
    run(join_all(handles), ProcStack {});

    // Slow joins shouldn't cause an internal slowdown.
    let elapsed = start.elapsed().as_millis() - thread_join_time_max as u128;
    println!("Slow task join. Monotonic exec time: {:?} ms", elapsed);
    // The previous implementation lands around this threshold.
}

// This test is expensive, but it proves that long-hauling tasks keep working in the
// adaptive thread pool. A thread pool that spawns threads on demand will panic under
// this test.
#[test]
#[ignore]
fn longhauling_task_join() {
    let thread_join_time_max = 11_000;
    let start = Instant::now();

    // First batch of long-hauling tasks; the handles are dropped without being awaited.
    let _ = (0..100_000)
        .map(|_| {
            blocking::spawn_blocking(async {
                let duration = Duration::from_millis(1000);
                thread::sleep(duration);
            })
        })
        .collect::<Vec<RecoverableHandle<()>>>();

    // Let them join to see how the pool behaves under different workloads.
    let duration = Duration::from_millis(thread_join_time_max);
    thread::sleep(duration);

    // Send yet another medium-sized batch to see how it scales.
    let handles = (0..10_000)
        .map(|_| {
            blocking::spawn_blocking(async {
                let duration = Duration::from_millis(100);
                thread::sleep(duration);
            })
        })
        .collect::<Vec<RecoverableHandle<()>>>();
    run(join_all(handles), ProcStack {});

    // Slow joins shouldn't cause an internal slowdown.
    let elapsed = start.elapsed().as_millis() - thread_join_time_max as u128;
    println!(
        "Long-hauling task join. Monotonic exec time: {:?} ms",
        elapsed
    );
    // The previous implementation panics while this test is running.
}