diff --git a/runtime/executor/Cargo.toml b/runtime/executor/Cargo.toml
new file mode 100644
index 0000000..5c27a14
--- /dev/null
+++ b/runtime/executor/Cargo.toml
@@ -0,0 +1,34 @@
+[package]
+name = "executor"
+version = "0.3.0"
+publish = false
+description = "Executor"
+authors = []
+keywords = []
+categories = []
+readme = "README.md"
+license = "Apache-2.0/MIT"
+edition = "2021"
+exclude = [
+ "scripts/*",
+]
+
+[dependencies]
+lightproc = { path = "../lightproc" }
+
+crossbeam-utils = "0.8"
+crossbeam-channel = "0.5"
+crossbeam-epoch = "0.9"
+crossbeam-deque = "0.8.1"
+lazy_static = "1.4"
+libc = "0.2"
+num_cpus = "1.13"
+pin-utils = "0.1.0"
+
+# Allocator
+arrayvec = { version = "0.7.0" }
+futures-timer = "3.0.2"
+once_cell = "1.4.0"
+lever = "0.1"
+tracing = "0.1.19"
+crossbeam-queue = "0.3.0"
diff --git a/runtime/executor/README.md b/runtime/executor/README.md
new file mode 100644
index 0000000..b2dc377
--- /dev/null
+++ b/runtime/executor/README.md
@@ -0,0 +1,94 @@
+# Bastion Executor
+
+
+
+ Latest Release |
+
+
+
+
+ |
+
+
+ |
+
+
+ License |
+
+
+
+
+ |
+
+
+ Build Status |
+
+
+
+
+ |
+
+
+ Downloads |
+
+
+
+
+ |
+
+
+ Discord |
+
+
+
+
+ |
+
+
+
+Bastion Executor is NUMA-aware SMP based Fault-tolerant Executor
+
+Bastion Executor is a highly-available, fault-tolerant, async communication
+oriented executor. Bastion's main idea is supplying a fully async runtime
+with fault-tolerance to work on heavy loads.
+
+Main differences between other executors are:
+* Uses SMP based execution scheme to exploit cache affinity on multiple cores and execution is
+equally distributed over the system resources, which means utilizing all of the system.
+* Uses NUMA-aware allocation for scheduler's queues and exploit locality on server workloads.
+* Tailored for creating middleware and working with actor model like concurrency and distributed communication.
+
+**NOTE:** Bastion Executor is independent of its framework implementation.
+It uses [lightproc](https://docs.rs/lightproc) to encapsulate and provide fault-tolerance to your future based workloads.
+You can use your futures with [lightproc](https://docs.rs/lightproc) to run your workloads on Bastion Executor without the need to have framework.
+
+## Example Usage
+
+```rust
+use bastion_executor::prelude::*;
+use lightproc::proc_stack::ProcStack;
+
+fn main() {
+ let pid = 1;
+ let stack = ProcStack::default()
+ .with_pid(pid)
+ .with_after_panic(move || println!("after panic {}", pid.clone()));
+
+ let handle = spawn(
+ async {
+ panic!("test");
+ },
+ stack,
+ );
+
+ let pid = 2;
+ let stack = ProcStack::default().with_pid(pid);
+
+ run(
+ async {
+ handle.await;
+ },
+ stack.clone(),
+ );
+}
+```
\ No newline at end of file
diff --git a/runtime/executor/benches/blocking.rs b/runtime/executor/benches/blocking.rs
new file mode 100644
index 0000000..6c5a6ff
--- /dev/null
+++ b/runtime/executor/benches/blocking.rs
@@ -0,0 +1,67 @@
+#![feature(test)]
+
+extern crate test;
+
+use bastion_executor::blocking;
+use lightproc::proc_stack::ProcStack;
+use std::thread;
+use std::time::Duration;
+use test::Bencher;
+
+#[cfg(feature = "tokio-runtime")]
+mod tokio_benchs {
+ use super::*;
+ #[bench]
+ fn blocking(b: &mut Bencher) {
+ tokio_test::block_on(async { _blocking(b) });
+ }
+ #[bench]
+ fn blocking_single(b: &mut Bencher) {
+ tokio_test::block_on(async {
+ _blocking_single(b);
+ });
+ }
+}
+
+#[cfg(not(feature = "tokio-runtime"))]
+mod no_tokio_benchs {
+ use super::*;
+ #[bench]
+ fn blocking(b: &mut Bencher) {
+ _blocking(b);
+ }
+ #[bench]
+ fn blocking_single(b: &mut Bencher) {
+ _blocking_single(b);
+ }
+}
+
+// Benchmark for a 10K burst task spawn
+fn _blocking(b: &mut Bencher) {
+ b.iter(|| {
+ (0..10_000)
+ .map(|_| {
+ blocking::spawn_blocking(
+ async {
+ let duration = Duration::from_millis(1);
+ thread::sleep(duration);
+ },
+ ProcStack::default(),
+ )
+ })
+ .collect::<Vec<_>>()
+ });
+}
+
+// Benchmark for a single blocking task spawn
+fn _blocking_single(b: &mut Bencher) {
+ b.iter(|| {
+ blocking::spawn_blocking(
+ async {
+ let duration = Duration::from_millis(1);
+ thread::sleep(duration);
+ },
+ ProcStack::default(),
+ )
+ });
+}
diff --git a/runtime/executor/benches/perf.rs b/runtime/executor/benches/perf.rs
new file mode 100644
index 0000000..e8a588b
--- /dev/null
+++ b/runtime/executor/benches/perf.rs
@@ -0,0 +1,25 @@
+#![feature(test)]
+
+extern crate test;
+
+use bastion_executor::prelude::*;
+use lightproc::proc_stack::ProcStack;
+use test::{black_box, Bencher};
+
+#[bench]
+fn increment(b: &mut Bencher) {
+ let mut sum = 0;
+
+ b.iter(|| {
+ run(
+ async {
+ (0..10_000_000).for_each(|_| {
+ sum += 1;
+ });
+ },
+ ProcStack::default(),
+ );
+ });
+
+ black_box(sum);
+}
diff --git a/runtime/executor/benches/run_blocking.rs b/runtime/executor/benches/run_blocking.rs
new file mode 100644
index 0000000..43de440
--- /dev/null
+++ b/runtime/executor/benches/run_blocking.rs
@@ -0,0 +1,69 @@
+#![feature(test)]
+
+extern crate test;
+
+use bastion_executor::blocking;
+use bastion_executor::run::run;
+use futures::future::join_all;
+use lightproc::proc_stack::ProcStack;
+use std::thread;
+use std::time::Duration;
+use test::Bencher;
+
+#[cfg(feature = "tokio-runtime")]
+mod tokio_benchs {
+ use super::*;
+ #[bench]
+ fn blocking(b: &mut Bencher) {
+ tokio_test::block_on(async { _blocking(b) });
+ }
+ #[bench]
+ fn blocking_single(b: &mut Bencher) {
+ tokio_test::block_on(async {
+ _blocking_single(b);
+ });
+ }
+}
+
+#[cfg(not(feature = "tokio-runtime"))]
+mod no_tokio_benchs {
+ use super::*;
+ #[bench]
+ fn blocking(b: &mut Bencher) {
+ _blocking(b);
+ }
+ #[bench]
+ fn blocking_single(b: &mut Bencher) {
+ _blocking_single(b);
+ }
+}
+
+// Benchmark for a 10K burst task spawn
+fn _blocking(b: &mut Bencher) {
+ b.iter(|| {
+ (0..10_000)
+ .map(|_| {
+ blocking::spawn_blocking(
+ async {
+ let duration = Duration::from_millis(1);
+ thread::sleep(duration);
+ },
+ ProcStack::default(),
+ )
+ })
+ .collect::<Vec<_>>()
+ });
+}
+
+// Benchmark for a single blocking task spawn
+fn _blocking_single(b: &mut Bencher) {
+ b.iter(|| {
+ blocking::spawn_blocking(
+ async {
+ let duration = Duration::from_millis(1);
+ thread::sleep(duration);
+ },
+ ProcStack::default(),
+ )
+ });
+}
diff --git a/runtime/executor/benches/spawn.rs b/runtime/executor/benches/spawn.rs
new file mode 100644
index 0000000..02b896b
--- /dev/null
+++ b/runtime/executor/benches/spawn.rs
@@ -0,0 +1,70 @@
+#![feature(test)]
+
+extern crate test;
+
+use bastion_executor::load_balancer;
+use bastion_executor::prelude::spawn;
+use futures_timer::Delay;
+use lightproc::proc_stack::ProcStack;
+use std::time::Duration;
+use test::Bencher;
+
+#[cfg(feature = "tokio-runtime")]
+mod tokio_benchs {
+ use super::*;
+ #[bench]
+ fn spawn_lot(b: &mut Bencher) {
+ tokio_test::block_on(async { _spawn_lot(b) });
+ }
+ #[bench]
+ fn spawn_single(b: &mut Bencher) {
+ tokio_test::block_on(async {
+ _spawn_single(b);
+ });
+ }
+}
+
+#[cfg(not(feature = "tokio-runtime"))]
+mod no_tokio_benchs {
+ use super::*;
+ #[bench]
+ fn spawn_lot(b: &mut Bencher) {
+ _spawn_lot(b);
+ }
+ #[bench]
+ fn spawn_single(b: &mut Bencher) {
+ _spawn_single(b);
+ }
+}
+
+// Benchmark for a 10K burst task spawn
+fn _spawn_lot(b: &mut Bencher) {
+ let proc_stack = ProcStack::default();
+ b.iter(|| {
+ let _ = (0..10_000)
+ .map(|_| {
+ spawn(
+ async {
+ let duration = Duration::from_millis(1);
+ Delay::new(duration).await;
+ },
+ proc_stack.clone(),
+ )
+ })
+ .collect::<Vec<_>>();
+ });
+}
+
+// Benchmark for a single task spawn
+fn _spawn_single(b: &mut Bencher) {
+ let proc_stack = ProcStack::default();
+ b.iter(|| {
+ spawn(
+ async {
+ let duration = Duration::from_millis(1);
+ Delay::new(duration).await;
+ },
+ proc_stack.clone(),
+ );
+ });
+}
diff --git a/runtime/executor/benches/stats.rs b/runtime/executor/benches/stats.rs
new file mode 100644
index 0000000..684e7cb
--- /dev/null
+++ b/runtime/executor/benches/stats.rs
@@ -0,0 +1,71 @@
+#![feature(test)]
+
+extern crate test;
+use bastion_executor::load_balancer::{core_count, get_cores, stats, SmpStats};
+use bastion_executor::placement;
+use std::thread;
+use test::Bencher;
+
+fn stress_stats<S: SmpStats + Sync + Send>(stats: &'static S) {
+ let mut handles = Vec::with_capacity(*core_count());
+ for core in get_cores() {
+ let handle = thread::spawn(move || {
+ placement::set_for_current(*core);
+ for i in 0..100 {
+ stats.store_load(core.id, 10);
+ if i % 3 == 0 {
+ let _sorted_load = stats.get_sorted_load();
+ }
+ }
+ });
+ handles.push(handle);
+ }
+
+ for handle in handles {
+ handle.join().unwrap();
+ }
+}
+
+// previous lock based stats benchmark 1,352,791 ns/iter (+/- 2,682,013)
+
+// 158,278 ns/iter (+/- 117,103)
+#[bench]
+fn lockless_stats_bench(b: &mut Bencher) {
+ b.iter(|| {
+ stress_stats(stats());
+ });
+}
+
+#[bench]
+fn lockless_stats_bad_load(b: &mut Bencher) {
+ let stats = stats();
+ const MAX_CORE: usize = 256;
+ for i in 0..MAX_CORE {
+ // Generating the worst possible mergesort scenario
+ // [0,2,4,6,8,10,1,3,5,7,9]...
+ if i <= MAX_CORE / 2 {
+ stats.store_load(i, i * 2);
+ } else {
+ stats.store_load(i, i - 1 - MAX_CORE / 2);
+ }
+ }
+
+ b.iter(|| {
+ let _sorted_load = stats.get_sorted_load();
+ });
+}
+
+#[bench]
+fn lockless_stats_good_load(b: &mut Bencher) {
+ let stats = stats();
+ const MAX_CORE: usize = 256;
+ for i in 0..MAX_CORE {
+ // Generating the best possible mergesort scenario
+ // [0,1,2,3,4,5,6,7,8,9]...
+ stats.store_load(i, i);
+ }
+
+ b.iter(|| {
+ let _sorted_load = stats.get_sorted_load();
+ });
+}
diff --git a/runtime/executor/examples/spawn_async.rs b/runtime/executor/examples/spawn_async.rs
new file mode 100644
index 0000000..250f433
--- /dev/null
+++ b/runtime/executor/examples/spawn_async.rs
@@ -0,0 +1,42 @@
+use std::io::Write;
+use std::panic::resume_unwind;
+use std::time::Duration;
+use executor::pool;
+use executor::prelude::*;
+
+fn main() {
+ std::panic::set_hook(Box::new(|info| {
+ let tid = std::thread::current().id();
+ println!("Panicking ThreadId: {:?}", tid);
+ std::io::stdout().flush();
+ println!("panic hook: {:?}", info);
+ }));
+ let tid = std::thread::current().id();
+ println!("Main ThreadId: {:?}", tid);
+
+ let handle = spawn(
+ async {
+ panic!("test");
+ },
+ );
+
+ run(
+ async {
+ handle.await;
+ },
+ ProcStack {},
+ );
+
+ let pool = pool::get();
+ let manager = pool::get_manager().unwrap();
+ println!("After panic: {:?}", pool);
+ println!("{:#?}", manager);
+
+ let h = std::thread::spawn(|| {
+ panic!("This is a test");
+ });
+
+ std::thread::sleep(Duration::from_secs(30));
+
+ println!("After panic");
+}
diff --git a/runtime/executor/scripts/test_blocking_thread_pool.sh b/runtime/executor/scripts/test_blocking_thread_pool.sh
new file mode 100644
index 0000000..f1c28d8
--- /dev/null
+++ b/runtime/executor/scripts/test_blocking_thread_pool.sh
@@ -0,0 +1,5 @@
+#!/bin/zsh
+
+cargo test longhauling_task_join -- --ignored --exact --nocapture
+cargo test slow_join_interrupted -- --ignored --exact --nocapture
+cargo test slow_join -- --ignored --exact --nocapture
\ No newline at end of file
diff --git a/runtime/executor/src/blocking.rs b/runtime/executor/src/blocking.rs
new file mode 100644
index 0000000..29e34ef
--- /dev/null
+++ b/runtime/executor/src/blocking.rs
@@ -0,0 +1,165 @@
+//!
+//! Pool of threads to run heavy processes
+//!
+//! We spawn futures onto the pool with [`spawn_blocking`] method of global run queue or
+//! with corresponding [`Worker`]'s spawn method.
+//!
+//! [`Worker`]: crate::run_queue::Worker
+
+use crate::thread_manager::{DynamicPoolManager, DynamicRunner};
+use crossbeam_channel::{unbounded, Receiver, Sender};
+use lazy_static::lazy_static;
+use lightproc::lightproc::LightProc;
+use lightproc::recoverable_handle::RecoverableHandle;
+use once_cell::sync::{Lazy, OnceCell};
+use std::future::Future;
+use std::iter::Iterator;
+use std::time::Duration;
+use std::{env, thread};
+use tracing::trace;
+
+/// If low watermark isn't configured this is the default scaler value.
+/// This value is used for the heuristics of the scaler
+const DEFAULT_LOW_WATERMARK: u64 = 2;
+
+const THREAD_RECV_TIMEOUT: Duration = Duration::from_millis(100);
+
+/// Spawns a blocking task.
+///
+/// The task will be spawned onto a thread pool specifically dedicated to blocking tasks.
+pub fn spawn_blocking<F, R>(future: F) -> RecoverableHandle<R>
+where
+ F: Future<Output = R> + Send + 'static,