Kick out asyncio into an external crate for later

Nadja Reitzenstein 2021-11-25 23:36:17 +01:00
parent ad5c4061de
commit 32894300f4
70 changed files with 1477 additions and 10138 deletions

Cargo.lock generated

@ -19,7 +19,7 @@ checksum = "372baaa5d3a422d8816b513bcdb2c120078c8614f7ecbcc3baf34a1634bbbe2e"
dependencies = [ dependencies = [
"abnf", "abnf",
"indexmap", "indexmap",
"itertools", "itertools 0.9.0",
"pretty", "pretty",
] ]
@ -70,6 +70,12 @@ dependencies = [
"winapi", "winapi",
] ]
[[package]]
name = "anyhow"
version = "1.0.48"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62e1f47f7dc0422027a4e370dd4548d4d66b26782e513e98dca1e689e058a80e"
[[package]] [[package]]
name = "api" name = "api"
version = "0.1.0" version = "0.1.0"
@ -91,6 +97,12 @@ version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
[[package]]
name = "arrayvec"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
[[package]] [[package]]
name = "async-channel" name = "async-channel"
version = "1.6.1" version = "1.6.1"
@ -127,6 +139,22 @@ dependencies = [
"futures-lite", "futures-lite",
] ]
[[package]]
name = "async-global-executor"
version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6"
dependencies = [
"async-channel",
"async-executor",
"async-io",
"async-mutex",
"blocking",
"futures-lite",
"num_cpus",
"once_cell",
]
[[package]] [[package]]
name = "async-io" name = "async-io"
version = "1.6.0" version = "1.6.0"
@ -155,6 +183,15 @@ dependencies = [
"event-listener", "event-listener",
] ]
[[package]]
name = "async-mutex"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e"
dependencies = [
"event-listener",
]
[[package]] [[package]]
name = "async-native-tls" name = "async-native-tls"
version = "0.3.3" version = "0.3.3"
@ -211,12 +248,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8056f1455169ab86dd47b47391e4ab0cbd25410a70e9fe675544f49bafaf952" checksum = "f8056f1455169ab86dd47b47391e4ab0cbd25410a70e9fe675544f49bafaf952"
dependencies = [ dependencies = [
"async-channel", "async-channel",
"async-global-executor",
"async-io",
"async-lock", "async-lock",
"crossbeam-utils", "crossbeam-utils 0.8.5",
"futures-channel", "futures-channel",
"futures-core", "futures-core",
"futures-io", "futures-io",
"futures-lite",
"gloo-timers",
"kv-log-macro",
"log",
"memchr", "memchr",
"num_cpus",
"once_cell", "once_cell",
"pin-project-lite", "pin-project-lite",
"pin-utils", "pin-utils",
@ -303,6 +347,12 @@ dependencies = [
"which", "which",
] ]
[[package]]
name = "bitfield"
version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46afbd2983a5d5a7bd740ccb198caf5b82f45c40c09c0eed36052d91cb92e719"
[[package]] [[package]]
name = "bitflags" name = "bitflags"
version = "1.3.2" version = "1.3.2"
@ -316,7 +366,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587"
dependencies = [ dependencies = [
"arrayref", "arrayref",
"arrayvec", "arrayvec 0.5.2",
"constant_time_eq", "constant_time_eq",
] ]
@ -364,6 +414,18 @@ dependencies = [
"once_cell", "once_cell",
] ]
[[package]]
name = "bstr"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223"
dependencies = [
"lazy_static",
"memchr",
"regex-automata",
"serde",
]
[[package]] [[package]]
name = "bumpalo" name = "bumpalo"
version = "3.8.0" version = "3.8.0"
@ -445,6 +507,15 @@ dependencies = [
"capnp", "capnp",
] ]
[[package]]
name = "cast"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a"
dependencies = [
"rustc_version",
]
[[package]] [[package]]
name = "cc" name = "cc"
version = "1.0.71" version = "1.0.71"
@ -552,6 +623,126 @@ dependencies = [
"libc", "libc",
] ]
[[package]]
name = "criterion"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10"
dependencies = [
"atty",
"cast",
"clap",
"criterion-plot",
"csv",
"itertools 0.10.1",
"lazy_static",
"num-traits",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_cbor",
"serde_derive",
"serde_json",
"tinytemplate",
"walkdir",
]
[[package]]
name = "criterion-plot"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57"
dependencies = [
"cast",
"itertools 0.10.1",
]
[[package]]
name = "crossbeam"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845"
dependencies = [
"cfg-if 1.0.0",
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-epoch 0.9.5",
"crossbeam-queue",
"crossbeam-utils 0.8.5",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4"
dependencies = [
"cfg-if 1.0.0",
"crossbeam-utils 0.8.5",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e"
dependencies = [
"cfg-if 1.0.0",
"crossbeam-epoch 0.9.5",
"crossbeam-utils 0.8.5",
]
[[package]]
name = "crossbeam-epoch"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace"
dependencies = [
"autocfg",
"cfg-if 0.1.10",
"crossbeam-utils 0.7.2",
"lazy_static",
"maybe-uninit",
"memoffset 0.5.6",
"scopeguard",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd"
dependencies = [
"cfg-if 1.0.0",
"crossbeam-utils 0.8.5",
"lazy_static",
"memoffset 0.6.4",
"scopeguard",
]
[[package]]
name = "crossbeam-queue"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b10ddc024425c88c2ad148c1b0fd53f4c6d38db9697c9f1588381212fa657c9"
dependencies = [
"cfg-if 1.0.0",
"crossbeam-utils 0.8.5",
]
[[package]]
name = "crossbeam-utils"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
dependencies = [
"autocfg",
"cfg-if 0.1.10",
"lazy_static",
]
[[package]] [[package]]
name = "crossbeam-utils" name = "crossbeam-utils"
version = "0.8.5" version = "0.8.5"
@ -562,6 +753,28 @@ dependencies = [
"lazy_static", "lazy_static",
] ]
[[package]]
name = "csv"
version = "1.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1"
dependencies = [
"bstr",
"csv-core",
"itoa",
"ryu",
"serde",
]
[[package]]
name = "csv-core"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
dependencies = [
"memchr",
]
[[package]] [[package]]
name = "ctor" name = "ctor"
version = "0.1.21" version = "0.1.21"
@ -582,7 +795,7 @@ dependencies = [
"annotate-snippets", "annotate-snippets",
"elsa", "elsa",
"hex", "hex",
"itertools", "itertools 0.9.0",
"lazy_static", "lazy_static",
"once_cell", "once_cell",
"percent-encoding", "percent-encoding",
@ -609,7 +822,7 @@ dependencies = [
[[package]] [[package]]
name = "diflouroborane" name = "diflouroborane"
version = "0.3.0" version = "0.4.0"
dependencies = [ dependencies = [
"api", "api",
"async-channel", "async-channel",
@ -622,6 +835,7 @@ dependencies = [
"chrono", "chrono",
"clap", "clap",
"erased-serde", "erased-serde",
"executor",
"futures-signals", "futures-signals",
"futures-test", "futures-test",
"intmap", "intmap",
@ -629,6 +843,7 @@ dependencies = [
"lazy_static", "lazy_static",
"libc", "libc",
"lmdb-rkv", "lmdb-rkv",
"pin-utils",
"ptr_meta", "ptr_meta",
"rand", "rand",
"rkyv", "rkyv",
@ -643,7 +858,7 @@ dependencies = [
"smol", "smol",
"tempfile", "tempfile",
"tracing", "tracing",
"tracing-subscriber", "tracing-subscriber 0.2.25",
"uuid", "uuid",
] ]
@ -720,6 +935,33 @@ version = "2.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59"
[[package]]
name = "executor"
version = "0.3.0"
dependencies = [
"arrayvec 0.7.2",
"async-std",
"criterion",
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-epoch 0.9.5",
"crossbeam-queue",
"crossbeam-utils 0.8.5",
"futures-timer",
"futures-util",
"lazy_static",
"lever",
"libc",
"lightproc",
"num_cpus",
"once_cell",
"pin-utils",
"rand",
"slab",
"tracing",
"tracing-subscriber 0.3.1",
]
[[package]] [[package]]
name = "fake-simd" name = "fake-simd"
version = "0.1.2" version = "0.1.2"
@ -887,6 +1129,12 @@ dependencies = [
"pin-utils", "pin-utils",
] ]
[[package]]
name = "futures-timer"
version = "3.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c"
[[package]] [[package]]
name = "futures-util" name = "futures-util"
version = "0.3.17" version = "0.3.17"
@ -955,6 +1203,19 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
[[package]]
name = "gloo-timers"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47204a46aaff920a1ea58b11d03dec6f704287d27561724a4631e450654a891f"
dependencies = [
"futures-channel",
"futures-core",
"js-sys",
"wasm-bindgen",
"web-sys",
]
[[package]] [[package]]
name = "gsasl-sys" name = "gsasl-sys"
version = "0.2.3" version = "0.2.3"
@ -1070,6 +1331,15 @@ dependencies = [
"either", "either",
] ]
[[package]]
name = "itertools"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf"
dependencies = [
"either",
]
[[package]] [[package]]
name = "itoa" name = "itoa"
version = "0.4.8" version = "0.4.8"
@ -1085,6 +1355,15 @@ dependencies = [
"wasm-bindgen", "wasm-bindgen",
] ]
[[package]]
name = "kv-log-macro"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f"
dependencies = [
"log",
]
[[package]] [[package]]
name = "lazy_static" name = "lazy_static"
version = "1.4.0" version = "1.4.0"
@ -1097,13 +1376,29 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]]
name = "lever"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4894ef6dbd1b26dbd7284530c227aab005a57b939427ace2d4d0cc62c23fb05b"
dependencies = [
"anyhow",
"crossbeam-epoch 0.8.2",
"itertools 0.9.0",
"lazy_static",
"log",
"parking_lot",
"slice-group-by",
"thiserror",
]
[[package]] [[package]]
name = "lexical-core" name = "lexical-core"
version = "0.7.6" version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe" checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe"
dependencies = [ dependencies = [
"arrayvec", "arrayvec 0.5.2",
"bitflags", "bitflags",
"cfg-if 1.0.0", "cfg-if 1.0.0",
"ryu", "ryu",
@ -1112,9 +1407,9 @@ dependencies = [
[[package]] [[package]]
name = "libc" name = "libc"
version = "0.2.105" version = "0.2.108"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "869d572136620d55835903746bcb5cdc54cb2851fd0aeec53220b4bb65ef3013" checksum = "8521a1b57e76b1ec69af7599e75e38e7b7fad6610f037db8c79b127201b5d119"
[[package]] [[package]]
name = "libloading" name = "libloading"
@ -1126,6 +1421,20 @@ dependencies = [
"winapi", "winapi",
] ]
[[package]]
name = "lightproc"
version = "0.3.0"
dependencies = [
"async-std",
"bitfield",
"bitflags",
"crossbeam",
"crossbeam-utils 0.8.5",
"futures-executor",
"lazy_static",
"pin-utils",
]
[[package]] [[package]]
name = "lmdb-rkv" name = "lmdb-rkv"
version = "0.14.0" version = "0.14.0"
@ -1149,6 +1458,15 @@ dependencies = [
"pkg-config", "pkg-config",
] ]
[[package]]
name = "lock_api"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109"
dependencies = [
"scopeguard",
]
[[package]] [[package]]
name = "log" name = "log"
version = "0.4.14" version = "0.4.14"
@ -1156,6 +1474,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
dependencies = [ dependencies = [
"cfg-if 1.0.0", "cfg-if 1.0.0",
"value-bag",
] ]
[[package]] [[package]]
@ -1179,12 +1498,36 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f"
[[package]]
name = "maybe-uninit"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
[[package]] [[package]]
name = "memchr" name = "memchr"
version = "2.4.1" version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
[[package]]
name = "memoffset"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa"
dependencies = [
"autocfg",
]
[[package]]
name = "memoffset"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9"
dependencies = [
"autocfg",
]
[[package]] [[package]]
name = "native-tls" name = "native-tls"
version = "0.2.8" version = "0.2.8"
@ -1233,12 +1576,28 @@ dependencies = [
"autocfg", "autocfg",
] ]
[[package]]
name = "num_cpus"
version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
dependencies = [
"hermit-abi",
"libc",
]
[[package]] [[package]]
name = "once_cell" name = "once_cell"
version = "1.8.0" version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56"
[[package]]
name = "oorandom"
version = "11.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]] [[package]]
name = "opaque-debug" name = "opaque-debug"
version = "0.2.3" version = "0.2.3"
@ -1290,6 +1649,31 @@ version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72"
[[package]]
name = "parking_lot"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
dependencies = [
"instant",
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216"
dependencies = [
"cfg-if 1.0.0",
"instant",
"libc",
"redox_syscall",
"smallvec",
"winapi",
]
[[package]] [[package]]
name = "peeking_take_while" name = "peeking_take_while"
version = "0.1.2" version = "0.1.2"
@ -1405,6 +1789,34 @@ version = "0.3.22"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12295df4f294471248581bc09bef3c38a5e46f1e36d6a37353621a0c6c357e1f" checksum = "12295df4f294471248581bc09bef3c38a5e46f1e36d6a37353621a0c6c357e1f"
[[package]]
name = "plotters"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a"
dependencies = [
"num-traits",
"plotters-backend",
"plotters-svg",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "plotters-backend"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c"
[[package]]
name = "plotters-svg"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9"
dependencies = [
"plotters-backend",
]
[[package]] [[package]]
name = "polling" name = "polling"
version = "2.1.0" version = "2.1.0"
@ -1529,6 +1941,31 @@ dependencies = [
"rand_core", "rand_core",
] ]
[[package]]
name = "rayon"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90"
dependencies = [
"autocfg",
"crossbeam-deque",
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-utils 0.8.5",
"lazy_static",
"num_cpus",
]
[[package]] [[package]]
name = "redox_syscall" name = "redox_syscall"
version = "0.2.10" version = "0.2.10"
@ -1672,7 +2109,7 @@ dependencies = [
"base64", "base64",
"blake2b_simd", "blake2b_simd",
"constant_time_eq", "constant_time_eq",
"crossbeam-utils", "crossbeam-utils 0.8.5",
] ]
[[package]] [[package]]
@ -1681,6 +2118,15 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
[[package]]
name = "rustc_version"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
dependencies = [
"semver",
]
[[package]] [[package]]
name = "ryu" name = "ryu"
version = "1.0.5" version = "1.0.5"
@ -1706,6 +2152,12 @@ dependencies = [
"winapi", "winapi",
] ]
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]] [[package]]
name = "sdk" name = "sdk"
version = "0.1.0" version = "0.1.0"
@ -1739,6 +2191,12 @@ dependencies = [
"libc", "libc",
] ]
[[package]]
name = "semver"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012"
[[package]] [[package]]
name = "serde" name = "serde"
version = "1.0.130" version = "1.0.130"
@ -1858,6 +2316,12 @@ version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5"
[[package]]
name = "slice-group-by"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f7474f0b646d228360ab62ed974744617bc869d959eac8403bfa3665931a7fb"
[[package]] [[package]]
name = "smallvec" name = "smallvec"
version = "1.7.0" version = "1.7.0"
@ -1992,6 +2456,16 @@ dependencies = [
"winapi", "winapi",
] ]
[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
"serde",
"serde_json",
]
[[package]] [[package]]
name = "tinyvec" name = "tinyvec"
version = "1.5.0" version = "1.5.0"
@ -2082,6 +2556,20 @@ dependencies = [
"tracing-serde", "tracing-serde",
] ]
[[package]]
name = "tracing-subscriber"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "80a4ddde70311d8da398062ecf6fc2c309337de6b0f77d6c27aff8d53f6fca52"
dependencies = [
"ansi_term 0.12.1",
"sharded-slab",
"smallvec",
"thread_local",
"tracing-core",
"tracing-log",
]
[[package]] [[package]]
name = "typed-arena" name = "typed-arena"
version = "1.7.0" version = "1.7.0"
@ -2149,6 +2637,16 @@ dependencies = [
"serde", "serde",
] ]
[[package]]
name = "value-bag"
version = "1.0.0-alpha.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79923f7731dc61ebfba3633098bf3ac533bbd35ccd8c57e7088d9a5eebe0263f"
dependencies = [
"ctor",
"version_check",
]
[[package]] [[package]]
name = "vcpkg" name = "vcpkg"
version = "0.2.15" version = "0.2.15"


@ -1,6 +1,6 @@
[package] [package]
name = "diflouroborane" name = "diflouroborane"
version = "0.3.0" version = "0.4.0"
authors = [ "dequbed <me@dequbed.space>" authors = [ "dequbed <me@dequbed.space>"
, "Kai Jan Kriegel <kai@kjkriegel.de>" , "Kai Jan Kriegel <kai@kjkriegel.de>"
, "Joseph Langosch <thejoklla@gmail.com>" , "Joseph Langosch <thejoklla@gmail.com>"
@ -9,6 +9,7 @@ authors = [ "dequbed <me@dequbed.space>"
license = "LGPL-3.0" license = "LGPL-3.0"
edition = "2021" edition = "2021"
publish = false publish = false
readme = "README.md"
[profile.release] [profile.release]
opt-level = 3 opt-level = 3
@ -29,8 +30,10 @@ uuid = { version = "0.8.2", features = ["serde", "v4"] }
async-trait = "0.1.51" async-trait = "0.1.51"
async-native-tls = "0.3" async-native-tls = "0.3"
intmap = "0.7" intmap = "0.7"
pin-utils = "0.1.0"
# Runtime # Runtime
executor = { path = "runtime/executor" }
smol = "1.2.5" smol = "1.2.5"
# Catch&Handle POSIX process signals # Catch&Handle POSIX process signals
@ -81,4 +84,4 @@ tempfile = "3.2"
bincode = "1.3" bincode = "1.3"
[workspace] [workspace]
members = ["modules/*", "api"] members = ["modules/*", "api"]


@ -22,23 +22,3 @@ pub mod resource;
pub mod state; pub mod state;
/// Varints /// Varints
pub mod varint; pub mod varint;
use intmap::IntMap;
use resource::ResourceDriver;
#[derive(Debug)]
struct InitiatorDriver;
#[derive(Debug)]
struct ActorDriver;
#[derive(Debug)]
struct System {
resources: IntMap<ResourceDriver>,
initiators: IntMap<InitiatorDriver>,
actors: IntMap<ActorDriver>,
}
#[derive(Debug)]
struct Accountant {
}


@ -1,25 +0,0 @@
[package]
name = "asyncio"
version = "0.1.0"
edition = "2021"
description = "io_uring-first async I/O implementation"
readme = "README.md"
publish = false
[dependencies]
static_assertions = "1.1"
libc = "0.2"
nix = "0.23"
bitflags = "1.3"
ptr_meta = "0.1"
# SegQueue for task waiting on CQE or SQE being available again.
crossbeam-queue = "0.3"
# AsyncRead, AsyncWrite, AsyncSeek and related traits
futures-io = "0.3"
[dev-dependencies]
# As Mr. Torgue would put it: THE MOST EXTREME F*CKING ASYNC FUNCTION RUNNNER! EXPLOSIONS!
extreme = "666.666.666666"
futures-lite = "1.12"


@ -1,63 +0,0 @@
use std::fs::File;
use std::future::Future;
use std::io;
use std::os::unix::prelude::AsRawFd;
use std::sync::Arc;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
use asyncio::ctypes::IORING_OP;
use asyncio::io_uring::IoUring;
use futures_lite::io::AsyncReadExt;
pub fn drive<T>(iouring: &IoUring, mut f: impl Future<Output = io::Result<T>>) -> io::Result<T> {
static VTABLE: RawWakerVTable = RawWakerVTable::new(
|clone_me| unsafe {
let arc = Arc::from_raw(clone_me);
std::mem::forget(arc.clone());
RawWaker::new(Arc::into_raw(arc) as *const (), &VTABLE)
},
|wake_me| unsafe { Arc::from_raw(wake_me); },
|wake_by_ref_me| unsafe {},
|drop_me| unsafe { drop(Arc::from_raw(drop_me)) },
);
let mut f = unsafe { std::pin::Pin::new_unchecked(&mut f) };
let park = Arc::new(());
let sender = Arc::into_raw(park.clone());
let raw_waker = RawWaker::new(sender as *const _, &VTABLE);
let waker = unsafe { Waker::from_raw(raw_waker) };
let mut cx = Context::from_waker(&waker);
loop {
match f.as_mut().poll(&mut cx) {
Poll::Ready(t) => return t,
Poll::Pending => {
iouring.handle_completions();
match iouring.submit_wait() {
Ok(_) => {}
Err(e) => return Err(e),
}
}
}
}
}
fn main() {
let file = File::open("/tmp/poem").unwrap();
let fd = file.as_raw_fd();
let mut ring: &'static IoUring = Box::leak(Box::new(IoUring::setup(4).unwrap()));
let mut async_file = asyncio::fs::File::new(fd, ring);
let mut buf = Box::new([0u8; 4096]);
let f = async move {
let len = async_file.read(&mut buf[..]).await?;
println!("Read {} bytes:", len);
let str = unsafe { std::str::from_utf8_unchecked(&buf[..len]) };
println!("{}", str);
Ok(())
};
drive(ring, f);
}
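
The example above hand-rolls a tiny executor: a raw-waker vtable plus a loop that polls the future and otherwise submits and reaps io_uring completions. As an illustrative aside (not part of the diff), the same single-future pattern in fully self-contained form, with the io_uring work replaced by plain busy-polling and every name local to the sketch:

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};

// Busy-polling block_on: the waker does nothing because the loop re-polls unconditionally.
fn block_on<T>(mut fut: impl Future<Output = T>) -> T {
    unsafe fn vt_clone(data: *const ()) -> RawWaker { RawWaker::new(data, &VTABLE) }
    unsafe fn vt_noop(_: *const ()) {}
    static VTABLE: RawWakerVTable = RawWakerVTable::new(vt_clone, vt_noop, vt_noop, vt_noop);

    let waker = unsafe { Waker::from_raw(RawWaker::new(std::ptr::null(), &VTABLE)) };
    let mut cx = Context::from_waker(&waker);
    // SAFETY: `fut` stays on this stack frame and is never moved after being pinned.
    let mut fut = unsafe { Pin::new_unchecked(&mut fut) };
    loop {
        match fut.as_mut().poll(&mut cx) {
            Poll::Ready(value) => return value,
            Poll::Pending => std::hint::spin_loop(),
        }
    }
}

fn main() {
    assert_eq!(block_on(async { 40 + 2 }), 42);
}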


@ -1,54 +0,0 @@
use std::fs::File;
use std::os::unix::prelude::AsRawFd;
use asyncio::ctypes::IORING_OP;
use asyncio::io_uring::IoUring;
fn main() {
let file = File::open("/tmp/poem").unwrap();
let fd = file.as_raw_fd();
let ring = IoUring::setup(4).unwrap();
let mut cqes = ring.cqes();
let buf = Box::new([0u8; 4096]);
ring.try_prepare(3, |mut sqes| {
let mut sqe = sqes.next().unwrap();
sqe.set_opcode(IORING_OP::READ);
sqe.set_address(buf.as_ptr() as u64);
sqe.set_fd(fd);
sqe.set_len(4096);
let mut sqe = sqes.next().unwrap();
sqe.set_opcode(IORING_OP::NOP);
sqe.set_userdata(0xCAFEBABE);
let mut sqe = sqes.next().unwrap();
sqe.set_opcode(IORING_OP::NOP);
sqe.set_userdata(0xDEADBEEF);
}).unwrap();
let mut amt = 0;
while amt == 0 {
amt = ring.submit().unwrap();
}
println!("{}", amt);
for _ in 0..3 {
let mut cqe = None;
while cqe.is_none() {
cqe = cqes.next();
}
let cqe = cqe.unwrap();
println!("{:?}", cqe);
if cqe.user_data == 0xCAFEBABE {
println!("cafebabe");
} else if cqe.user_data == 0xDEADBEEF {
println!("deadbeef");
}
if let Ok(len) = cqe.result() {
let out = unsafe { std::str::from_utf8_unchecked(&buf[0..len as usize]) };
println!("{}", out);
}
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1 +0,0 @@
#include <linux/io_uring.h>


@ -1,149 +0,0 @@
use std::ptr;
use std::any::Any;
use std::ffi::CString;
use ptr_meta::DynMetadata;
/// Cancellation callback to clean up I/O resources
///
/// This allows IO actions to properly cancel and have their resources cleaned up without having to
/// worry about the current state of the io_uring queues.
pub struct Cancellation {
data: *mut (),
metadata: usize,
drop: unsafe fn (*mut (), usize),
}
pub unsafe trait Cancel {
fn into_raw(self) -> (*mut (), usize);
unsafe fn drop_raw(ptr: *mut (), metadata: usize);
}
pub unsafe trait CancelNarrow {
fn into_narrow_raw(self) -> *mut ();
unsafe fn drop_narrow_raw(ptr: *mut ());
}
unsafe impl<T: CancelNarrow> Cancel for T {
fn into_raw(self) -> (*mut (), usize) {
(T::into_narrow_raw(self), 0)
}
unsafe fn drop_raw(ptr: *mut (), _: usize) {
T::drop_narrow_raw(ptr)
}
}
unsafe impl<T> CancelNarrow for Box<T> {
fn into_narrow_raw(self) -> *mut () {
Box::into_raw(self) as *mut ()
}
unsafe fn drop_narrow_raw(ptr: *mut ()) {
drop(Box::from_raw(ptr))
}
}
unsafe impl<T> Cancel for Box<[T]> {
fn into_raw(self) -> (*mut (), usize) {
let len = self.len();
(Box::into_raw(self) as *mut (), len)
}
unsafe fn drop_raw(ptr: *mut (), metadata: usize) {
drop(Vec::from_raw_parts(ptr, metadata, metadata))
}
}
// Cancel impl for panics
unsafe impl Cancel for Box<dyn Any + Send + Sync> {
fn into_raw(self) -> (*mut (), usize) {
let ptr = Box::into_raw(self);
let metadata = ptr_meta::metadata(ptr as *mut dyn Any);
let metadata = unsafe {
// SAFETY: None. I happen to know that metadata is always exactly `usize`-sized for this
// type but only `std` can guarantee it.
std::mem::transmute::<_, usize>(metadata)
};
(ptr as *mut(), metadata)
}
unsafe fn drop_raw(ptr: *mut (), metadata: usize) {
let boxed: Box<dyn Any> = unsafe {
let metadata =
// SAFETY: We did it the other way around so this is safe if the previous step was.
std::mem::transmute::<_, DynMetadata<dyn Any>>(metadata);
// We can then (safely) construct a fat pointer from the metadata and data address
let ptr = ptr_meta::from_raw_parts_mut(ptr, metadata);
// SAFETY: We know the pointer is valid since `Self::into_raw` took ownership and the
// vtable was extracted from this known good reference.
Box::from_raw(ptr)
};
drop(boxed)
}
}
unsafe impl CancelNarrow for CString {
fn into_narrow_raw(self) -> *mut () {
self.into_raw() as *mut ()
}
unsafe fn drop_narrow_raw(ptr: *mut ()) {
drop(CString::from_raw(ptr as *mut libc::c_char));
}
}
unsafe impl CancelNarrow for () {
fn into_narrow_raw(self) -> *mut () {
ptr::null_mut()
}
unsafe fn drop_narrow_raw(_: *mut ()) {}
}
unsafe impl<T, F> Cancel for (T, F)
where T: CancelNarrow,
F: CancelNarrow,
{
fn into_raw(self) -> (*mut (), usize) {
let (t, f) = self;
let (t, _) = t.into_raw();
let (f, _) = f.into_raw();
(t, f as usize)
}
unsafe fn drop_raw(t: *mut (), f: usize) {
T::drop_raw(t, 0);
F::drop_raw(f as *mut (), 0);
}
}
impl Cancellation {
pub fn new<T: Cancel>(cancel: T) -> Self {
let (data, metadata) = cancel.into_raw();
Self { data, metadata, drop: T::drop_raw }
}
}
impl Drop for Cancellation {
fn drop(&mut self) {
unsafe {
(self.drop)(self.data, self.metadata)
}
}
}
impl<T: Cancel> From<T> for Cancellation {
fn from(cancel: T) -> Self {
Cancellation::new(cancel)
}
}
impl<T> From<Option<T>> for Cancellation
where Cancellation: From<T>
{
fn from(option: Option<T>) -> Self {
option.map_or(Cancellation::new(()), Cancellation::from)
}
}
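
Cancellation above erases the concrete buffer type into a data pointer, a usize of metadata and a drop function, so a cancelled operation can free its buffer once the kernel is done with it without knowing the type any more. As an illustrative aside (not part of the diff), the same erase-now-drop-later pattern, self-contained and restricted to Box<[u8]>:

// Type-erased deferred drop, mirroring Cancellation's (data, metadata, drop fn) layout.
struct ErasedBuf {
    data: *mut (),
    len: usize,
    drop_fn: unsafe fn(*mut (), usize),
}

impl ErasedBuf {
    fn new(buf: Box<[u8]>) -> Self {
        unsafe fn drop_raw(data: *mut (), len: usize) {
            // Rebuild the allocation with its original length and capacity, then let it drop.
            unsafe { drop(Vec::from_raw_parts(data as *mut u8, len, len)) }
        }
        let len = buf.len();
        let data = Box::into_raw(buf) as *mut ();
        ErasedBuf { data, len, drop_fn: drop_raw }
    }
}

impl Drop for ErasedBuf {
    fn drop(&mut self) {
        unsafe { (self.drop_fn)(self.data, self.len) }
    }
}

fn main() {
    // Hold the erased buffer while a (hypothetical) request is still in flight,
    // then drop it to free the allocation exactly once.
    let held = ErasedBuf::new(vec![0u8; 4096].into_boxed_slice());
    drop(held);
}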


@ -1,82 +0,0 @@
use std::cell::Cell;
use std::io;
use std::marker::PhantomData;
use std::mem::ManuallyDrop;
use std::task::Waker;
use crate::cancellation::Cancellation;
// TODO: Completions for linked requests? How would you handle having multiple results? In one
// Completion struct or using multiple? If the latter, prepare needs to set user_data
// for all intermediary SQE explicitly.
pub struct Completion {
state: ManuallyDrop<Box<Cell<State>>>,
}
enum State {
Submitted(Waker),
Completed(io::Result<i32>),
Cancelled(Cancellation),
Empty,
}
impl Completion {
pub fn new(waker: Waker) -> Self {
Self {
state: ManuallyDrop::new(Box::new(Cell::new(State::Submitted(waker)))),
}
}
pub(crate) unsafe fn from_raw(ptr: u64) -> Self {
let ptr = ptr as usize as *mut Cell<State>;
let state = ManuallyDrop::new(Box::from_raw(ptr));
Self {
state,
}
}
pub fn addr(&self) -> u64 {
self.state.as_ptr() as *const _ as usize as u64
}
pub fn check(self, waker: &Waker) -> Result<io::Result<i32>, Self> {
match self.state.replace(State::Empty) {
State::Submitted(old_waker) => {
// If the given waker wakes a different task than the one we were constructed
// with we must replace our waker.
if !old_waker.will_wake(waker) {
self.state.replace(State::Submitted(waker.clone()));
} else {
self.state.replace(State::Submitted(old_waker));
}
Err(self)
},
State::Completed(result) => {
Ok(result)
},
_ => unreachable!(),
}
}
pub fn cancel(self, callback: Cancellation) {
match self.state.replace(State::Cancelled(callback)) {
State::Completed(_) => {
drop(self.state);
},
State::Submitted(_) => {
},
_ => unreachable!(),
}
}
pub fn complete(self, result: io::Result<i32>) {
match self.state.replace(State::Completed(result)) {
State::Submitted(waker) => {
waker.wake();
},
State::Cancelled(callback) => {
drop(callback);
},
_ => unreachable!(),
}
}
}
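
Completion above works by smuggling the address of its boxed state through io_uring's 64-bit user_data field: addr() hands the pointer to the SQE and from_raw() recovers ownership when the matching CQE arrives. A self-contained illustration of that round trip (not part of the diff, no kernel involved):

use std::cell::Cell;

fn main() {
    // Box some per-request state and turn it into a plain integer, as Completion::addr() does.
    let state: Box<Cell<i32>> = Box::new(Cell::new(0));
    let user_data: u64 = Box::into_raw(state) as usize as u64; // would be stored in the SQE

    // ... the request is submitted; eventually a CQE echoes the same user_data back ...

    // Recover ownership from the integer, as Completion::from_raw() does, and record a result.
    let state = unsafe { Box::from_raw(user_data as usize as *mut Cell<i32>) };
    state.set(4096);
    assert_eq!(state.get(), 4096);
}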


@ -1,300 +0,0 @@
use std::cell::UnsafeCell;
use std::os::unix::prelude::RawFd;
use std::pin::Pin;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicU32, compiler_fence, Ordering};
use std::task::{Context, Poll, Waker};
use crossbeam_queue::SegQueue;
use nix::sys::mman::munmap;
use crate::completion::Completion;
use crate::cqe::CQE;
use crate::ctypes::{CQOffsets, IORING_CQ};
#[derive(Debug)]
pub struct CQ {
/// Head of the completion queue. Moved by the program to indicate that it has consumed
/// completions.
///
/// While it's important that the kernel sees the same value as the userspace program the
/// main problem that can happen otherwise is that the kernel assumes it lost completions
/// which we already successfully pulled from the queue.
head: &'static AtomicU32,
/// Tail of the completion queue. Moved by the kernel when new completions are stored.
///
/// Since this is modified by the kernel we should use atomic operations to read it, making
/// sure both the kernel and any program have a consistent view of its contents.
tail: &'static AtomicU32,
/// A cached version of `tail` which additionally counts reserved slots for future
/// completions, i.e. slots that the kernel will fill in the future.
predicted_tail: UnsafeCell<u32>,
ring_mask: u32,
num_entries: u32,
flags: &'static AtomicU32,
entries: &'static [CQE],
waiters: SegQueue<Waker>,
// cq_ptr is set to `None` if we used a single mmap for both SQ and CQ.
cq_ptr: *mut libc::c_void,
cq_map_size: usize,
}
impl Drop for CQ {
fn drop(&mut self) {
if !self.cq_ptr.is_null() {
unsafe { munmap(self.cq_ptr, self.cq_map_size) };
}
}
}
impl CQ {
pub unsafe fn new(ptr: *mut libc::c_void,
offs: CQOffsets,
cq_entries: u32,
split_mmap: bool,
cq_map_size: usize,
) -> Self {
// Sanity check the pointer and offsets. If these fail we were probably passed an
// offsets from an uninitialized parameter struct.
assert!(!ptr.is_null());
assert_ne!(offs.head, offs.tail);
// Eagerly extract static values. Since they won't ever change again there's no reason to
// not read them now.
let ring_mask = *(ptr.offset(offs.ring_mask as isize).cast());
let num_entries = *(ptr.offset(offs.ring_entries as isize).cast());
let head: &AtomicU32 = &*(ptr.offset(offs.head as isize).cast());
let tail: &AtomicU32 = &*(ptr.offset(offs.tail as isize).cast());
let predicted_tail = UnsafeCell::new(head.load(Ordering::Acquire));
let flags: &AtomicU32 = &*(ptr.offset(offs.flags as isize).cast());
let entries = std::slice::from_raw_parts(
ptr.offset(offs.cqes as isize).cast(),
cq_entries as usize
);
Self {
head,
predicted_tail,
tail,
ring_mask,
num_entries,
flags,
entries,
waiters: SegQueue::new(),
// Only store a pointer if we used a separate mmap() syscall for the CQ
cq_ptr: if split_mmap { ptr } else { std::ptr::null_mut() },
cq_map_size,
}
}
#[inline(always)]
fn predicted_tail(&self) -> &mut u32 {
unsafe { &mut *self.predicted_tail.get() }
}
#[inline(always)]
/// Currently used + reserved slots
pub fn used(&self) -> u32 {
let tail = *self.predicted_tail();
let head = self.head.load(Ordering::Relaxed);
compiler_fence(Ordering::Acquire);
tail.wrapping_sub(head)
}
#[inline(always)]
/// Amount of available slots taking reservations into account.
pub fn available(&self) -> u32 {
self.num_entries - self.used()
}
/// Try to reserve a number of CQ slots to make sure that
pub fn try_reserve(&self, count: u32) -> bool {
if self.available() >= count {
let tail = self.predicted_tail();
*tail = (*tail).wrapping_add(count);
true
} else {
false
}
}
pub fn poll_reserve(self: Pin<&mut Self>, ctx: &mut Context<'_>, count: u32) -> Poll<()> {
if self.available() >= count {
Poll::Ready(())
} else {
self.waiters.push(ctx.waker().clone());
Poll::Pending
}
}
pub fn get_next(&self) -> Option<&CQE> {
let tail = self.tail.load(Ordering::Relaxed);
let head = self.head.load(Ordering::Relaxed);
if tail == head {
None
} else {
compiler_fence(Ordering::Acquire);
self.head.fetch_add(1, Ordering::Release);
let index = (head & self.ring_mask) as usize;
Some(&self.entries[index])
}
}
pub fn ready(&self) -> u32 {
let tail = self.tail.load(Ordering::Relaxed);
let head = self.head.load(Ordering::Relaxed);
compiler_fence(Ordering::Acquire);
tail.wrapping_sub(head)
}
pub fn handle(&self, handler: impl Fn(&CQE)) {
let tail = self.tail.load(Ordering::Relaxed);
let head = self.head.load(Ordering::Relaxed);
for i in head..tail {
let index = (i & self.ring_mask) as usize;
let cqe = &self.entries[index];
handler(cqe);
}
compiler_fence(Ordering::Acquire);
self.head.store(tail, Ordering::Release);
}
#[cfg(test)]
fn test_insert_cqe(&self, cqe: impl Iterator<Item=CQE>) {
let head = self.head.load(Ordering::Relaxed);
let mut tail = self.tail.load(Ordering::Acquire);
unsafe {
for entry in cqe {
let index = (tail & self.ring_mask) as usize;
// Yes, this is absolutely not safe or defined behaviour in the first place. This
// function must *never* be used outside simple testing setups.
let ptr = &self.entries[index] as *const _ as *mut CQE;
ptr.write(entry);
tail += 1;
// If we would overflow, crash instead
assert!((tail - head) <= self.num_entries, "test_insert_cqe overflowed the buffer");
}
}
self.tail.store(tail, Ordering::Release);
}
}
mod tests {
use std::sync::atomic::AtomicU64;
use super::*;
fn gen_cq(num_entries: u32) -> CQ {
let head = Box::leak(Box::new(AtomicU32::new(0)));
let tail = Box::leak(Box::new(AtomicU32::new(0)));
let flags = Box::leak(Box::new(AtomicU32::new(0)));
let entries = Box::leak((0..num_entries).map(|_| CQE::default()).collect());
CQ {
head,
tail,
predicted_tail: UnsafeCell::new(0),
ring_mask: num_entries - 1,
num_entries,
flags,
entries,
cq_ptr: std::ptr::null_mut(),
cq_map_size: 0,
waiters: SegQueue::new(),
}
}
#[test]
fn test_test_insert_cqe() {
let cq = gen_cq(4);
cq.test_insert_cqe([
CQE {
user_data: 1,
.. Default::default()
},
CQE {
user_data: 2,
.. Default::default()
},
CQE {
user_data: 3,
.. Default::default()
},
CQE {
user_data: 4,
.. Default::default()
},
].into_iter());
println!("{:?}", cq.entries);
for i in 0..4 {
assert_eq!(cq.entries[i].user_data, (i+1) as u64);
}
}
#[test]
#[should_panic]
fn test_test_insert_cqe_overflow() {
let cq = gen_cq(2);
cq.test_insert_cqe([
CQE {
user_data: 1,
.. Default::default()
},
CQE {
user_data: 2,
.. Default::default()
},
CQE {
user_data: 3,
.. Default::default()
},
CQE {
user_data: 4,
.. Default::default()
},
].into_iter());
println!("{:?}", cq.entries);
}
#[test]
fn test_cq_reserve_insert() {
let cq = gen_cq(4);
assert_eq!(cq.tail.load(Ordering::Relaxed), 0);
assert_eq!(cq.head.load(Ordering::Relaxed), 0);
assert_eq!(*cq.predicted_tail(), 0);
cq.try_reserve(2);
assert_eq!(cq.tail.load(Ordering::Relaxed), 0);
assert_eq!(*cq.predicted_tail(), 2);
cq.test_insert_cqe([
CQE {
user_data: 1,
.. Default::default()
},
CQE {
user_data: 2,
.. Default::default()
},
].into_iter());
assert_eq!(cq.head.load(Ordering::Relaxed), 0);
assert_eq!(cq.tail.load(Ordering::Relaxed), 2);
assert_eq!(*cq.predicted_tail(), 2);
let mut o = AtomicU64::new(1);
cq.handle(|cqe| {
assert_eq!(cqe.user_data, o.fetch_add(1, Ordering::Relaxed))
});
assert_eq!(o.load(Ordering::Relaxed), 3);
}
}
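
Both queues measure how full they are with free-running 32-bit counters: used() is tail.wrapping_sub(head), which stays correct even after the counters overflow because only their distance matters, and the ring slot is counter & ring_mask. A self-contained check of that invariant (not part of the diff):

// head and tail only ever grow (wrapping); the ring index is (counter & ring_mask).
fn used(head: u32, tail: u32) -> u32 {
    tail.wrapping_sub(head)
}

fn main() {
    assert_eq!(used(30, 34), 4);
    // Still 4 in-flight entries when tail has wrapped past u32::MAX while head has not.
    assert_eq!(used(u32::MAX - 1, 2), 4);
}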


@ -1,137 +0,0 @@
use std::io;
use std::ptr::NonNull;
use std::sync::atomic::Ordering;
use crate::cq::CQ;
use crate::io_uring::{IoUring};
#[repr(C)]
#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)]
/// Completion Queue Event
pub struct CQE {
pub user_data: u64,
pub(crate) res: i32,
pub flags: IOCQE,
}
impl CQE {
pub fn raw_result(&self) -> i32 {
self.res
}
pub fn result(&self) -> io::Result<i32> {
if self.res < 0 {
let err = io::Error::from_raw_os_error(-self.res);
Err(err)
} else {
Ok(self.res)
}
}
}
pub struct CQEs<'a> {
cq: &'a CQ,
ready: u32,
}
impl<'a> CQEs<'a> {
pub fn new(cq: &'a CQ) -> Self {
Self { cq, ready: 0 }
}
fn get(&mut self) -> Option<CQE> {
self.cq.get_next().map(|cqe| *cqe)
}
fn ready(&mut self) -> u32 {
self.cq.ready()
}
}
impl<'a> Iterator for CQEs<'a> {
type Item = CQE;
fn next(&mut self) -> Option<Self::Item> {
if self.ready == 0 {
self.ready = self.ready();
if self.ready == 0 {
return None;
}
}
self.ready -= 1;
self.get()
}
}
bitflags::bitflags! {
#[derive(Default)]
#[repr(C)]
pub struct IOCQE: u32 {
const F_BUFFER = 1;
const F_MORE = 1 << 1;
}
}
static_assertions::assert_eq_size!(u32, IOCQE);
mod tests {
use super::*;
#[test]
fn test_result_into_std() {
let cqe = CQE { res: 0, .. Default::default() };
assert_eq!(cqe.result().unwrap(), 0);
let cqe = CQE { res: 42567, .. Default::default() };
assert_eq!(cqe.result().unwrap(), 42567);
let cqe = CQE { res: -32, .. Default::default() };
assert_eq!(cqe.result().unwrap_err().kind(), io::ErrorKind::BrokenPipe);
let cqe = CQE { res: -2, .. Default::default() };
assert_eq!(cqe.result().unwrap_err().kind(), io::ErrorKind::NotFound);
}
#[test]
fn test_layout_io_uring_cqe() {
assert_eq!(
::std::mem::size_of::<CQE>(),
16usize,
concat!("Size of: ", stringify!(io_uring_cqe))
);
assert_eq!(
::std::mem::align_of::<CQE>(),
8usize,
concat!("Alignment of ", stringify!(io_uring_cqe))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<CQE>())).user_data as *const _ as usize },
0usize,
concat!(
"Offset of field: ",
stringify!(io_uring_cqe),
"::",
stringify!(user_data)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<CQE>())).res as *const _ as usize },
8usize,
concat!(
"Offset of field: ",
stringify!(io_uring_cqe),
"::",
stringify!(res)
)
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<CQE>())).flags as *const _ as usize },
12usize,
concat!(
"Offset of field: ",
stringify!(io_uring_cqe),
"::",
stringify!(flags)
)
);
}
}

File diff suppressed because it is too large Load Diff


@ -1,47 +0,0 @@
use std::cell::Cell;
use std::io::IoSliceMut;
use std::os::unix::prelude::RawFd;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures_io::AsyncRead;
use crate::completion::Completion;
use crate::ctypes::IORING_OP;
use crate::io_uring::IoUring;
use crate::sqe::{SQE, SQEs};
use crate::submission::Submission;
pub struct File {
fd: RawFd,
submission: Submission,
}
impl File {
pub fn new(fd: RawFd, io_uring: &'static IoUring) -> Self {
Self { fd, submission: Submission::new(io_uring) }
}
fn prepare_read<'sq>(
fd: RawFd,
buf: &mut [u8],
sqes: &mut SQEs<'sq>,
) -> SQE<'sq>
{
let mut sqe = sqes.next().expect("prepare_read requires at least one SQE");
sqe.set_opcode(IORING_OP::READ);
sqe.set_address(buf.as_ptr() as u64);
sqe.set_fd(fd);
sqe.set_len(buf.len() as i32);
sqe
}
}
impl AsyncRead for File {
fn poll_read(mut self: Pin<&mut Self>, ctx: &mut Context<'_>, buf: &mut [u8])
-> Poll<std::io::Result<usize>>
{
let fd = self.fd;
Pin::new(&mut self.submission).poll(ctx, 1, |sqes| {
Self::prepare_read(fd, buf, sqes)
}).map(|res| res.map(|val| val as usize))
}
}


@ -1,168 +0,0 @@
use std::fmt::{Debug, Formatter};
use std::future::Future;
use std::io;
use std::marker::PhantomData;
use std::mem::{size_of, align_of};
use std::ops::Deref;
use std::sync::atomic::{AtomicU32, Ordering};
use std::os::unix::prelude::RawFd;
use std::pin::Pin;
use std::ptr::NonNull;
use std::sync::Arc;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
use nix::sys::{mman, mman::{MapFlags, ProtFlags}};
use crate::completion::Completion;
use crate::cq::CQ;
use crate::cqe::{CQE, CQEs};
use crate::ctypes::{CQOffsets, IORING_ENTER, SQOffsets};
use crate::sq::SQ;
use crate::sqe::{SQE, SQEs};
use super::ctypes::{Params, io_uring_sqe, IORING_CQ, IORING_FEAT,
IORING_OFF_CQ_RING, IORING_OFF_SQ_RING, IORING_OFF_SQES, IORING_SQ};
use super::syscall;
#[derive(Debug)]
pub struct IoUring {
fd: RawFd,
params: Params,
sq: SQ,
cq: CQ,
}
unsafe fn mmap(map_size: usize, fd: RawFd, offset: i64) -> nix::Result<*mut libc::c_void> {
mman::mmap(
std::ptr::null_mut(),
map_size,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
MapFlags::MAP_SHARED | MapFlags::MAP_POPULATE,
fd,
offset
)
}
impl IoUring {
pub fn setup(entries: u32) -> io::Result<Self> {
let mut params = Params::default();
let fd = syscall::setup(entries, &mut params)?;
let mut sq_map_size = (params.sq_off.array as usize) +
(params.sq_entries as usize) * size_of::<u32>();
let mut cq_map_size = (params.cq_off.cqes as usize) +
(params.cq_entries as usize) * size_of::<CQE>();
// If we can use a single mmap() syscall to map sq, cq and cqe the size of the total map
// is the largest of `sq_map_size` and `cq_map_size`.
if params.features.contains(IORING_FEAT::SINGLE_MMAP) {
sq_map_size = sq_map_size.max(cq_map_size);
cq_map_size = sq_map_size;
}
let sq_ptr = unsafe {
mmap(sq_map_size as usize, fd, IORING_OFF_SQ_RING as i64)?
};
let sqes_map_size = (params.sq_entries as usize) * size_of::<io_uring_sqe>();
let sqes = unsafe {
let ptr = mmap(sqes_map_size, fd, IORING_OFF_SQES as i64)?.cast();
std::slice::from_raw_parts_mut(ptr, params.sq_entries as usize)
};
let sq = unsafe {
SQ::new(sq_ptr,
params.sq_off,
sqes,
sq_map_size,
sqes_map_size
)
};
let cq_ptr = if params.features.contains(IORING_FEAT::SINGLE_MMAP) {
sq_ptr
} else {
unsafe {
mmap(cq_map_size, fd, IORING_OFF_CQ_RING as i64)?
}
};
let cq = unsafe {
CQ::new(cq_ptr,
params.cq_off,
params.cq_entries,
sq_ptr != cq_ptr,
cq_map_size,
)
};
Ok(IoUring {
fd,
params,
sq,
cq,
})
}
pub fn try_prepare(
&self,
count: u32,
prepare: impl FnOnce(SQEs<'_>)
) -> Option<()> {
self.handle_completions();
if !self.cq.try_reserve(count) {
return None;
}
if let Some(sqes) = self.sq.try_reserve(count) {
let start = sqes.start();
prepare(sqes);
self.sq.prepare(start, count);
Some(())
} else {
None
}
}
pub fn poll_prepare<'cx>(
mut self: Pin<&mut Self>,
ctx: &mut Context<'cx>,
count: u32,
prepare: impl for<'sq> FnOnce(SQEs<'sq>, &mut Context<'cx>) -> Completion
) -> Poll<Completion> {
Pin::new(&mut self.sq).poll_prepare(ctx, count, prepare)
}
pub fn poll_submit<'cx>(
mut self: Pin<&mut Self>,
ctx: &mut Context<'cx>,
head: u32,
) -> Poll<()> {
let fd = self.fd;
Pin::new(&mut self.sq).poll_submit(ctx, fd, head)
}
pub fn submit_wait(&self) -> io::Result<u32> {
self.sq.submit_wait(self.fd)
}
pub fn handle_completions(&self) {
self.cq.handle(|cqe| {
let udata = cqe.user_data;
if udata != 0 {
let completion = unsafe {
Completion::from_raw(udata)
};
completion.complete(cqe.result())
}
});
}
}
impl Future for &IoUring {
type Output = io::Result<()>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.handle_completions();
match self.sq.submit(self.fd, Some(cx.waker())) {
Ok(_) => Poll::Pending,
Err(e) => Poll::Ready(Err(e)),
}
}
}
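
setup() above derives the two ring mapping sizes from the offsets and entry counts the kernel reports, and when the kernel advertises the SINGLE_MMAP feature both rings share one mapping sized to the larger of the two. A self-contained sketch of just that size rule (not part of the diff; the byte counts are made up):

// Returns (sq_map_size, cq_map_size) following the rule used in IoUring::setup().
fn ring_map_sizes(sq_size: usize, cq_size: usize, single_mmap: bool) -> (usize, usize) {
    if single_mmap {
        let shared = sq_size.max(cq_size);
        (shared, shared)
    } else {
        (sq_size, cq_size)
    }
}

fn main() {
    assert_eq!(ring_map_sizes(320, 1088, false), (320, 1088));
    assert_eq!(ring_map_sizes(320, 1088, true), (1088, 1088));
}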

View File

@ -1,26 +0,0 @@
// Raw typedefs and structs for kernel communication via syscalls
pub mod ctypes;
mod syscall;
pub mod io_uring;
mod sq;
mod sqe;
mod cq;
mod cqe;
mod submission;
mod completion;
mod cancellation;
pub mod fs;
#[macro_export]
macro_rules! ready {
($e:expr $(,)?) => {
match $e {
std::task::Poll::Ready(t) => t,
std::task::Poll::Pending => return std::task::Poll::Pending,
}
};
}
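
The ready! macro exported above is the usual early-return helper for hand-written poll functions (std later stabilized the same pattern as std::task::ready!). An illustrative usage, with the macro repeated locally so the snippet stands alone (not part of the diff):

use std::task::Poll;

macro_rules! ready {
    ($e:expr $(,)?) => {
        match $e {
            std::task::Poll::Ready(t) => t,
            std::task::Poll::Pending => return std::task::Poll::Pending,
        }
    };
}

// Typical shape of a poll fn: forward Pending early, otherwise keep working with the value.
fn poll_double(inner: Poll<u32>) -> Poll<u32> {
    let value = ready!(inner);
    Poll::Ready(value * 2)
}

fn main() {
    assert_eq!(poll_double(Poll::Ready(21)), Poll::Ready(42));
    assert_eq!(poll_double(Poll::Pending), Poll::Pending);
}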


@ -1,5 +0,0 @@
pub struct CQE {
}


@ -1,529 +0,0 @@
use std::cell::{Cell, UnsafeCell};
use std::fmt::{Debug, Formatter};
use std::io;
use std::mem::ManuallyDrop;
use std::os::unix::prelude::RawFd;
use std::pin::Pin;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicU32, compiler_fence, fence, Ordering};
use std::task::{Context, Poll, Waker};
use crossbeam_queue::SegQueue;
use nix::sys::mman::munmap;
use crate::completion::Completion;
use crate::ctypes::{IORING_ENTER, IORING_SQ, io_uring_sqe, SQOffsets};
use crate::sqe::{SQE, SQEs};
use crate::syscall;
pub struct SQ {
/// Head of the submission queue. This value is set by the kernel when it consumes SQE.
/// Thus we need to use atomic operations when passing information, making sure both the kernel
/// and program have a consistent view of its contents.
array_head: &'static AtomicU32,
/// The head of the sqes buffer. This value is our local cache of `array_head` that's not
/// shared with or modified by the kernel. We use it to index the start of the prepared SQE.
/// This means that this value lags behind after `array_head`.
sqes_head: UnsafeCell<u32>,
/// Tail of the submission queue. While this will be modified by the userspace program only,
/// the kernel uses atomic operations to read it so we want to use atomic operations to write
/// it.
array_tail: &'static AtomicU32,
// non-atomic cache of array_tail
cached_tail: UnsafeCell<u32>,
/// Tail of the sqes buffer. This value serves as our local cache of `array_tail` and, in
/// combination with `sqes_head` allows us to more efficiently submit SQE by skipping already
/// submitted ones.
/// `sqes_tail` marks the end of the prepared SQE.
sqes_tail: UnsafeCell<u32>,
ring_mask: u32,
num_entries: u32,
flags: &'static AtomicU32,
dropped: &'static AtomicU32,
array: &'static [AtomicU32],
sqes: &'static mut [UnsafeCell<io_uring_sqe>],
sq_ptr: NonNull<()>,
sq_map_size: usize,
sqes_map_size: usize,
/// Queue of tasks waiting for a submission, either because they need free slots or because
waiters: SegQueue<Waker>,
submitter: Cell<Option<Waker>>,
}
static_assertions::assert_not_impl_any!(SQ: Send, Sync);
impl Drop for SQ {
fn drop(&mut self) {
unsafe {
munmap(self.sq_ptr.as_ptr().cast(), self.sq_map_size);
let sqes_ptr: *mut libc::c_void = self.sqes.as_mut_ptr().cast();
munmap(sqes_ptr, self.sqes_map_size);
}
}
}
impl Debug for SQ {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
unsafe {
// TODO: Complete
f.debug_struct("SQ")
.field("head", self.array_head)
.field("tail", self.array_tail)
.field("ring_mask", &self.ring_mask)
.field("num_entries", &self.num_entries)
.field("flags", self.flags)
.field("dropped", self.dropped)
.field("array", &self.array)
.finish()
}
}
}
impl SQ {
pub unsafe fn new(ptr: *mut libc::c_void,
offs: SQOffsets,
sqes: &'static mut [UnsafeCell<io_uring_sqe>],
sq_map_size: usize,
sqes_map_size: usize,
) -> Self {
// Sanity check the pointer and offsets. If these fail we were probably passed an
// offsets from an uninitialized parameter struct.
assert!(!ptr.is_null());
assert_ne!(offs.head, offs.tail);
// Eagerly extract static values. Since they won't ever change again there's no reason to
// not read them now.
let ring_mask = *(ptr.offset(offs.ring_mask as isize).cast());
let num_entries = *(ptr.offset(offs.ring_entries as isize).cast());
// These are valid Rust references; they are valid for the entire lifetime of self,
// properly initialized by the kernel and well aligned.
let array_head: &AtomicU32 = &*(ptr.offset(offs.head as isize).cast());
let sqes_head = UnsafeCell::new(array_head.load(Ordering::Acquire));
let array_tail: &AtomicU32 = &*ptr.offset(offs.tail as isize).cast();
let sqes_tail = UnsafeCell::new(array_tail.load(Ordering::Acquire));
let cached_tail = UnsafeCell::new(array_tail.load(Ordering::Acquire));
let flags = &*ptr.offset(offs.flags as isize).cast();
let dropped = &*ptr.offset(offs.dropped as isize).cast();
let array = std::slice::from_raw_parts(
ptr.offset(offs.array as isize).cast(),
sqes.len() as usize,
);
let sq_ptr = NonNull::new_unchecked(ptr).cast();
Self {
array_head,
sqes_head,
array_tail,
sqes_tail,
cached_tail,
ring_mask,
num_entries,
flags,
dropped,
array,
sqes,
sq_ptr,
sq_map_size,
sqes_map_size,
waiters: SegQueue::new(),
submitter: Cell::new(None),
}
}
#[inline(always)]
fn sqes_head(&self) -> &mut u32 {
unsafe { &mut *self.sqes_head.get() }
}
#[inline(always)]
fn sqes_tail(&self) -> &mut u32 {
unsafe { &mut *self.sqes_tail.get() }
}
#[inline(always)]
fn cached_tail(&self) -> &mut u32 {
unsafe { &mut *self.cached_tail.get() }
}
#[inline(always)]
fn increment_tail(&self, count: u32) -> u32 {
let tail = self.sqes_tail();
let old = *tail;
*tail = (*tail).wrapping_add(count);
old
}
#[inline(always)]
fn increment_head(&self, count: u32) -> u32{
let head = self.sqes_head();
let old = *head;
*head = (*head).wrapping_add(count);
old
}
#[inline(always)]
fn used(&self) -> u32 {
(*self.sqes_tail()).wrapping_sub(*self.sqes_head())
}
#[inline(always)]
fn available(&self) -> u32 {
self.num_entries - self.used()
}
#[inline(always)]
fn to_submit(&self) -> u32 {
let shared_tail = self.array_tail.load(Ordering::Relaxed);
let cached_tail = *self.cached_tail();
cached_tail.wrapping_sub(shared_tail)
}
pub fn submit_wait(&self, fd: RawFd) -> io::Result<u32> {
// Ensure that the writes into the array are not moved after the write of the tail.
// Otherwise kernelside may read completely wrong indices from array.
compiler_fence(Ordering::Release);
self.array_tail.store(*self.cached_tail(), Ordering::Release);
let retval = syscall::enter(
fd,
self.num_entries,
1,
IORING_ENTER::GETEVENTS,
std::ptr::null(),
0,
)? as u32;
// Return SQE into circulation that we successfully submitted to the kernel.
self.increment_head(retval);
self.notify();
Ok(retval)
}
/// Submit all prepared entries to the kernel. This function will return the number of
/// entries successfully submitted to the kernel.
pub fn submit(&self, fd: RawFd, waker: Option<&Waker>) -> io::Result<u32> {
if let Some(waker) = waker {
let new = if let Some(old) = self.submitter.take() {
if old.will_wake(waker) { old } else { waker.clone() }
} else {
waker.clone()
};
self.submitter.set(Some(new));
}
// Ensure that the writes into the array are not moved after the write of the tail.
// Otherwise kernelside may read completely wrong indices from array.
compiler_fence(Ordering::Release);
self.array_tail.store(*self.cached_tail(), Ordering::Release);
let retval = syscall::enter(
fd,
self.num_entries,
0,
IORING_ENTER::GETEVENTS,
std::ptr::null(),
0,
)? as u32;
// Return SQE into circulation that we successfully submitted to the kernel.
self.increment_head(retval);
self.notify();
Ok(retval)
}
/// Prepare actions for submission by shuffling them into the correct order.
///
/// Kernelside `array` is used to index into the sqes, more specifically the code behaves
/// like this:
/// ```C
/// u32 mask = ctx->sq_entries - 1;
/// u32 sq_idx = ctx->cached_sq_head++ & mask;
/// u32 head = READ_ONCE(ctx->sq_array[sq_idx]);
/// if (likely(head < ctx->sq_entries))
/// return &ctx->sq_sqes[head];
/// ```
/// Where `ctx->sq_entries` is the number of slots in the ring (i.e. simply a boundary check).
///
/// So we need to make sure that for every new entry since we last submitted we have the
/// correct index set. In our case shuffle will map the next `count` entries in `self.array` to
/// point to `count` entries in `self.sqes` starting at `start`. This allows actions to be
/// submitted to the kernel even when there are still reserved SQE in between that weren't yet
/// filled.
pub fn prepare(&self, start: u32, count: u32) {
// Load the tail of the array (i.e. where we will start filling)
let tail = self.cached_tail();
let mut head = start;
for _ in 0..count {
let index = (*tail & self.ring_mask) as usize;
// We can allow this store to be an Relaxed operation since updating the shared tail
// is done after a memory barrier.
self.array[index].store(head & self.ring_mask, Ordering::Relaxed);
// Same here. We need to take the overflow into account but don't have to explicitly
// handle it.
head = head.wrapping_add(1);
*tail = (*tail).wrapping_add(1);
}
// FIXME: This should really be done by epoll
if let Some(waker) = self.submitter.take() {
waker.wake_by_ref();
self.submitter.set(Some(waker));
}
}
pub fn poll_prepare<'cx>(
self: Pin<&mut Self>,
ctx: &mut Context<'cx>,
count: u32,
prepare: impl for<'sq> FnOnce(SQEs<'sq>, &mut Context<'cx>) -> Completion
) -> Poll<Completion> {
if let Some(sqes) = self.try_reserve(count) {
let start = sqes.start();
let completion = prepare(sqes, ctx);
self.prepare(start, count);
Poll::Ready(completion)
} else {
self.waiters.push(ctx.waker().clone());
Poll::Pending
}
}
/// Suggest submitting pending events to the kernel. Returns `Ready` once the relevant event
/// has been submitted, i.e. once the kernel-side `head` >= the given `head`.
pub fn poll_submit(self: Pin<&mut Self>, ctx: &mut Context<'_>, fd: RawFd, head: u32)
-> Poll<()>
{
let shared_tail = self.array_tail.load(Ordering::Relaxed);
let cached_tail = *self.cached_tail();
let to_submit = cached_tail.wrapping_sub(shared_tail);
// TODO: Do some smart cookie thinking here and batch submissions in a sensible way
if to_submit > 4 {
self.submit(fd, None);
}
if *self.sqes_head() < head {
self.waiters.push(ctx.waker().clone());
Poll::Pending
} else {
Poll::Ready(())
}
}
pub fn notify(&self) {
if self.waiters.len() > 0 && self.available() > 0 {
while let Some(waker) = self.waiters.pop() {
waker.wake()
}
}
}
pub fn try_reserve(&self, count: u32) -> Option<SQEs<'_>> {
if self.available() >= count {
let start = self.increment_tail(count);
Some(SQEs::new(self.sqes, start, count))
} else {
None
}
}
}
#[cfg(test)]
mod tests {
use std::mem::ManuallyDrop;
use std::sync::atomic::Ordering::Relaxed;
use crate::ctypes::{IORING_OP, IOSQE};
use super::*;
fn gen_sq(num_entries: u32, head: u32, tail: u32) -> ManuallyDrop<SQ> {
assert!((0 < num_entries && num_entries <= 4096), "entries must be between 1 and 4096");
assert_eq!(num_entries.count_ones(), 1, "entries must be a power of two");
let array_head = Box::leak(Box::new(AtomicU32::new(head)));
let array_tail = Box::leak(Box::new(AtomicU32::new(tail)));
let flags = Box::leak(Box::new(AtomicU32::new(0)));
let dropped = Box::leak(Box::new(AtomicU32::new(0)));
let array = Box::leak((0..num_entries)
.map(|n| AtomicU32::new(n))
.collect::<Box<[_]>>());
let sqes = Box::leak((0..num_entries)
.map(|_| UnsafeCell::new(io_uring_sqe::default()))
.collect::<Box<[_]>>());
unsafe {
ManuallyDrop::new(SQ {
array_head,
sqes_head: UnsafeCell::new(head),
array_tail,
sqes_tail: UnsafeCell::new(tail),
cached_tail: UnsafeCell::new(0),
ring_mask: num_entries - 1,
num_entries,
flags,
dropped,
array,
sqes,
sq_ptr: NonNull::dangling(),
sq_map_size: 0,
sqes_map_size: 0,
waiters: SegQueue::new(),
submitter: Cell::new(None),
})
}
}
#[test]
fn test_head_tail() {
let mut sq = gen_sq(64, 30, 30);
assert_eq!(*sq.sqes_head(), 30);
assert_eq!(*sq.sqes_tail(), 30);
assert_eq!(sq.used(), 0);
assert_eq!(sq.available(), 64);
sq.increment_tail(4);
assert_eq!(*sq.sqes_head(), 30);
assert_eq!(*sq.sqes_tail(), 34);
assert_eq!(sq.used(), 4);
assert_eq!(sq.available(), 60);
sq.increment_head(2);
assert_eq!(*sq.sqes_head(), 32);
assert_eq!(*sq.sqes_tail(), 34);
assert_eq!(sq.used(), 2);
assert_eq!(sq.available(), 62);
}
#[test]
fn test_sq_getter_setter() {
let mut sq = gen_sq(64, 30, 30);
assert_eq!(*sq.sqes_head(), 30);
assert_eq!(*sq.sqes_tail(), 30);
assert_eq!(sq.used(), 0);
assert_eq!(sq.available(), 64);
{
let mut sqes = sq.try_reserve(2).unwrap();
assert_eq!(sq.used(), 2);
let mut sqe = sqes.next().unwrap();
sqe.set_opcode(IORING_OP::READV);
sqe.add_flags(IOSQE::IO_HARDLINK);
let mut sqe = sqes.next().unwrap();
sqe.set_opcode(IORING_OP::WRITEV);
sqe.set_userdata(823);
}
assert_eq!(sq.used(), 2);
{
let sqes = &mut sq.sqes;
assert_eq!(sqes[30].get_mut().opcode, IORING_OP::READV);
assert_eq!(sqes[30].get_mut().flags, IOSQE::IO_HARDLINK);
assert_eq!(sqes[31].get_mut().opcode, IORING_OP::WRITEV);
assert_eq!(sqes[31].get_mut().user_data, 823);
}
}
#[test]
fn test_sq_full() {
let mut sq = gen_sq(64, 1, 65);
let sqe = sq.try_reserve(1);
assert!(sqe.is_none());
}
#[test]
fn test_out_of_order_submit() {
let mut sq = gen_sq(64, 0, 0);
let start;
{
let mut sqes = sq.try_reserve(4).unwrap();
start = sqes.start();
let mut sqe = sqes.next().unwrap();
sqe.set_opcode(IORING_OP::READV);
sqe.add_flags(IOSQE::IO_HARDLINK);
sqe.set_address(1);
let mut sqe = sqes.next().unwrap();
sqe.set_opcode(IORING_OP::READV);
sqe.add_flags(IOSQE::IO_HARDLINK);
sqe.set_address(2);
let mut sqe = sqes.next().unwrap();
sqe.set_opcode(IORING_OP::READV);
sqe.add_flags(IOSQE::IO_HARDLINK);
sqe.set_address(3);
let mut sqe = sqes.next().unwrap();
sqe.set_opcode(IORING_OP::READV);
sqe.set_address(4);
sqe.set_userdata(823);
}
assert_eq!(sq.used(), 4);
let start2;
{
let mut sqes = sq.try_reserve(4).unwrap();
start2 = sqes.start();
let mut sqe = sqes.next().unwrap();
sqe.set_opcode(IORING_OP::WRITEV);
sqe.add_flags(IOSQE::IO_LINK);
sqe.set_address(1);
let mut sqe = sqes.next().unwrap();
sqe.set_opcode(IORING_OP::WRITEV);
sqe.add_flags(IOSQE::IO_LINK);
sqe.set_address(2);
let mut sqe = sqes.next().unwrap();
sqe.set_opcode(IORING_OP::WRITEV);
sqe.add_flags(IOSQE::IO_LINK);
sqe.set_address(3);
let mut sqe = sqes.next().unwrap();
sqe.set_opcode(IORING_OP::WRITEV);
sqe.set_address(4);
sqe.set_userdata(0xDEADBEEF);
}
assert_eq!(sq.used(), 8);
sq.prepare(start2, 4);
sq.prepare(start, 4);
let sqes: Vec<_> = sq.sqes.iter_mut()
.map(|c| c.get_mut().clone())
.collect();
let out: Vec<_> = sq.array.iter().map(|n| {
let i = n.load(Relaxed) as usize;
sqes[i]
}).collect();
for (n, s) in out.iter().take(4).enumerate() {
assert_eq!(s.opcode, IORING_OP::WRITEV);
assert_eq!(s.address, n as u64 + 1);
if n == 3 {
assert_eq!(s.user_data, 0xDEADBEEF);
} else {
assert_eq!(s.flags, IOSQE::IO_LINK);
}
}
for (n, s) in out.iter().skip(4).take(4).enumerate() {
assert_eq!(s.opcode, IORING_OP::READV);
assert_eq!(s.address, n as u64 + 1);
if n == 3 {
assert_eq!(s.user_data, 823);
} else {
assert_eq!(s.flags, IOSQE::IO_HARDLINK);
}
}
for sqe in out.iter().skip(8) {
assert_eq!(*sqe, io_uring_sqe::default());
}
}
}
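Taken together, the queue above is driven roughly as follows. This is a hedged sketch, not part of the crate: the `sq` and `ring_fd` values are assumed to come from ring setup elsewhere, and error handling is elided.

```rust
use std::os::unix::io::RawFd;
use crate::ctypes::IORING_OP;

// Sketch only: reserve two entries, fill them, publish them, and submit.
fn queue_two_nops(sq: &SQ, ring_fd: RawFd) -> std::io::Result<u32> {
    if let Some(sqes) = sq.try_reserve(2) {
        // Remember where the reservation starts before the iterator is consumed.
        let start = sqes.start();
        // Fill the reserved entries; anything left untouched is wiped to a NOP on drop.
        for mut sqe in sqes {
            sqe.set_opcode(IORING_OP::NOP);
        }
        // Map the filled entries into the kernel-visible index array...
        sq.prepare(start, 2);
        // ...and hand them to the kernel without registering a waker.
        sq.submit(ring_fd, None)
    } else {
        // The ring is currently full; nothing was submitted.
        Ok(0)
    }
}
```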

View File

@ -1,362 +0,0 @@
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::os::unix::prelude::RawFd;
use std::slice::IterMut;
use crate::ctypes::{IORING_OP, IOSQE, io_uring_sqe, SQEOpFlags};
#[derive(Debug)]
pub struct SQE<'iou> {
sqe: &'iou mut io_uring_sqe,
}
impl<'iou> SQE<'iou> {
pub fn new(sqe: &'iou mut io_uring_sqe) -> Self {
Self { sqe }
}
#[inline(always)]
pub fn add_flags(&mut self, flags: IOSQE) {
self.sqe.flags |= flags;
}
#[inline(always)]
pub fn set_opcode(&mut self, opcode: IORING_OP) {
self.sqe.opcode = opcode;
}
#[inline(always)]
pub fn set_userdata(&mut self, user_data: u64) {
self.sqe.user_data = user_data;
}
#[inline(always)]
pub fn set_address(&mut self, address: u64) {
self.sqe.address = address;
}
#[inline(always)]
pub fn set_len(&mut self, len: i32) {
self.sqe.len = len;
}
#[inline(always)]
pub fn set_fd(&mut self, fd: RawFd) {
self.sqe.fd = fd;
}
#[inline(always)]
pub fn set_offset(&mut self, offset: u64) {
self.sqe.offset = offset;
}
#[inline(always)]
pub fn set_op_flags(&mut self, op_flags: SQEOpFlags) {
self.sqe.op_flags = op_flags;
}
pub fn prepare_cancel(&mut self, user_data: u64) {
self.set_opcode(IORING_OP::ASYNC_CANCEL);
self.set_address(user_data);
}
}
pub struct SQEs<'iou> {
slice: &'iou [UnsafeCell<io_uring_sqe>],
mask: u32,
start: u32,
count: u32,
capacity: u32,
}
impl<'iou> SQEs<'iou> {
pub(crate) fn new(slice: &'iou [UnsafeCell<io_uring_sqe>], start: u32, capacity: u32)
-> Self
{
let mask = (slice.len() - 1) as u32;
Self { slice, mask, count: 0, start, capacity }
}
pub fn last(&mut self) -> Option<SQE<'iou>> {
let mut last = None;
while let Some(sqe) = self.consume() { last = Some(sqe) }
last
}
/// An iterator of [`HardLinkedSQE`]s. These will be [`SQE`]s that are hard linked together.
///
/// Hard linked SQEs will occur sequentially. All of them will be completed, even if one of the
/// events resolves to an error.
pub fn hard_linked(&mut self) -> HardLinked<'iou, '_> {
HardLinked { sqes: self }
}
/// An iterator of [`SoftLinkedSQE`]s. These will be [`SQE`]s that are soft linked together.
///
/// Soft linked SQEs will occur sequentially. If one of the events errors, all events after it
/// will be cancelled.
pub fn soft_linked(&mut self) -> SoftLinked<'iou, '_> {
SoftLinked { sqes: self }
}
/// Remaining [`SQE`]s that can be modified.
pub fn remaining(&self) -> u32 {
self.capacity - self.count
}
pub fn start(&self) -> u32 {
self.start
}
pub fn capacity(&self) -> u32 {
self.capacity
}
pub fn used(&self) -> u32 {
self.count
}
fn consume(&mut self) -> Option<SQE<'iou>> {
if self.count >= self.capacity {
None
} else {
let index = (self.start + self.count) & self.mask;
self.count += 1;
let sqe: &mut io_uring_sqe = unsafe {
&mut *self.slice.get_unchecked(index as usize).get()
};
// Ensure that all SQEs passing through here are wiped and turned into NOPs first.
*sqe = io_uring_sqe::default();
sqe.opcode = IORING_OP::NOP;
Some(SQE { sqe })
}
}
/// Exhaust this iterator, thus ensuring all entries are set to NOP
fn exhaust(&mut self) {
while let Some(_) = self.consume() {}
}
}
impl<'iou> Iterator for SQEs<'iou> {
type Item = SQE<'iou>;
fn next(&mut self) -> Option<SQE<'iou>> {
self.consume()
}
}
impl<'iou> Drop for SQEs<'iou> {
fn drop(&mut self) {
if self.count != 0 {
// This iterator is responsible for all of its SQEs and must turn every unused one into a NOP.
self.exhaust()
}
}
}
/// An Iterator of [`SQE`]s which will be hard linked together.
pub struct HardLinked<'iou, 'a> {
sqes: &'a mut SQEs<'iou>,
}
impl<'iou> HardLinked<'iou, '_> {
pub fn terminate(self) -> Option<SQE<'iou>> {
self.sqes.consume()
}
}
impl<'iou> Iterator for HardLinked<'iou, '_> {
type Item = HardLinkedSQE<'iou>;
fn next(&mut self) -> Option<Self::Item> {
let is_final = self.sqes.remaining() == 1;
self.sqes.consume().map(|sqe| HardLinkedSQE { sqe, is_final })
}
}
pub struct HardLinkedSQE<'iou> {
sqe: SQE<'iou>,
is_final: bool,
}
impl<'iou> Deref for HardLinkedSQE<'iou> {
type Target = SQE<'iou>;
fn deref(&self) -> &SQE<'iou> {
&self.sqe
}
}
impl<'iou> DerefMut for HardLinkedSQE<'iou> {
fn deref_mut(&mut self) -> &mut SQE<'iou> {
&mut self.sqe
}
}
impl<'iou> Drop for HardLinkedSQE<'iou> {
fn drop(&mut self) {
if !self.is_final {
self.sqe.add_flags(IOSQE::IO_HARDLINK);
}
}
}
/// An Iterator of [`SQE`]s which will be soft linked together.
pub struct SoftLinked<'iou, 'a> {
sqes: &'a mut SQEs<'iou>,
}
impl<'iou> SoftLinked<'iou, '_> {
pub fn terminate(self) -> Option<SQE<'iou>> {
self.sqes.consume()
}
}
impl<'iou> Iterator for SoftLinked<'iou, '_> {
type Item = SoftLinkedSQE<'iou>;
fn next(&mut self) -> Option<Self::Item> {
let is_final = self.sqes.remaining() == 1;
self.sqes.consume().map(|sqe| SoftLinkedSQE { sqe, is_final })
}
}
pub struct SoftLinkedSQE<'iou> {
sqe: SQE<'iou>,
is_final: bool,
}
impl<'iou> Deref for SoftLinkedSQE<'iou> {
type Target = SQE<'iou>;
fn deref(&self) -> &SQE<'iou> {
&self.sqe
}
}
impl<'iou> DerefMut for SoftLinkedSQE<'iou> {
fn deref_mut(&mut self) -> &mut SQE<'iou> {
&mut self.sqe
}
}
impl<'iou> Drop for SoftLinkedSQE<'iou> {
fn drop(&mut self) {
if !self.is_final {
self.sqe.add_flags(IOSQE::IO_LINK);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
fn gen_buf(num_entries: usize) -> &'static mut [UnsafeCell<io_uring_sqe>] {
Box::leak((0..num_entries)
.map(|_| UnsafeCell::new(io_uring_sqe::default()))
.collect::<Box<[_]>>())
}
#[test]
fn test_wrapping_sqes() {
let mut sqe_buf = gen_buf(64);
{
let mut sqes = SQEs::new(&mut sqe_buf[..], 62, 5);
assert_eq!(sqes.next().map(|i| i.sqe.user_data = 1), Some(()));
assert_eq!(sqes.next().map(|i| i.sqe.user_data = 2), Some(()));
assert_eq!(sqes.next().map(|i| i.sqe.user_data = 3), Some(()));
assert_eq!(sqes.next().map(|i| i.sqe.user_data = 4), Some(()));
assert_eq!(sqes.next().map(|i| i.sqe.user_data = 5), Some(()));
assert_eq!(sqes.next().map(|i| i.sqe.user_data = 6), None);
}
assert_eq!(sqe_buf[61].get_mut().user_data, 0);
assert_eq!(sqe_buf[62].get_mut().user_data, 1);
assert_eq!(sqe_buf[63].get_mut().user_data, 2);
assert_eq!(sqe_buf[0].get_mut().user_data, 3);
assert_eq!(sqe_buf[1].get_mut().user_data, 4);
assert_eq!(sqe_buf[2].get_mut().user_data, 5);
assert_eq!(sqe_buf[3].get_mut().user_data, 0);
}
#[test]
fn test_hard_linked_sqes() {
let mut sqe_buf = gen_buf(64);
{
let mut sqes = SQEs::new(&mut sqe_buf, 62, 5);
let mut linked = sqes.hard_linked();
assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::READ), Some(()));
assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::TEE), Some(()));
assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::ACCEPT), Some(()));
assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::CLOSE), Some(()));
assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::CONNECT), Some(()));
assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::FADVISE), None);
}
assert_eq!(sqe_buf[61].get_mut().opcode, IORING_OP::NOP);
assert_eq!(sqe_buf[61].get_mut().flags, IOSQE::empty());
assert_eq!(sqe_buf[62].get_mut().opcode, IORING_OP::READ);
assert_eq!(sqe_buf[62].get_mut().flags, IOSQE::IO_HARDLINK);
assert_eq!(sqe_buf[63].get_mut().opcode, IORING_OP::TEE);
assert_eq!(sqe_buf[63].get_mut().flags, IOSQE::IO_HARDLINK);
assert_eq!(sqe_buf[0].get_mut().opcode, IORING_OP::ACCEPT);
assert_eq!(sqe_buf[0].get_mut().flags, IOSQE::IO_HARDLINK);
assert_eq!(sqe_buf[1].get_mut().opcode, IORING_OP::CLOSE);
assert_eq!(sqe_buf[1].get_mut().flags, IOSQE::IO_HARDLINK);
assert_eq!(sqe_buf[2].get_mut().opcode, IORING_OP::CONNECT);
assert_eq!(sqe_buf[2].get_mut().flags, IOSQE::empty());
assert_eq!(sqe_buf[3].get_mut().opcode, IORING_OP::NOP);
assert_eq!(sqe_buf[3].get_mut().flags, IOSQE::empty());
}
#[test]
fn test_soft_linked_sqes() {
let mut sqe_buf = gen_buf(64);
{
let mut sqes = SQEs::new(&mut sqe_buf, 62, 5);
let mut linked = sqes.soft_linked();
assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::READ), Some(()));
assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::TEE), Some(()));
assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::ACCEPT), Some(()));
assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::CLOSE), Some(()));
assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::CONNECT), Some(()));
assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::FADVISE), None);
}
assert_eq!(sqe_buf[61].get_mut().opcode, IORING_OP::NOP);
assert_eq!(sqe_buf[61].get_mut().flags, IOSQE::empty());
assert_eq!(sqe_buf[62].get_mut().opcode, IORING_OP::READ);
assert_eq!(sqe_buf[62].get_mut().flags, IOSQE::IO_LINK);
assert_eq!(sqe_buf[63].get_mut().opcode, IORING_OP::TEE);
assert_eq!(sqe_buf[63].get_mut().flags, IOSQE::IO_LINK);
assert_eq!(sqe_buf[0].get_mut().opcode, IORING_OP::ACCEPT);
assert_eq!(sqe_buf[0].get_mut().flags, IOSQE::IO_LINK);
assert_eq!(sqe_buf[1].get_mut().opcode, IORING_OP::CLOSE);
assert_eq!(sqe_buf[1].get_mut().flags, IOSQE::IO_LINK);
assert_eq!(sqe_buf[2].get_mut().opcode, IORING_OP::CONNECT);
assert_eq!(sqe_buf[2].get_mut().flags, IOSQE::empty());
assert_eq!(sqe_buf[3].get_mut().opcode, IORING_OP::NOP);
assert_eq!(sqe_buf[3].get_mut().flags, IOSQE::empty());
}
}
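As a hedged illustration of how these linking iterators are meant to be used (the two-entry reservation and the chosen opcodes are assumptions for the example, not part of this commit):

```rust
use crate::ctypes::IORING_OP;

// Sketch: link a READV followed by a CLOSE so the close only runs after the read.
// Assumes `sqes` was reserved with a capacity of exactly 2.
fn prep_read_then_close(sqes: &mut SQEs<'_>, user_data: u64) {
    let mut linked = sqes.hard_linked();
    let mut read = linked.next().unwrap();
    read.set_opcode(IORING_OP::READV);
    let mut close = linked.next().unwrap();
    close.set_opcode(IORING_OP::CLOSE);
    // Only the final SQE carries the user_data that the completion will report;
    // on drop, every entry except the last automatically gets IOSQE::IO_HARDLINK.
    close.set_userdata(user_data);
}
```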

View File

@ -1,136 +0,0 @@
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use crate::cancellation::Cancellation;
use crate::completion::Completion;
use crate::io_uring::IoUring;
use crate::sq::SQ;
use crate::sqe::{SQE, SQEs};
pub struct Submission {
iouring: &'static IoUring,
state: State,
}
enum State {
Inert,
Prepared(u32, Completion),
Submitted(Completion),
Cancelled(u64),
Lost,
}
impl Submission {
pub fn new(iouring: &'static IoUring) -> Self {
Self { iouring, state: State::Inert }
}
fn split_pinned(self: Pin<&mut Self>) -> (Pin<&mut IoUring>, &mut State) {
unsafe {
let this = Pin::get_unchecked_mut(self);
let iouring = &mut *(this.iouring as *const _ as *mut _);
(Pin::new_unchecked(iouring), &mut this.state)
}
}
pub fn poll(
mut self: Pin<&mut Self>,
ctx: &mut Context<'_>,
count: u32,
prepare: impl for<'sq> FnOnce(&mut SQEs<'sq>) -> SQE<'sq>
) -> Poll<io::Result<i32>> {
match self.state {
State::Inert | State::Cancelled(_) => {
let head = crate::ready!(self.as_mut().poll_prepare(ctx, count, prepare));
crate::ready!(self.as_mut().poll_submit(ctx, head));
self.poll_complete(ctx)
},
State::Prepared(head, _) => {
crate::ready!(self.as_mut().poll_submit(ctx, head));
self.poll_complete(ctx)
},
State::Submitted(_) => self.poll_complete(ctx),
State::Lost => {
panic!("Ring in invalid state")
},
}
}
pub fn poll_prepare(
self: Pin<&mut Self>,
ctx: &mut Context<'_>,
count: u32,
prepare: impl for<'sq> FnOnce(&mut SQEs<'sq>) -> SQE<'sq>
) -> Poll<u32> {
let (sq, state) = self.split_pinned();
let mut head = 0u32;
let completion = match *state {
State::Inert => {
crate::ready!(sq.poll_prepare(ctx, count, |mut sqes, ctx| {
*state = State::Lost;
let mut sqe = prepare(&mut sqes);
let completion = Completion::new(ctx.waker().clone());
sqe.set_userdata(completion.addr());
head = sqes.used();
completion
}))
},
State::Cancelled(prev) => {
crate::ready!(sq.poll_prepare(ctx, count + 1, |mut sqes, ctx| {
*state = State::Lost;
sqes.soft_linked().next().unwrap().prepare_cancel(prev);
let mut sqe = prepare(&mut sqes);
let completion = Completion::new(ctx.waker().clone());
sqe.set_userdata(completion.addr());
head = sqes.used();
completion
}))
},
_ => unreachable!(),
};
*state = State::Prepared(head, completion);
Poll::Ready(head)
}
pub fn poll_submit(
self: Pin<&mut Self>,
ctx: &mut Context<'_>,
head: u32,
) -> Poll<()> {
let (iouring, state) = self.split_pinned();
match iouring.poll_submit(ctx, head) {
Poll::Ready(()) => {
match std::mem::replace(state, State::Lost) {
State::Prepared(_, completion) => {
*state = State::Submitted(completion);
},
_ => unreachable!(),
}
Poll::Ready(())
},
Poll::Pending => Poll::Pending,
}
}
pub fn poll_complete(
self: Pin<&mut Self>,
ctx: &mut Context<'_>,
) -> Poll<io::Result<i32>> {
let (_, state) = self.split_pinned();
if let State::Submitted(completion) = std::mem::replace(state, State::Inert) {
match completion.check(ctx.waker()) {
Ok(result) => return Poll::Ready(result),
Err(completion) => {
*state = State::Submitted(completion)
}
}
}
Poll::Pending
}
}

View File

@ -1,61 +0,0 @@
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use iou::{SQE, SQEs};
use super::{Event, Submission};
pub struct Completion<'cx> {
inner: super::Completion,
marker: PhantomData<fn(&'cx ()) -> &'cx ()>,
}
impl<'cx> Completion<'cx> {
pub(crate) fn new(mut sqe: SQE<'_>, _sqes: SQEs<'_>, cx: &mut Context<'cx>) -> Self {
let inner = super::Completion::new(cx.waker().clone());
// Make the userdata for the (final) SQE a pointer to the waker for the task blocking on
// this IO.
unsafe { sqe.set_user_data(inner.addr()) };
Self { inner, marker: PhantomData }
}
#[inline(always)]
pub(crate) fn into_inner(self) -> super::Completion {
self.inner
}
}
pub trait Driver: Clone {
/// Poll to prepare a number of submissions for the submission queue.
///
/// If the driver has `count` SQE slots available it calls `prepare` to have those `SQE`
/// filled in. A driver can assume that `prepare` will use exactly `count` slots. Using this,
/// drivers can implement backpressure by returning `Poll::Pending` if fewer than `count`
/// slots are available and waking the respective task once enough slots have become available.
fn poll_prepare<'cx>(
self: Pin<&mut Self>,
ctx: &mut Context<'cx>,
count: u32,
prepare: impl FnOnce(SQEs<'_>, &mut Context<'cx>) -> Completion<'cx>,
) -> Poll<Completion<'cx>>;
/// Suggestion for the driver to submit their queue to the kernel.
///
/// This will be called by tasks after they have finished preparing submissions. Drivers must
/// eventually submit these to the kernel but aren't required to do so right away.
fn poll_submit(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<()>;
/// Completion hint
///
/// This should return `Poll::Ready` if a completion with the given user_data may have been
/// received since the last call to this function. It is safe to always return `Poll::Ready`,
/// even if no actions were completed.
fn poll_complete(self: Pin<&mut Self>, ctx: &mut Context<'_>, user_data: u64) -> Poll<()>;
fn submit<E: Event>(self, event: E) -> Submission<Self, E>
where Self: Sized
{
Submission::new(self, event)
}
}
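For orientation, the trait is consumed from the event side roughly like this. A hedged sketch: the import path for `Close` and the concrete driver value are assumptions based on the module layout, not part of this commit.

```rust
use std::os::unix::io::RawFd;
use super::events::Close;

// Sketch: submit a single Close event through some Driver and await the kernel's result.
async fn close_fd<D: Driver>(driver: D, fd: RawFd) -> std::io::Result<u32> {
    // `submit` wraps the event in a `Submission` future; it resolves to the event
    // together with the result reported in the CQE.
    let (_event, result) = driver.submit(Close { fd }).await;
    result
}
```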

View File

@ -1,27 +0,0 @@
use std::mem::ManuallyDrop;
use std::os::unix::io::RawFd;
use iou::sqe::{SockFlag, SockAddrStorage};
use iou::registrar::UringFd;
use super::{Event, SQE, SQEs, Cancellation};
pub struct Accept<FD = RawFd> {
pub addr: Option<Box<SockAddrStorage>>,
pub fd: FD,
pub flags: SockFlag,
}
impl<FD: UringFd + Copy> Event for Accept<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_accept(self.fd, self.addr.as_deref_mut(), self.flags);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
Cancellation::from(ManuallyDrop::into_inner(this).addr)
}
}

View File

@ -1,19 +0,0 @@
use std::os::unix::io::RawFd;
use iou::registrar::UringFd;
use super::{Event, SQE, SQEs};
pub struct Close<FD = RawFd> {
pub fd: FD,
}
impl<FD: UringFd + Copy> Event for Close<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_close(self.fd);
sqe
}
}

View File

@ -1,26 +0,0 @@
use std::mem::ManuallyDrop;
use std::os::unix::io::RawFd;
use iou::sqe::SockAddr;
use iou::registrar::UringFd;
use super::{Event, SQE, SQEs, Cancellation};
pub struct Connect<FD = RawFd> {
pub fd: FD,
pub addr: Box<SockAddr>,
}
impl<FD: UringFd + Copy> Event for Connect<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_connect(self.fd, &mut *self.addr);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
Cancellation::from(ManuallyDrop::into_inner(this).addr)
}
}

View File

@ -1,27 +0,0 @@
use std::mem::ManuallyDrop;
use std::os::unix::io::RawFd;
use iou::sqe::{EpollOp, EpollEvent};
use super::{Event, SQE, SQEs, Cancellation};
pub struct EpollCtl {
pub epoll_fd: RawFd,
pub op: EpollOp,
pub fd: RawFd,
pub event: Option<Box<EpollEvent>>,
}
impl Event for EpollCtl {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_epoll_ctl(self.epoll_fd, self.op, self.fd, self.event.as_deref_mut());
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
Cancellation::from(ManuallyDrop::into_inner(this).event)
}
}

View File

@ -1,23 +0,0 @@
use std::os::unix::io::RawFd;
use iou::sqe::PosixFadviseAdvice;
use iou::registrar::UringFd;
use super::{Event, SQE, SQEs};
pub struct Fadvise<FD = RawFd> {
pub fd: FD,
pub offset: u64,
pub size: u64,
pub flags: PosixFadviseAdvice,
}
impl<FD: UringFd + Copy> Event for Fadvise<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_fadvise(self.fd, self.offset, self.size, self.flags);
sqe
}
}

View File

@ -1,23 +0,0 @@
use std::os::unix::io::RawFd;
use iou::registrar::UringFd;
use iou::sqe::FallocateFlags;
use super::{Event, SQE, SQEs};
pub struct Fallocate<FD = RawFd> {
pub fd: FD,
pub offset: u64,
pub size: u64,
pub flags: FallocateFlags,
}
impl<FD: UringFd + Copy> Event for Fallocate<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_fallocate(self.fd, self.offset, self.size, self.flags);
sqe
}
}

View File

@ -1,23 +0,0 @@
use std::mem::ManuallyDrop;
use std::os::unix::io::RawFd;
use super::{Event, SQE, SQEs, Cancellation};
pub struct FilesUpdate {
pub files: Box<[RawFd]>,
pub offset: u32,
}
impl Event for FilesUpdate {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_files_update(&self.files[..], self.offset);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
Cancellation::from(ManuallyDrop::into_inner(this).files)
}
}

View File

@ -1,21 +0,0 @@
use std::os::unix::io::RawFd;
use iou::registrar::UringFd;
use iou::sqe::FsyncFlags;
use super::{Event, SQE, SQEs};
pub struct Fsync<FD = RawFd> {
pub fd: FD,
pub flags: FsyncFlags,
}
impl<FD: UringFd + Copy> Event for Fsync<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_fsync(self.fd, self.flags);
sqe
}
}

View File

@ -1,56 +0,0 @@
mod accept;
mod close;
mod connect;
mod epoll_ctl;
mod fadvise;
mod fallocate;
mod files_update;
mod fsync;
mod openat;
mod provide_buffers;
mod read;
mod readv;
mod recv;
mod send;
mod splice;
mod statx;
mod timeout;
mod write;
mod writev;
use std::mem::ManuallyDrop;
use iou::{SQE, SQEs};
use super::Cancellation;
pub use accept::Accept;
pub use close::Close;
pub use connect::Connect;
pub use epoll_ctl::EpollCtl;
pub use fadvise::Fadvise;
pub use fallocate::Fallocate;
pub use files_update::FilesUpdate;
pub use fsync::Fsync;
pub use openat::OpenAt;
pub use provide_buffers::ProvideBuffers;
pub use read::Read;
pub use readv::ReadVectored;
pub use recv::Recv;
pub use send::Send;
pub use splice::Splice;
pub use statx::Statx;
pub use timeout::Timeout;
pub use write::Write;
pub use writev::WriteVectored;
pub trait Event {
fn sqes_needed() -> u32;
unsafe fn prepare<'a>(&mut self, sqs: &mut SQEs<'a>) -> SQE<'a>;
fn cancel(_: ManuallyDrop<Self>) -> Cancellation
where Self: Sized
{
Cancellation::from(())
}
}
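A minimal illustration of the contract above: an event that needs exactly one SQE and prepares it as a no-op. Hedged sketch only; `prep_nop` is the assumed iou call for this and is not part of this commit.

```rust
// Sketch: the simplest possible Event.
pub struct Nop;

impl Event for Nop {
    fn sqes_needed() -> u32 { 1 }

    unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
        let mut sqe = sqs.single().unwrap();
        // Assumed iou API: turns the entry into a no-op that still produces a completion.
        sqe.prep_nop();
        sqe
    }
    // The default `cancel` is fine here: a NOP owns no buffers that must outlive it.
}
```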

View File

@ -1,39 +0,0 @@
use std::ffi::CString;
use std::mem::ManuallyDrop;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::prelude::RawFd;
use std::path::Path;
use iou::{SQE, SQEs};
use iou::sqe::{Mode, OFlag};
use crate::sys::linux::io_uring::cancellation::Cancellation;
use super::Event;
pub struct OpenAt {
pub path: CString,
pub dir_fd: RawFd,
pub flags: OFlag,
pub mode: Mode,
}
impl OpenAt {
pub fn without_dir(path: impl AsRef<Path>, flags: OFlag, mode: Mode) -> Self {
let path = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
Self { path, dir_fd: libc::AT_FDCWD, flags, mode }
}
}
impl Event for OpenAt {
fn sqes_needed() -> u32 {
1
}
unsafe fn prepare<'a>(&mut self, sqs: &mut SQEs<'a>) -> SQE<'a> {
let mut sqe = sqs.single().unwrap();
sqe.prep_openat(self.dir_fd, &*self.path, self.flags, self.mode);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation where Self: Sized {
ManuallyDrop::into_inner(this).path.into()
}
}

View File

@ -1,40 +0,0 @@
use std::mem::ManuallyDrop;
use iou::sqe::BufferGroupId;
use super::{Event, SQE, SQEs, Cancellation};
pub struct ProvideBuffers {
pub bufs: Box<[u8]>,
pub count: u32,
pub group: BufferGroupId,
pub index: u32,
}
impl Event for ProvideBuffers {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_provide_buffers(&mut self.bufs[..], self.count, self.group, self.index);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
Cancellation::from(ManuallyDrop::into_inner(this).bufs)
}
}
pub struct RemoveBuffers {
pub count: u32,
pub group: BufferGroupId,
}
impl Event for RemoveBuffers {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_remove_buffers(self.count, self.group);
sqe
}
}

View File

@ -1,47 +0,0 @@
use std::mem::ManuallyDrop;
use std::os::unix::io::RawFd;
use iou::registrar::{UringFd, RegisteredBuf};
use super::{Event, SQE, SQEs, Cancellation};
/// A basic read event.
pub struct Read<FD = RawFd> {
pub fd: FD,
pub buf: Box<[u8]>,
pub offset: u64,
}
impl<FD: UringFd + Copy> Event for Read<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_read(self.fd, &mut self.buf[..], self.offset);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
Cancellation::from(ManuallyDrop::into_inner(this).buf)
}
}
pub struct ReadFixed<FD = RawFd> {
pub fd: FD,
pub buf: RegisteredBuf,
pub offset: u64,
}
impl<FD: UringFd + Copy> Event for ReadFixed<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_read(self.fd, self.buf.as_mut(), self.offset);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
Cancellation::from(ManuallyDrop::into_inner(this).buf)
}
}

View File

@ -1,48 +0,0 @@
use std::io::IoSliceMut;
use std::mem::ManuallyDrop;
use std::os::unix::io::RawFd;
use iou::registrar::UringFd;
use super::{Event, SQE, SQEs, Cancellation};
/// A `readv` event.
pub struct ReadVectored<FD = RawFd> {
pub fd: FD,
pub bufs: Box<[Box<[u8]>]>,
pub offset: u64,
}
impl<FD> ReadVectored<FD> {
fn as_iovecs(buffers: &mut [Box<[u8]>]) -> &mut [IoSliceMut] {
// Unsafe contract:
// This pointer cast is defined behaviour because Box<[u8]> (wide pointer)
// is currently ABI compatible with libc::iovec.
//
// Then, libc::iovec is guaranteed ABI compatible with IoSliceMut on Unix:
// https://doc.rust-lang.org/beta/std/io/struct.IoSliceMut.html
//
// We are relying on the internals of Box<[u8]>, but this is such a
// foundational part of Rust it's unlikely the data layout would change
// without warning.
//
// Pointer cast expression adapted from the "Turning a &mut T into an &mut U"
// example of: https://doc.rust-lang.org/std/mem/fn.transmute.html#alternatives
unsafe { &mut *(buffers as *mut [Box<[u8]>] as *mut [IoSliceMut]) }
}
}
impl<FD: UringFd + Copy> Event for ReadVectored<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_read_vectored(self.fd, Self::as_iovecs(&mut self.bufs[..]), self.offset);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
Cancellation::from(ManuallyDrop::into_inner(this).bufs)
}
}

View File

@ -1,27 +0,0 @@
use std::mem::ManuallyDrop;
use std::os::unix::io::RawFd;
use iou::sqe::MsgFlags;
use iou::registrar::UringFd;
use super::{Event, SQE, SQEs, Cancellation};
pub struct Recv<FD = RawFd> {
pub fd: FD,
pub buf: Box<[u8]>,
pub flags: MsgFlags,
}
impl<FD: UringFd + Copy> Event for Recv<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_recv(self.fd, &mut self.buf[..], self.flags);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
Cancellation::from(ManuallyDrop::into_inner(this).buf)
}
}

View File

@ -1,27 +0,0 @@
use std::mem::ManuallyDrop;
use std::os::unix::io::RawFd;
use iou::sqe::MsgFlags;
use iou::registrar::UringFd;
use super::{Event, SQE, SQEs, Cancellation};
pub struct Send<FD = RawFd> {
pub fd: FD,
pub buf: Box<[u8]>,
pub flags: MsgFlags,
}
impl<FD: UringFd + Copy> Event for Send<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_send(self.fd, &self.buf[..], self.flags);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
Cancellation::from(ManuallyDrop::into_inner(this).buf)
}
}

View File

@ -1,24 +0,0 @@
use std::os::unix::io::RawFd;
use iou::sqe::SpliceFlags;
use super::{Event, SQE, SQEs};
pub struct Splice {
pub fd_in: RawFd,
pub off_in: i64,
pub fd_out: RawFd,
pub off_out: i64,
pub bytes: u32,
pub flags: SpliceFlags,
}
impl Event for Splice {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_splice(self.fd_in, self.off_in, self.fd_out, self.off_out, self.bytes, self.flags);
sqe
}
}

View File

@ -1,53 +0,0 @@
use std::ffi::CString;
use std::mem::{self, ManuallyDrop};
use std::os::unix::io::RawFd;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use iou::sqe::{StatxFlags, StatxMode};
use iou::registrar::UringFd;
use super::{Event, SQE, SQEs, Cancellation};
pub struct Statx<FD = RawFd> {
pub dir_fd: FD,
pub path: CString,
pub flags: StatxFlags,
pub mask: StatxMode,
pub statx: Box<libc::statx>,
}
impl Statx {
pub fn without_dir(path: impl AsRef<Path>, flags: StatxFlags, mask: StatxMode) -> Statx {
let path = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap();
let statx = unsafe { Box::new(mem::zeroed()) };
Statx { path, dir_fd: libc::AT_FDCWD, flags, mask, statx }
}
}
impl<FD: UringFd> Statx<FD> {
pub fn without_path(fd: FD, mut flags: StatxFlags, mask: StatxMode) -> Statx<FD> {
unsafe {
// TODO don't allocate? Use Cow? Use NULL?
let path = CString::new("").unwrap();
let statx = Box::new(mem::zeroed());
flags.insert(StatxFlags::AT_EMPTY_PATH);
Statx { dir_fd: fd, path, flags, mask, statx }
}
}
}
impl<FD: UringFd + Copy> Event for Statx<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_statx(self.dir_fd, self.path.as_c_str(), self.flags, self.mask, &mut *self.statx);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
let this = ManuallyDrop::into_inner(this);
Cancellation::from((this.statx, this.path))
}
}

View File

@ -1,67 +0,0 @@
use std::mem::ManuallyDrop;
use std::time::Duration;
use super::{Event, SQE, SQEs, Cancellation};
use iou::sqe::TimeoutFlags;
pub struct StaticTimeout {
ts: uring_sys::__kernel_timespec,
events: u32,
flags: TimeoutFlags,
}
impl StaticTimeout {
pub const fn new(duration: Duration, events: u32, flags: TimeoutFlags) -> StaticTimeout {
StaticTimeout {
ts: timespec(duration),
events, flags,
}
}
}
impl Event for &'static StaticTimeout {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_timeout(&self.ts, self.events, self.flags);
sqe
}
}
pub struct Timeout {
ts: Box<uring_sys::__kernel_timespec>,
events: u32,
flags: TimeoutFlags,
}
impl Timeout {
pub fn new(duration: Duration, events: u32, flags: TimeoutFlags) -> Timeout {
Timeout {
ts: Box::new(timespec(duration)),
events, flags,
}
}
}
impl Event for Timeout {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_timeout(&*self.ts, self.events, self.flags);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
Cancellation::from(ManuallyDrop::into_inner(this).ts)
}
}
const fn timespec(duration: Duration) -> uring_sys::__kernel_timespec {
uring_sys::__kernel_timespec {
tv_sec: duration.as_secs() as i64,
tv_nsec: duration.subsec_nanos() as _,
}
}

View File

@ -1,47 +0,0 @@
use std::mem::ManuallyDrop;
use std::os::unix::io::RawFd;
use iou::registrar::{UringFd, RegisteredBuf};
use super::{Event, SQE, SQEs, Cancellation};
/// A basic write event.
pub struct Write<FD = RawFd> {
pub fd: FD,
pub buf: Box<[u8]>,
pub offset: u64,
}
impl<FD: UringFd + Copy> Event for Write<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_write(self.fd, &self.buf[..], self.offset);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
Cancellation::from(ManuallyDrop::into_inner(this).buf)
}
}
pub struct WriteFixed<FD = RawFd> {
pub fd: FD,
pub buf: RegisteredBuf,
pub offset: u64,
}
impl<FD: UringFd + Copy> Event for WriteFixed<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_write(self.fd, self.buf.as_ref(), self.offset);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
Cancellation::from(ManuallyDrop::into_inner(this).buf)
}
}

View File

@ -1,34 +0,0 @@
use std::io::IoSlice;
use std::mem::ManuallyDrop;
use std::os::unix::io::RawFd;
use iou::registrar::UringFd;
use super::{Event, SQE, SQEs, Cancellation};
/// A `writev` event.
pub struct WriteVectored<FD = RawFd> {
pub fd: FD,
pub bufs: Box<[Box<[u8]>]>,
pub offset: u64,
}
impl<FD> WriteVectored<FD> {
fn iovecs(&self) -> &[IoSlice] {
unsafe { & *(&self.bufs[..] as *const [Box<[u8]>] as *const [IoSlice]) }
}
}
impl<FD: UringFd + Copy> Event for WriteVectored<FD> {
fn sqes_needed() -> u32 { 1 }
unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> {
let mut sqe = sqs.single().unwrap();
sqe.prep_write_vectored(self.fd, self.iovecs(), self.offset);
sqe
}
fn cancel(this: ManuallyDrop<Self>) -> Cancellation {
Cancellation::from(ManuallyDrop::into_inner(this).bufs)
}
}

View File

@ -1,187 +0,0 @@
// Imported here for modules
use std::future::Future;
use std::{fs, io};
use std::mem::ManuallyDrop;
use std::os::unix::prelude::{FromRawFd, RawFd};
use std::path::Path;
use std::pin::Pin;
use std::task::{Context, Poll};
use super::{Driver, Ring, Submission, events::*};
use futures_core::ready;
use futures_io::{AsyncRead, AsyncWrite, AsyncSeek, AsyncBufRead};
use iou::sqe::{Mode, OFlag};
pub struct File<D: Driver> {
ring: Ring<D>,
fd: RawFd,
active: Op,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum Op {
Read,
Write,
Close,
Nothing,
Statx,
Closed,
}
impl<D: Driver> File<D> {
fn from_fd(fd: RawFd, driver: D) -> File<D> {
File {
ring: Ring::new(driver),
fd,
active: Op::Nothing,
}
}
pub fn open<P: AsRef<Path>>(driver: D, path: P) -> impl Future<Output = io::Result<Self>> {
let flags = OFlag::O_CLOEXEC | OFlag::O_RDONLY;
open::Open(driver.submit(OpenAt::without_dir(
path, flags, Mode::from_bits(0o666).unwrap()
)))
}
pub fn create<P: AsRef<Path>>(driver: D, path: P) -> impl Future<Output = io::Result<Self>> {
let flags = OFlag::O_CLOEXEC | OFlag::O_WRONLY | OFlag::O_CREAT | OFlag::O_TRUNC;
create::Create(driver.submit(OpenAt::without_dir(
path, flags, Mode::from_bits(0o666).unwrap()
)))
}
}
mod open;
mod create;
impl<D: Driver> AsyncRead for File<D> {
fn poll_read(mut self: Pin<&mut Self>, ctx: &mut Context<'_>, buf: &mut [u8])
-> Poll<io::Result<usize>>
{
let mut inner = ready!(self.as_mut().poll_fill_buf(ctx))?;
let len = io::Read::read(&mut inner, buf)?;
self.consume(len);
Poll::Ready(Ok(len))
}
}
impl<D: Driver> AsyncBufRead for File<D> {
fn poll_fill_buf(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
let fd = self.fd;
let (ring, buf, pos, ..) = self.split_with_buf();
buf.fill_buf(|buf| {
let n = ready!(ring.poll(ctx, 1, |sqs| {
let mut sqe = sqs.single().unwrap();
unsafe {
sqe.prep_read(fd, buf, *pos);
}
sqe
}))?;
*pos += n as u64;
Poll::Ready(Ok(n as u32))
})
}
fn consume(self: Pin<&mut Self>, amt: usize) {
self.buf().consume(amt);
}
}
impl<D: Driver> AsyncWrite for File<D> {
fn poll_write(mut self: Pin<&mut Self>, ctx: &mut Context<'_>, slice: &[u8]) -> Poll<io::Result<usize>> {
let fd = self.fd;
let (ring, buf, pos, ..) = self.split_with_buf();
let data = ready!(buf.fill_buf(|mut buf| {
Poll::Ready(Ok(io::Write::write(&mut buf, slice)? as u32))
}))?;
let n = ready!(ring.poll(ctx, 1, |sqs| {
let mut sqe = sqs.single().unwrap();
unsafe {
sqe.prep_write(fd, data, *pos);
}
sqe
}))?;
*pos += n as u64;
buf.clear();
Poll::Ready(Ok(n as usize))
}
fn poll_flush(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<io::Result<()>> {
ready!(self.poll_write(ctx, &[]))?;
Poll::Ready(Ok(()))
}
fn poll_close(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<io::Result<()>> {
self.as_mut().guard_op(Op::Close);
let fd = self.fd;
ready!(self.as_mut().ring().poll(ctx, 1, |sqs| {
let mut sqe = sqs.single().unwrap();
unsafe {
sqe.prep_close(fd);
}
sqe
}))?;
self.confirm_close();
Poll::Ready(Ok(()))
}
}
impl<D: Driver> AsyncSeek for File<D> {
fn poll_seek(mut self: Pin<&mut Self>, ctx: &mut Context, pos: io::SeekFrom)
-> Poll<io::Result<u64>>
{
let (start, offset) = match pos {
io::SeekFrom::Start(n) => {
*self.as_mut().pos() = n;
return Poll::Ready(Ok(self.pos));
}
io::SeekFrom::Current(n) => (self.pos, n),
io::SeekFrom::End(n) => {
(ready!(self.as_mut().poll_file_size(ctx))?, n)
}
};
let valid_seek = if offset.is_negative() {
match start.checked_sub(offset.abs() as u64) {
Some(valid_seek) => valid_seek,
None => {
let invalid = io::Error::from(io::ErrorKind::InvalidInput);
return Poll::Ready(Err(invalid));
}
}
} else {
match start.checked_add(offset as u64) {
Some(valid_seek) => valid_seek,
None => {
let overflow = io::Error::from_raw_os_error(libc::EOVERFLOW);
return Poll::Ready(Err(overflow));
}
}
};
*self.as_mut().pos() = valid_seek;
Poll::Ready(Ok(self.pos))
}
}
impl<D: Driver> From<File<D>> for fs::File {
fn from(mut file: File<D>) -> fs::File {
file.cancel();
let file = ManuallyDrop::new(file);
unsafe {
fs::File::from_raw_fd(file.fd)
}
}
}
impl<D: Driver> Drop for File<D> {
fn drop(&mut self) {
match self.active {
Op::Closed => { }
Op::Nothing => unsafe { libc::close(self.fd); },
_ => self.cancel(),
}
}
}
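A hedged sketch of how this `File` is driven from task code; the concrete driver value and the `futures_util::io::AsyncReadExt` import are assumptions for the example.

```rust
use futures_util::io::AsyncReadExt;

// Sketch: open a file through some Driver and read its contents into a String.
async fn read_to_string<D: Driver>(driver: D, path: &str) -> io::Result<String> {
    let mut file = File::open(driver, path).await?;
    let mut contents = String::new();
    // `read_to_string` repeatedly drives `poll_read`, which submits read SQEs under the hood.
    file.read_to_string(&mut contents).await?;
    Ok(contents)
}
```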

View File

@ -1,18 +0,0 @@
use std::future::Future;
use futures_core::ready;
use super::*;
pub(super) struct Create<D: Driver>(pub(super) Submission<D, OpenAt>);
impl<D: Driver> Future for Create<D> {
type Output = io::Result<File<D>>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut inner = unsafe {
self.map_unchecked_mut(|this| &mut this.0)
};
let (_, ready) = ready!(inner.as_mut().poll(cx));
let fd = ready? as i32;
Poll::Ready(Ok(File::from_fd(fd, inner.driver().clone())))
}
}

View File

@ -1,18 +0,0 @@
use std::future::Future;
use futures_core::ready;
use super::*;
pub(super) struct Open<D: Driver>(pub(super) Submission<D, OpenAt>);
impl<D: Driver> Future for Open<D> {
type Output = io::Result<File<D>>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut inner = unsafe {
self.map_unchecked_mut(|this| &mut this.0)
};
let (_, ready) = ready!(inner.as_mut().poll(cx));
let fd = ready? as i32;
Poll::Ready(Ok(File::from_fd(fd, inner.driver().clone())))
}
}

View File

@ -1,20 +0,0 @@
mod completion;
use completion::Completion;
mod cancellation;
use cancellation::Cancellation;
mod ring;
use ring::Ring;
mod events;
use events::Event;
mod submission;
use submission::Submission;
mod driver;
use driver::Driver;
mod fs;

View File

@ -1,176 +0,0 @@
use std::{io, mem};
use std::pin::Pin;
use std::task::{Context, Poll};
use iou::{SQE, SQEs};
use super::{driver, Driver};
use super::Completion;
use futures_core::ready;
use crate::sys::linux::io_uring::cancellation::Cancellation;
/// A `Ring` ties a [`Driver`] to the state machine tracking a single submission from prepare
/// over submit to completion.
pub struct Ring<D: Driver> {
state: State,
driver: D,
}
enum State {
Empty,
Prepared(Completion),
Submitted(Completion),
Cancelled(u64),
Lost,
}
impl<D: Driver> Ring<D> {
pub fn new(driver: D) -> Self {
Self {
state: State::Empty,
driver,
}
}
pub fn driver(&self) -> &D {
&self.driver
}
fn split_pinned(self: Pin<&mut Self>) -> (&mut State, Pin<&mut D>) {
unsafe {
let this = Pin::get_unchecked_mut(self);
(&mut this.state, Pin::new_unchecked(&mut this.driver))
}
}
pub fn poll(
mut self: Pin<&mut Self>,
ctx: &mut Context<'_>,
count: u32,
prepare: impl for<'sq> FnOnce(&mut SQEs<'sq>) -> SQE<'sq>,
) -> Poll<io::Result<u32>> {
match self.state {
State::Empty => {
ready!(self.as_mut().poll_prepare_empty(ctx, count, prepare));
ready!(self.as_mut().poll_submit(ctx));
self.poll_complete(ctx)
},
State::Cancelled(previous) => {
ready!(self.as_mut().poll_prepare_canceled(ctx, previous, count, prepare));
ready!(self.as_mut().poll_submit(ctx));
self.poll_complete(ctx)
},
State::Prepared(_) => match self.as_mut().poll_complete(ctx) {
Poll::Pending => {
ready!(self.as_mut().poll_submit(ctx));
self.poll_complete(ctx)
},
ready @ Poll::Ready(_) => ready,
},
State::Submitted(_) => self.poll_complete(ctx),
State::Lost => panic!("Lost events, ring is now in an invalid state"),
}
}
fn poll_prepare_empty(
self: Pin<&mut Self>,
ctx: &mut Context<'_>,
count: u32,
prepare: impl for<'sq> FnOnce(&mut SQEs<'sq>) -> SQE<'sq>,
) -> Poll<()> {
let (state, driver) = self.split_pinned();
let completion = ready!(driver.poll_prepare(ctx, count, |mut sqes, ctx| {
*state = State::Lost;
let sqe = prepare(&mut sqes);
let completion = driver::Completion::new(sqe, sqes, ctx);
completion
}));
*state = State::Prepared(completion.into_inner());
Poll::Ready(())
}
fn poll_prepare_canceled(
self: Pin<&mut Self>,
ctx: &mut Context<'_>,
previous: u64,
count: u32,
prepare: impl for<'sq> FnOnce(&mut SQEs<'sq>) -> SQE<'sq>,
) -> Poll<()> {
let (mut state, driver) = self.split_pinned();
let completion = ready!(driver.poll_prepare(ctx, count + 1, |mut sqes, ctx| {
*state = State::Lost;
unsafe { sqes.hard_linked().next().unwrap().prep_cancel(previous, 0); }
let sqe = prepare(&mut sqes);
let completion = driver::Completion::new(sqe, sqes, ctx);
completion
}));
*state = State::Prepared(completion.into_inner());
Poll::Ready(())
}
fn poll_submit(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<()> {
let (state, driver) = self.split_pinned();
let _ = ready!(driver.poll_submit(ctx));
if let State::Prepared(completion) | State::Submitted(completion)
= mem::replace(state, State::Lost)
{
*state = State::Submitted(completion);
Poll::Ready(())
} else {
unreachable!();
}
}
fn poll_complete(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<io::Result<u32>> {
let (state, driver) = self.split_pinned();
match mem::replace(state, State::Lost) {
State::Prepared(completion) => {
ready!(driver.poll_complete(ctx, completion.addr()));
match completion.check(ctx.waker()) {
Ok(result) => {
*state = State::Empty;
Poll::Ready(result)
},
Err(completion) => {
*state = State::Prepared(completion);
Poll::Pending
}
}
},
State::Submitted(completion) => {
ready!(driver.poll_complete(ctx, completion.addr()));
match completion.check(ctx.waker()) {
Ok(result) => {
*state = State::Empty;
Poll::Ready(result)
},
Err(completion) => {
*state = State::Submitted(completion);
Poll::Pending
}
}
},
_ => unreachable!(),
}
}
pub fn cancel_pinned(self: Pin<&mut Self>, cancellation: Cancellation) {
self.split_pinned().0.cancel(cancellation);
}
pub fn cancel(&mut self, cancellation: Cancellation) {
self.state.cancel(cancellation)
}
}
impl State {
fn cancel(&mut self, cancellation: Cancellation) {
match mem::replace(self, State::Lost) {
State::Submitted(completion) | State::Prepared(completion) => {
*self = State::Cancelled(completion.addr());
completion.cancel(cancellation);
},
state => {
*self = state;
}
}
}
}

View File

@ -1,48 +0,0 @@
use std::future::Future;
use futures_core::ready;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use super::{Ring, Driver, Event};
pub struct Submission<D: Driver, E: Event> {
ring: Ring<D>,
event: Option<E>,
}
impl<D: Driver, E: Event> Submission<D, E> {
pub fn new(driver: D, event: E) -> Self {
Self {
ring: Ring::new(driver),
event: Some(event),
}
}
pub fn driver(&self) -> &D {
self.ring.driver()
}
fn split_pinned(self: Pin<&mut Self>) -> (Pin<&mut Ring<D>>, &mut Option<E>) {
unsafe {
let this = Pin::get_unchecked_mut(self);
(Pin::new_unchecked(&mut this.ring), &mut this.event)
}
}
}
impl<D: Driver, E: Event> Future for Submission<D, E> {
type Output = (E, io::Result<u32>);
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let (ring, event) = self.split_pinned();
let result = if let Some(event) = event {
let count = E::sqes_needed();
ready!(ring.poll(cx, count, |sqes| unsafe { event.prepare(sqes) }))
} else {
panic!("polled Submission after completion")
};
Poll::Ready((event.take().unwrap(), result))
}
}

View File

@ -1,5 +0,0 @@
#[cfg(feature = "io_uring")]
mod io_uring;
#[cfg(feature = "epoll")]
mod epoll;

View File

@ -1,2 +0,0 @@
#[cfg(target_os = "linux")]
mod linux;

View File

@ -1,72 +0,0 @@
use std::io;
use std::os::unix::prelude::RawFd;
use libc::{c_ulong, c_long};
use crate::ctypes::{IORING_ENTER, IORING_REGISTER_OP};
use super::ctypes::Params;
const ENOMEM: i32 = 12;
const SYS_SETUP: c_long = libc::SYS_io_uring_setup;
const SYS_ENTER: c_long = libc::SYS_io_uring_enter;
const SYS_REGISTER: c_long = libc::SYS_io_uring_register;
/// Syscall io_uring_setup, creating the io_uring ringbuffers
pub fn setup(entries: u32, params: *mut Params) -> io::Result<RawFd> {
assert!((0 < entries && entries <= 4096), "entries must be between 1 and 4096");
assert_eq!(entries.count_ones(), 1, "entries must be a power of two");
let retval = unsafe {
libc::syscall(SYS_SETUP, entries, params)
};
if retval < 0 {
let err = io::Error::last_os_error();
if let Some(ENOMEM) = err.raw_os_error() {
return Err(io::Error::new(
io::ErrorKind::Other,
"Failed to lock enough memory. You may need to increase the memlock limit using \
rlimits"
));
}
return Err(err);
} else {
Ok(retval as RawFd)
}
}
static_assertions::assert_eq_size!(i64, c_long);
/// Enter io_uring, returning when at least `min_complete` events have been completed
pub fn enter(fd: RawFd,
to_submit: u32,
min_complete: u32,
flags: IORING_ENTER,
args: *const libc::c_void,
argsz: libc::size_t
) -> io::Result<i64> {
let retval = unsafe {
libc::syscall(SYS_ENTER, fd, to_submit, min_complete, flags.bits(), args, argsz)
};
if retval < 0 {
let err = io::Error::last_os_error();
Err(err)
} else {
Ok(retval)
}
}
/// Register buffers or file descriptors with the kernel for faster access, so that repeated
/// use doesn't require atomic reference counting on every operation.
pub fn register(fd: RawFd, opcode: IORING_REGISTER_OP, args: *const (), nargs: u32)
-> io::Result<i64>
{
let retval = unsafe {
libc::syscall(SYS_REGISTER, fd, opcode, args, nargs)
};
if retval < 0 {
let err = io::Error::last_os_error();
Err(err)
} else {
Ok(retval)
}
}
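A hedged usage sketch for the setup call above; it assumes `Params` can be zero-initialised via `Default`, which is not shown in this commit.

```rust
// Sketch: create a 64-entry ring and hand back its file descriptor.
fn create_ring() -> io::Result<RawFd> {
    let mut params = Params::default();
    let fd = setup(64, &mut params)?;
    // After the call `params` holds the SQ/CQ offsets needed to mmap the rings.
    Ok(fd)
}
```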

View File

@ -13,6 +13,21 @@ exclude = [
"scripts/*", "scripts/*",
] ]
[[bench]]
name = "perf"
harness = false
path = "benches/perf.rs"
[[bench]]
name = "spawn"
harness = false
path = "benches/spawn.rs"
[[bench]]
name = "stats"
harness = false
path = "benches/stats.rs"
[dependencies]
lightproc = { path = "../lightproc" }
@ -24,6 +39,7 @@ lazy_static = "1.4"
libc = "0.2" libc = "0.2"
num_cpus = "1.13" num_cpus = "1.13"
pin-utils = "0.1.0" pin-utils = "0.1.0"
slab = "0.4"
# Allocator
arrayvec = { version = "0.7.0" }
@ -32,3 +48,11 @@ once_cell = "1.4.0"
lever = "0.1" lever = "0.1"
tracing = "0.1.19" tracing = "0.1.19"
crossbeam-queue = "0.3.0" crossbeam-queue = "0.3.0"
[dev-dependencies]
async-std = "1.10.0"
tracing = { version = "0.1.19", features = ["max_level_trace"]}
tracing-subscriber = "0.3.1"
futures-util = "0.3"
rand = "0.8"
criterion = "0.3"

View File

@ -1,25 +1,22 @@
#![feature(test)]
use executor::prelude::*;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
extern crate test;
fn increment(b: &mut Criterion) {
use bastion_executor::prelude::*;
use lightproc::proc_stack::ProcStack;
use test::{black_box, Bencher};
#[bench]
fn increment(b: &mut Bencher) {
let mut sum = 0;
let executor = Executor::new();
b.iter(|| {
b.bench_function("Executor::run", |b| b.iter(|| {
run(
executor.run(
async {
(0..10_000_000).for_each(|_| {
sum += 1;
});
},
ProcStack::default(),
);
});
}));
black_box(sum);
}
criterion_group!(perf, increment);
criterion_main!(perf);

View File

@ -1,23 +1,16 @@
#![feature(test)]
use executor::load_balancer;
use executor::prelude::*;
extern crate test;
use bastion_executor::load_balancer;
use bastion_executor::prelude::spawn;
use futures_timer::Delay;
use lightproc::proc_stack::ProcStack;
use std::time::Duration;
use test::Bencher;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
#[cfg(feature = "tokio-runtime")]
mod tokio_benchs {
mod benches {
use super::*;
#[bench]
pub fn spawn_lot(b: &mut Bencher) {
fn spawn_lot(b: &mut Bencher) {
tokio_test::block_on(async { _spawn_lot(b) });
}
#[bench]
pub fn spawn_single(b: &mut Bencher) {
fn spawn_single(b: &mut Bencher) {
tokio_test::block_on(async {
_spawn_single(b);
});
@ -25,46 +18,47 @@ mod tokio_benchs {
}
#[cfg(not(feature = "tokio-runtime"))]
mod no_tokio_benchs {
mod benches {
use super::*;
#[bench]
fn spawn_lot(b: &mut Bencher) {
pub fn spawn_lot(b: &mut Criterion) {
_spawn_lot(b);
}
#[bench]
pub fn spawn_single(b: &mut Criterion) {
fn spawn_single(b: &mut Bencher) {
_spawn_single(b);
}
}
criterion_group!(spawn, benches::spawn_lot, benches::spawn_single);
criterion_main!(spawn);
// Benchmark for a 10K burst task spawn
fn _spawn_lot(b: &mut Bencher) {
fn _spawn_lot(b: &mut Criterion) {
let proc_stack = ProcStack::default();
let executor = Executor::new();
b.iter(|| {
b.bench_function("spawn_lot", |b| b.iter(|| {
let _ = (0..10_000)
.map(|_| {
spawn(
executor.spawn(
async {
let duration = Duration::from_millis(1);
Delay::new(duration).await;
},
proc_stack.clone(),
)
})
.collect::<Vec<_>>();
});
}));
}
// Benchmark for a single task spawn
fn _spawn_single(b: &mut Bencher) {
fn _spawn_single(b: &mut Criterion) {
let proc_stack = ProcStack::default();
let executor = Executor::new();
b.iter(|| {
b.bench_function("spawn single", |b| b.iter(|| {
spawn(
executor.spawn(
async {
let duration = Duration::from_millis(1);
Delay::new(duration).await;
},
proc_stack.clone(),
);
});
}));
}

View File

@ -1,10 +1,7 @@
#![feature(test)]
use executor::load_balancer::{core_count, get_cores, stats, SmpStats};
use executor::placement;
extern crate test;
use bastion_executor::load_balancer::{core_count, get_cores, stats, SmpStats};
use bastion_executor::placement;
use std::thread;
use test::Bencher;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
fn stress_stats<S: SmpStats + Sync + Send>(stats: &'static S) {
let mut handles = Vec::with_capacity(*core_count());
@ -29,15 +26,13 @@ fn stress_stats<S: SmpStats + Sync + Send>(stats: &'static S) {
// previous lock based stats benchmark 1,352,791 ns/iter (+/- 2,682,013)
// 158,278 ns/iter (+/- 117,103)
#[bench]
fn lockless_stats_bench(b: &mut Criterion) {
fn lockless_stats_bench(b: &mut Bencher) {
b.bench_function("stress_stats", |b| b.iter(|| {
b.iter(|| {
stress_stats(stats());
});
}));
}
#[bench]
fn lockless_stats_bad_load(b: &mut Criterion) {
fn lockless_stats_bad_load(b: &mut Bencher) {
let stats = stats();
const MAX_CORE: usize = 256;
for i in 0..MAX_CORE {
@ -50,13 +45,12 @@ fn lockless_stats_bad_load(b: &mut Bencher) {
}
}
b.iter(|| {
b.bench_function("get_sorted_load", |b| b.iter(|| {
let _sorted_load = stats.get_sorted_load();
});
}));
}
#[bench]
fn lockless_stats_good_load(b: &mut Criterion) {
fn lockless_stats_good_load(b: &mut Bencher) {
let stats = stats();
const MAX_CORE: usize = 256;
for i in 0..MAX_CORE {
@ -65,7 +59,11 @@ fn lockless_stats_good_load(b: &mut Bencher) {
stats.store_load(i, i);
}
b.iter(|| {
b.bench_function("get_sorted_load", |b| b.iter(|| {
let _sorted_load = stats.get_sorted_load();
});
}));
}
criterion_group!(stats_bench, lockless_stats_bench, lockless_stats_bad_load,
lockless_stats_good_load);
criterion_main!(stats_bench);

View File

@ -1,42 +1,119 @@
use std::io::Write;
use std::panic::resume_unwind;
use std::rc::Rc;
use std::time::Duration;
use futures_util::{stream::FuturesUnordered, Stream};
use futures_util::{FutureExt, StreamExt};
use executor::pool;
use executor::prelude::*;
use lightproc::prelude::RecoverableHandle;
fn main() {
std::panic::set_hook(Box::new(|info| {
tracing_subscriber::fmt()
.with_max_level(tracing::Level::DEBUG)
.init();
let hook = std::panic::take_hook();
std::panic::set_hook(Box::new(move |info| {
let span = tracing::span!(tracing::Level::ERROR, "panic hook").entered();
let tid = std::thread::current().id();
println!("Panicking ThreadId: {:?}", tid);
tracing::error!("Panicking ThreadId: {:?}", tid);
std::io::stdout().flush();
tracing::error!("{}", info);
println!("panic hook: {:?}", info);
span.exit();
}));
let tid = std::thread::current().id();
println!("Main ThreadId: {:?}", tid);
let handle = spawn(
let executor = Executor::new();
let mut handles: FuturesUnordered<RecoverableHandle<usize>> = (0..2000).map(|n| {
executor.spawn(
async move {
let m: u64 = rand::random::<u64>() % 200;
tracing::debug!("Will sleep {} * 1 ms", m);
// simulate some really heavy load.
for _ in 0..m {
async_std::task::sleep(Duration::from_millis(1)).await;
}
return n;
},
)
}).collect();
//let handle = handles.fuse().all(|opt| async move { opt.is_some() });
/* Futures passed to `spawn` need to be `Send` so this won't work:
* let n = 1;
* let unsend = spawn(async move {
* let rc = Rc::new(n);
* let tid = std::thread::current().id();
* tracing::info!("!Send fut {} running on thread {:?}", *rc, tid);
* async_std::task::sleep(Duration::from_millis(20)).await;
* tracing::info!("!Send fut {} still running on thread {:?}!", *rc, tid);
* async_std::task::sleep(Duration::from_millis(20)).await;
* tracing::info!("!Send fut {} still running on thread {:?}!", *rc, tid);
* async_std::task::sleep(Duration::from_millis(20)).await;
* *rc
* });
*/
// But you can use `spawn_local` which will make sure to never Send your task to other threads.
// However, since such a future is !Send it has to be constructed on the thread that will
// run it; in this example that happens inside a task already running on an executor thread.
let fut = async {
let local_futs: FuturesUnordered<_> = (0..200).map(|ref n| {
let n = *n;
let exe = executor.clone();
async move {
exe.spawn(
async {
let tid = std::thread::current().id();
tracing::info!("spawn_local({}) is on thread {:?}", n, tid);
exe.spawn_local(async move {
let rc = Rc::new(n);
let tid = std::thread::current().id();
tracing::info!("!Send fut {} running on thread {:?}", *rc, tid);
async_std::task::sleep(Duration::from_millis(20)).await;
let tid2 = std::thread::current().id();
tracing::info!("!Send fut {} still running on thread {:?}!", *rc, tid2);
assert_eq!(tid, tid2);
async_std::task::sleep(Duration::from_millis(20)).await;
let tid3 = std::thread::current().id();
tracing::info!("!Send fut {} still running on thread {:?}!", *rc, tid3);
assert_eq!(tid2, tid3);
*rc
})
}
).await
}
}).collect();
local_futs
};
let a = async move {
let mut local_futs = fut.await;
while let Some(fut) = local_futs.next().await {
assert!(fut.is_some());
tracing::info!("local fut returned {:?}", fut.unwrap().await)
}
while let Some(a) = handles.next().await {
assert!(a.is_some());
tracing::info!("shared fut returned {}", a.unwrap())
}
};
let b = async move {
async_std::task::sleep(Duration::from_secs(20)).await;
tracing::info!("This is taking too long.");
};
executor.run(
async { async {
panic!("test"); let res = futures_util::select! {
_ = a.fuse() => {},
_ = b.fuse() => {},
};
}, },
); );
run(
async {
handle.await;
},
ProcStack {},
);
let pool = pool::get();
let manager = pool::get_manager().unwrap();
println!("After panic: {:?}", pool);
println!("{:#?}", manager);
let h = std::thread::spawn(|| {
panic!("This is a test");
});
std::thread::sleep(Duration::from_secs(30));
println!("After panic");
} }

View File

@ -20,9 +20,6 @@
//! [lightproc]: https://docs.rs/lightproc //! [lightproc]: https://docs.rs/lightproc
//! //!
#![doc(
html_logo_url = "https://raw.githubusercontent.com/bastion-rs/bastion/master/img/bastion-logo.png"
)]
// Force missing implementations // Force missing implementations
#![warn(missing_docs)] #![warn(missing_docs)]
#![warn(missing_debug_implementations)] #![warn(missing_debug_implementations)]
@ -30,21 +27,15 @@
#![forbid(unused_must_use)] #![forbid(unused_must_use)]
#![forbid(unused_import_braces)] #![forbid(unused_import_braces)]
pub mod blocking;
pub mod load_balancer; pub mod load_balancer;
pub mod placement; pub mod placement;
pub mod pool; pub mod pool;
pub mod run; pub mod run;
pub mod sleepers;
mod thread_manager; mod thread_manager;
pub mod worker; mod worker;
mod proc_stack;
/// ///
/// Prelude of Bastion Executor /// Prelude of Bastion Executor
pub mod prelude { pub mod prelude {
pub use crate::blocking::*;
pub use crate::pool::*; pub use crate::pool::*;
pub use crate::run::*;
pub use crate::proc_stack::*;
} }

View File

@ -7,217 +7,217 @@
//! [`spawn`]: crate::pool::spawn //! [`spawn`]: crate::pool::spawn
//! [`Worker`]: crate::run_queue::Worker //! [`Worker`]: crate::run_queue::Worker
use crate::thread_manager::{DynamicPoolManager, DynamicRunner}; use std::cell::Cell;
use crate::worker; use crate::thread_manager::{ThreadManager, DynamicRunner};
use crossbeam_channel::{unbounded, Receiver, Sender};
use lazy_static::lazy_static;
use lightproc::lightproc::LightProc; use lightproc::lightproc::LightProc;
use lightproc::recoverable_handle::RecoverableHandle; use lightproc::recoverable_handle::RecoverableHandle;
use once_cell::sync::{Lazy, OnceCell};
use std::future::Future; use std::future::Future;
use std::iter::Iterator; use std::iter::Iterator;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use std::{env, thread}; use crossbeam_deque::{Injector, Stealer};
use tracing::trace; use crate::run::block;
use crate::worker::{Sleeper, WorkerThread};
/// #[derive(Debug)]
/// Spawn a process (which contains future + process stack) onto the executor from the global level. struct Spooler<'a> {
/// pub spool: Arc<Injector<LightProc>>,
/// # Example threads: &'a ThreadManager<AsyncRunner>,
/// ```rust _marker: PhantomData<&'a ()>,
/// use executor::prelude::*;
///
/// # #[cfg(feature = "tokio-runtime")]
/// # #[tokio::main]
/// # async fn main() {
/// # start();
/// # }
/// #
/// # #[cfg(not(feature = "tokio-runtime"))]
/// # fn main() {
/// # start();
/// # }
/// #
/// # fn start() {
///
/// let handle = spawn(
/// async {
/// panic!("test");
/// },
/// );
///
/// run(
/// async {
/// handle.await;
/// },
/// ProcStack { },
/// );
/// # }
/// ```
pub fn spawn<F, R>(future: F) -> RecoverableHandle<R>
where
F: Future<Output = R> + Send + 'static,
R: Send + 'static,
{
let (task, handle) = LightProc::recoverable(future, worker::schedule);
task.schedule();
handle
} }
/// Spawns a blocking task. impl Spooler<'_> {
/// pub fn new() -> Self {
/// The task will be spawned onto a thread pool specifically dedicated to blocking tasks. let spool = Arc::new(Injector::new());
pub fn spawn_blocking<F, R>(future: F) -> RecoverableHandle<R> let threads = Box::leak(Box::new(
where ThreadManager::new(2, AsyncRunner, spool.clone())));
F: Future<Output = R> + Send + 'static, threads.initialize();
R: Send + 'static, Self { spool, threads, _marker: PhantomData }
{ }
let (task, handle) = LightProc::recoverable(future, schedule);
task.schedule();
handle
} }
/// #[derive(Clone, Debug)]
/// Acquire the static Pool reference pub struct Executor<'a> {
#[inline] spooler: Arc<Spooler<'a>>,
pub fn get() -> &'static Pool {
&*POOL
} }
pub fn get_manager() -> Option<&'static DynamicPoolManager<AsyncRunner>> { impl<'a, 'executor: 'a> Executor<'executor> {
DYNAMIC_POOL_MANAGER.get() pub fn new() -> Self {
} Executor {
spooler: Arc::new(Spooler::new()),
}
}
fn schedule(&self) -> impl Fn(LightProc) + 'a {
let task_queue = self.spooler.spool.clone();
move |lightproc: LightProc| {
task_queue.push(lightproc)
}
}
impl Pool {
/// ///
/// Spawn a process (which contains future + process stack) onto the executor via [Pool] interface. /// Spawn a task (a wrapped future) onto this executor.
///
/// # Example
/// ```rust
/// use executor::prelude::*;
///
/// # #[cfg(feature = "tokio-runtime")]
/// # #[tokio::main]
/// # async fn main() {
/// # start();
/// # }
/// #
/// # #[cfg(not(feature = "tokio-runtime"))]
/// # fn main() {
/// # start();
/// # }
/// #
/// # fn start() {
///
/// let executor = Executor::new();
///
/// let handle = executor.spawn(
/// async {
/// panic!("test");
/// },
/// );
///
/// executor.run(
/// async {
/// handle.await;
/// }
/// );
/// # }
/// ```
pub fn spawn<F, R>(&self, future: F) -> RecoverableHandle<R> pub fn spawn<F, R>(&self, future: F) -> RecoverableHandle<R>
where where
F: Future<Output = R> + Send + 'static, F: Future<Output = R> + Send + 'a,
R: Send + 'static, R: Send + 'a,
{ {
let (task, handle) = LightProc::recoverable(future, worker::schedule); let (task, handle) =
LightProc::recoverable(future, self.schedule());
task.schedule(); task.schedule();
handle handle
} }
}
/// Enqueues work, attempting to send to the thread pool in a pub fn spawn_local<F, R>(&self, future: F) -> RecoverableHandle<R>
/// nonblocking way and spinning up needed amount of threads where
/// based on the previous statistics without relying on F: Future<Output = R> + 'a,
/// if there is not a thread ready to accept the work or not. R: Send + 'a,
pub(crate) fn schedule(t: LightProc) { {
if let Err(err) = POOL.sender.try_send(t) { let (task, handle) =
// We were not able to send to the channel without LightProc::recoverable(future, schedule_local());
// blocking. task.schedule();
POOL.sender.send(err.into_inner()).unwrap(); handle
}
// Add up for every incoming scheduled task
DYNAMIC_POOL_MANAGER.get().unwrap().increment_frequency();
}
///
/// Low watermark value, defines the bare minimum of the pool.
/// Spawns initial thread set.
/// Can be configurable with env var `BASTION_BLOCKING_THREADS` at runtime.
#[inline]
fn low_watermark() -> &'static u64 {
lazy_static! {
static ref LOW_WATERMARK: u64 = {
env::var_os("BASTION_BLOCKING_THREADS")
.map(|x| x.to_str().unwrap().parse::<u64>().unwrap())
.unwrap_or(DEFAULT_LOW_WATERMARK)
};
} }
&*LOW_WATERMARK /// Block the calling thread until the given future completes.
} ///
/// # Example
/// ```rust
/// use executor::prelude::*;
/// use lightproc::prelude::*;
///
/// let executor = Executor::new();
///
/// let mut sum = 0;
///
/// executor.run(
/// async {
/// (0..10_000_000).for_each(|_| {
/// sum += 1;
/// });
/// }
/// );
/// ```
pub fn run<F, R>(&self, future: F) -> R
where
F: Future<Output = R>,
{
unsafe {
// An explicitly uninitialized `R`. Until `assume_init` is called this will not call any
// drop code for R
let mut out = MaybeUninit::uninit();
/// If low watermark isn't configured this is the default scaler value. // Wrap the future into one that stores the result into `out`.
/// This value is used for the heuristics of the scaler let future = {
const DEFAULT_LOW_WATERMARK: u64 = 2; let out = out.as_mut_ptr();
/// Pool interface between the scheduler and thread pool async move {
#[derive(Debug)] *out = future.await;
pub struct Pool { }
sender: Sender<LightProc>, };
receiver: Receiver<LightProc>,
// Pin the future onto the stack.
pin_utils::pin_mut!(future);
// Block on the future and wait for it to complete.
block(future);
// Assume that if the future completed and didn't panic it fully initialized its output
out.assume_init()
}
}
} }
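The `MaybeUninit` slot plus the wrapper future above let `run` hand back an `R` without requiring `R: Default` or boxing the future. For comparison, here is a hedged sketch of the same blocking idea written only with safe std APIs, where the output travels through the poll loop's return value instead (illustrative only; the crate's `run` pins on the stack with `pin_utils::pin_mut!` and reuses the crossbeam `Parker`-based `block` helper in the run module):

use std::future::Future;
use std::sync::Arc;
use std::task::{Context, Poll, Wake, Waker};
use std::thread::{self, Thread};

// Waker that unparks the thread currently blocked inside `block_on_safe`.
struct ThreadWaker(Thread);

impl Wake for ThreadWaker {
    fn wake(self: Arc<Self>) {
        self.0.unpark();
    }
}

// Poll the future on the calling thread, parking between polls, and return its output.
fn block_on_safe<F: Future>(future: F) -> F::Output {
    // Heap-pin for simplicity; `run` above avoids this allocation by pinning on the stack.
    let mut future = Box::pin(future);
    let waker = Waker::from(Arc::new(ThreadWaker(thread::current())));
    let mut cx = Context::from_waker(&waker);
    loop {
        match future.as_mut().poll(&mut cx) {
            Poll::Ready(out) => return out,
            Poll::Pending => thread::park(),
        }
    }
}

fn main() {
    assert_eq!(block_on_safe(async { 21 * 2 }), 42);
}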
#[derive(Debug)] #[derive(Debug)]
pub struct AsyncRunner { struct AsyncRunner;
}
impl DynamicRunner for AsyncRunner { impl DynamicRunner for AsyncRunner {
fn run_static(&self, park_timeout: Duration) -> ! { fn setup(task_queue: Arc<Injector<LightProc>>) -> Sleeper<LightProc> {
loop { let (worker, sleeper) = WorkerThread::new(task_queue);
for task in &POOL.receiver { install_worker(worker);
trace!("static: running task");
self.run(task);
}
trace!("static: empty queue, parking with timeout"); sleeper
thread::park_timeout(park_timeout);
}
} }
fn run_dynamic(&self, parker: impl Fn()) -> ! {
loop { fn run_static<'b>(fences: impl Iterator<Item=&'b Stealer<LightProc>>, park_timeout: Duration) -> ! {
while let Ok(task) = POOL.receiver.try_recv() { let worker = get_worker();
trace!("dynamic thread: running task"); worker.run_timeout(fences, park_timeout)
self.run(task);
}
trace!(
"dynamic thread: parking - {:?}",
std::thread::current().id()
);
parker();
}
} }
fn run_standalone(&self) {
while let Ok(task) = POOL.receiver.try_recv() { fn run_dynamic<'b>(fences: impl Iterator<Item=&'b Stealer<LightProc>>) -> ! {
self.run(task); let worker = get_worker();
} worker.run(fences)
trace!("standalone thread: quitting."); }
fn run_standalone<'b>(fences: impl Iterator<Item=&'b Stealer<LightProc>>) {
let worker = get_worker();
worker.run_once(fences)
} }
} }
impl AsyncRunner { thread_local! {
fn run(&self, task: LightProc) { static WORKER: Cell<Option<WorkerThread<'static, LightProc>>> = Cell::new(None);
task.run();
}
} }
static DYNAMIC_POOL_MANAGER: OnceCell<DynamicPoolManager<AsyncRunner>> = OnceCell::new(); fn get_worker() -> &'static WorkerThread<'static, LightProc> {
WORKER.with(|cell| {
static POOL: Lazy<Pool> = Lazy::new(|| { let worker = unsafe {
#[cfg(feature = "tokio-runtime")] &*cell.as_ptr() as &'static Option<WorkerThread<_>>
{
let runner = AsyncRunner {
// We use current() here instead of try_current()
// because we want bastion to crash as soon as possible
// if there is no available runtime.
runtime_handle: tokio::runtime::Handle::current(),
}; };
worker.as_ref()
.expect("AsyncRunner running outside Executor context")
})
}
DYNAMIC_POOL_MANAGER fn install_worker(worker_thread: WorkerThread<'static, LightProc>) {
.set(DynamicPoolManager::new(*low_watermark() as usize, runner)) WORKER.with(|cell| {
.expect("couldn't create dynamic pool manager"); cell.replace(Some(worker_thread));
});
}
fn schedule_local() -> impl Fn(LightProc) {
let worker = get_worker();
let unparker = worker.unparker().clone();
move |lightproc| {
// This is safe because we never replace the value in that Cell and thus never drop the
// WorkerThread pointed to.
worker.schedule_local(lightproc);
// We have to unpark the worker thread for our task to be run.
unparker.unpark();
} }
#[cfg(not(feature = "tokio-runtime"))] }
{
let runner = AsyncRunner {};
DYNAMIC_POOL_MANAGER
.set(DynamicPoolManager::new(*low_watermark() as usize, runner))
.expect("couldn't create dynamic pool manager");
}
DYNAMIC_POOL_MANAGER
.get()
.expect("couldn't get static pool manager")
.initialize();
let (sender, receiver) = unbounded();
Pool { sender, receiver }
});

View File

@ -1,73 +1,15 @@
//! //!
//! Blocking run of the async processes //! Blocking run of the async processes
//! //!
//!
use crate::worker;
use crossbeam_utils::sync::{Parker, Unparker}; use crossbeam_utils::sync::{Parker, Unparker};
use std::cell::Cell; use std::cell::Cell;
use std::future::Future; use std::future::Future;
use std::mem; use std::mem::ManuallyDrop;
use std::mem::{ManuallyDrop, MaybeUninit}; use std::ops::Deref;
use std::pin::Pin;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
use crate::proc_stack::ProcStack;
/// pub(crate) fn block<F, T>(f: F) -> T
/// This method blocks the current thread until passed future is resolved with an output.
///
/// It is called `block_on` or `blocking` in some executors.
///
/// # Example
/// ```rust
/// use executor::prelude::*;
/// use lightproc::prelude::*;
/// let mut sum = 0;
///
/// run(
/// async {
/// (0..10_000_000).for_each(|_| {
/// sum += 1;
/// });
/// },
/// ProcStack::default(),
/// );
/// ```
pub fn run<F, T>(future: F, stack: ProcStack) -> T
where
F: Future<Output = T>,
{
unsafe {
// An explicitly uninitialized `T`. Until `assume_init` is called this will not call any
// drop code for T
let mut out = MaybeUninit::uninit();
// Wrap the future into one that stores the result into `out`.
let future = {
let out = out.as_mut_ptr();
async move {
*out = future.await;
}
};
// Pin the future onto the stack.
pin_utils::pin_mut!(future);
// Extend the lifetime of the future to 'static.
let future = mem::transmute::<
Pin<&'_ mut dyn Future<Output = ()>>,
Pin<&'static mut dyn Future<Output = ()>>,
>(future);
// Block on the future and and wait for it to complete.
worker::set_stack(&stack, || block(future));
// Assume that if the future completed and didn't panic it fully initialized its output
out.assume_init()
}
}
fn block<F, T>(f: F) -> T
where where
F: Future<Output = T>, F: Future<Output = T>,
{ {
@ -116,9 +58,10 @@ fn vtable() -> &'static RawWakerVTable {
/// original RawWaker. /// original RawWaker.
unsafe fn clone_raw(ptr: *const ()) -> RawWaker { unsafe fn clone_raw(ptr: *const ()) -> RawWaker {
// [`Unparker`] implements `Clone` and upholds the contract stated above. The current // [`Unparker`] implements `Clone` and upholds the contract stated above. The current
// Implementation is simply an Arc over the actual inner values. // Implementation is simply an Arc over the actual inner values. However clone takes the
let unparker = Unparker::from_raw(ptr).clone(); // original value by reference so we need to make sure to not drop it.
RawWaker::new(Unparker::into_raw(unparker), vtable()) let unparker = ManuallyDrop::new(Unparker::from_raw(ptr));
RawWaker::new(Unparker::into_raw(unparker.deref().clone()), vtable())
} }
/// This function will be called when wake is called on the Waker. It must wake up the task /// This function will be called when wake is called on the Waker. It must wake up the task

View File

@ -52,22 +52,21 @@ use fmt::{Debug, Formatter};
use lazy_static::lazy_static; use lazy_static::lazy_static;
use lever::prelude::TTas; use lever::prelude::TTas;
use placement::CoreId; use placement::CoreId;
use std::collections::{HashMap, VecDeque}; use std::collections::VecDeque;
use std::time::Duration; use std::time::Duration;
use std::{ use std::{
sync::{ sync::{
atomic::{AtomicU64, Ordering}, atomic::{AtomicU64, Ordering},
Mutex, Mutex,
}, },
thread::{self, Thread}, thread,
}; };
use std::any::Any; use std::sync::{Arc, RwLock};
use std::panic::resume_unwind; use crossbeam_channel::bounded;
use std::thread::{JoinHandle, ThreadId}; use crossbeam_deque::{Injector, Stealer};
use crossbeam_deque::Worker;
use crossbeam_utils::sync::{Parker, Unparker};
use tracing::{debug, trace}; use tracing::{debug, trace};
use lightproc::lightproc::LightProc; use lightproc::lightproc::LightProc;
use crate::worker::Sleeper;
/// The default thread park timeout before checking for new tasks. /// The default thread park timeout before checking for new tasks.
const THREAD_PARK_TIMEOUT: Duration = Duration::from_millis(1); const THREAD_PARK_TIMEOUT: Duration = Duration::from_millis(1);
@ -113,16 +112,16 @@ lazy_static! {
/// run_standalone should return once it has no more tasks to process. /// run_standalone should return once it has no more tasks to process.
/// The `DynamicPoolManager` will spawn other standalone threads if needs be. /// The `DynamicPoolManager` will spawn other standalone threads if needs be.
pub trait DynamicRunner { pub trait DynamicRunner {
fn run_static(&self, park_timeout: Duration) -> ! { fn setup(task_queue: Arc<Injector<LightProc>>) -> Sleeper<LightProc>;
let parker = Parker::new();
self.run_dynamic(|| parker.park_timeout(park_timeout)); fn run_static<'b>(fences: impl Iterator<Item=&'b Stealer<LightProc>>,
} park_timeout: Duration) -> !;
fn run_dynamic(&self, parker: impl Fn()) -> !; fn run_dynamic<'b>(fences: impl Iterator<Item=&'b Stealer<LightProc>>) -> !;
fn run_standalone(&self); fn run_standalone<'b>(fences: impl Iterator<Item=&'b Stealer<LightProc>>);
} }
/// The `DynamicPoolManager` is responsible for /// The `ThreadManager` creates and destroys worker threads on demand, according to
/// growing and shrinking a pool according to EMA rules. /// EMA rules.
/// ///
/// It needs to be passed a structure that implements `DynamicRunner`, /// It needs to be passed a structure that implements `DynamicRunner`,
/// That will be responsible for actually spawning threads. /// That will be responsible for actually spawning threads.
@ -159,21 +158,38 @@ pub trait DynamicRunner {
/// ///
/// If you use tracing, you can have a look at the trace! logs generated by the structure. /// If you use tracing, you can have a look at the trace! logs generated by the structure.
/// ///
pub struct DynamicPoolManager<Runner> { pub struct ThreadManager<Runner> {
static_threads: usize, static_threads: usize,
dynamic_threads: usize, dynamic_threads: usize,
parked_threads: ArrayQueue<Unparker>, parked_threads: ArrayQueue<Sleeper<LightProc>>,
task_queue: Arc<Injector<LightProc>>,
fences: Arc<RwLock<Vec<Stealer<LightProc>>>>,
runner: Runner, runner: Runner,
last_frequency: AtomicU64, last_frequency: AtomicU64,
frequencies: TTas<VecDeque<u64>>, frequencies: TTas<VecDeque<u64>>,
} }
impl<Runner: Debug> Debug for DynamicPoolManager<Runner> { impl<Runner: Debug> Debug for ThreadManager<Runner> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
struct ThreadCount<'a>(&'a usize, &'a usize, &'a usize);
impl<'a> Debug for ThreadCount<'a> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("")
.field("static", self.0)
.field("dynamic", self.1)
.field("parked", self.2)
.finish()
}
}
fmt.debug_struct("DynamicPoolManager") fmt.debug_struct("ThreadManager")
.field("static_threads", &self.static_threads) .field("thread pool", &ThreadCount(
.field("dynamic_threads", &self.dynamic_threads) &self.static_threads,
.field("parked_threads", &self.parked_threads.len()) &self.dynamic_threads,
&self.parked_threads.len(),
))
.field("runner", &self.runner) .field("runner", &self.runner)
.field("last_frequency", &self.last_frequency) .field("last_frequency", &self.last_frequency)
.field("frequencies", &self.frequencies.try_lock()) .field("frequencies", &self.frequencies.try_lock())
@ -181,14 +197,20 @@ impl<Runner: Debug> Debug for DynamicPoolManager<Runner> {
} }
} }
impl<Runner: DynamicRunner + Sync + Send> DynamicPoolManager<Runner> { impl<Runner: DynamicRunner + Sync + Send> ThreadManager<Runner> {
pub fn new(static_threads: usize, runner: Runner) -> Self { pub fn new(static_threads: usize, runner: Runner, task_queue: Arc<Injector<LightProc>>) -> Self {
let dynamic_threads = 1.max(num_cpus::get().checked_sub(static_threads).unwrap_or(0)); let dynamic_threads = 1.max(num_cpus::get().checked_sub(static_threads).unwrap_or(0));
let parked_threads = ArrayQueue::new(1.max(static_threads + dynamic_threads));
let fences = Arc::new(RwLock::new(Vec::new()));
Self { Self {
static_threads, static_threads,
dynamic_threads, dynamic_threads,
parked_threads: ArrayQueue::new(dynamic_threads), parked_threads,
task_queue,
fences,
runner, runner,
last_frequency: AtomicU64::new(0), last_frequency: AtomicU64::new(0),
frequencies: TTas::new(VecDeque::with_capacity( frequencies: TTas::new(VecDeque::with_capacity(
@ -203,42 +225,79 @@ impl<Runner: DynamicRunner + Sync + Send> DynamicPoolManager<Runner> {
/// Initialize the dynamic pool /// Initialize the dynamic pool
/// That will be scaled /// That will be scaled
pub fn initialize(&'static self) { pub(crate) fn initialize(&'static self) {
let (tx, rx) = bounded(self.static_threads + self.dynamic_threads);
let fencelock = &self.fences;
let _guard = fencelock.write().unwrap();
let mut i = 0;
// Static thread manager that will always be available // Static thread manager that will always be available
trace!("spooling up {} static worker threads", self.static_threads); debug!("spooling up {} static worker threads", self.static_threads);
(0..self.static_threads).for_each(|n| { (0..self.static_threads).for_each(|_| {
let runner = &self.runner; let tx = tx.clone();
let fencelock = fencelock.clone();
let task_queue = self.task_queue.clone();
thread::Builder::new() thread::Builder::new()
.name(format!("static #{}", n)) .name(format!("rt({}) [static]", i))
.spawn(move || { .spawn(move || {
Self::affinity_pinner(); Self::affinity_pinner();
runner.run_static(THREAD_PARK_TIMEOUT);
let sleeper = Runner::setup(task_queue);
tx.send(sleeper).expect("Failed to push to parked_threads");
drop(tx);
let fencelock = fencelock.clone();
let fences = fencelock.read().unwrap();
Runner::run_static(fences.iter(), THREAD_PARK_TIMEOUT);
}) })
.expect("failed to spawn static worker thread"); .expect("failed to spawn static worker thread");
i += 1;
}); });
// Dynamic thread manager that will allow us to unpark threads when needed // Dynamic thread manager that will allow us to unpark threads when needed
trace!("spooling up {} dynamic worker threads", self.dynamic_threads); debug!("spooling up {} dynamic worker threads", self.dynamic_threads);
(0..self.dynamic_threads).for_each(|n| { (0..self.dynamic_threads).for_each(|_| {
let runner = &self.runner; let tx = tx.clone();
let fencelock = fencelock.clone();
let task_queue = self.task_queue.clone();
thread::Builder::new() thread::Builder::new()
.name(format!("dynamic #{}", n)) .name(format!("rt({}) [dyn]", i))
.spawn(move || { .spawn(move || {
Self::affinity_pinner(); Self::affinity_pinner();
let parker = Parker::new();
let unparker = parker.unparker(); let sleeper = Runner::setup(task_queue);
runner.run_dynamic(|| self.park_thread(&parker, unparker)); tx.send(sleeper).expect("Failed to push to parked_threads");
drop(tx);
let fences = fencelock.read().unwrap();
Runner::run_dynamic(fences.iter());
}) })
.expect("failed to spawn dynamic worker thread"); .expect("failed to spawn dynamic worker thread");
i += 1;
}); });
drop(tx);
let span = tracing::span!(tracing::Level::INFO, "sleepers").entered();
tracing::info!("Spawned {} threads", i);
for _ in 0..i {
let sleeper = rx.recv().unwrap();
tracing::info!("{:?}", &sleeper);
self.parked_threads.push(sleeper).unwrap();
}
span.exit();
// Pool manager to check frequency of task rates // Pool manager to check frequency of task rates
// and take action by scaling the pool accordingly. // and take action by scaling the pool accordingly.
thread::Builder::new() thread::Builder::new()
.name("pool manager".to_string()) .name("pool manager".to_string())
.spawn(move || { .spawn(move || {
let poll_interval = Duration::from_millis(SCALER_POLL_INTERVAL); let poll_interval = Duration::from_millis(SCALER_POLL_INTERVAL);
trace!("setting up the pool manager"); debug!("setting up the pool manager");
loop { loop {
self.scale_pool(); self.scale_pool();
thread::park_timeout(poll_interval); thread::park_timeout(poll_interval);
@ -249,56 +308,46 @@ impl<Runner: DynamicRunner + Sync + Send> DynamicPoolManager<Runner> {
/// Provision threads takes a number of threads that need to be made available. /// Provision threads takes a number of threads that need to be made available.
/// It will try to unpark threads from the dynamic pool, and spawn more threads if needs be. /// It will try to unpark threads from the dynamic pool, and spawn more threads if needs be.
pub fn provision_threads(&'static self, n: usize) { pub fn provision_threads(&'static self,
for i in 0..n { n: usize,
if !self.unpark_thread() { fencelock: &Arc<RwLock<Vec<Stealer<LightProc>>>>)
let new_threads = n - i; {
trace!( let rem = self.unpark_thread(n);
"no more threads to unpark, spawning {} new threads", if rem != 0 {
new_threads debug!("no more threads to unpark, spawning {} new threads", rem);
); //self.spawn_threads(rem, fencelock);
return self.spawn_threads(new_threads);
}
} }
} }
fn spawn_threads(&'static self, n: usize) { fn spawn_threads(&'static self, n: usize, fencelock: &Arc<RwLock<Vec<Stealer<LightProc>>>>) {
(0..n).for_each(|_| { (0..n).for_each(|_| {
let runner = &self.runner; let fencelock = fencelock.clone();
let task_queue = self.task_queue.clone();
thread::Builder::new() thread::Builder::new()
.name("standalone worker".to_string()) .name("standalone worker".to_string())
.spawn(move || { .spawn(move || {
Self::affinity_pinner(); Self::affinity_pinner();
runner.run_standalone(); let _ = Runner::setup(task_queue);
let fences = fencelock.read().unwrap();
Runner::run_standalone(fences.iter());
}) })
.unwrap(); .unwrap();
}) })
} }
/// Parks a thread until [`unpark_thread`] unparks it /// Tries to unpark the given number of threads.
pub fn park_thread(&self, parker: &Parker, unparker: &Unparker) { /// Returns `num - (number of threads unparked)`
if let Err(unparker) = self.parked_threads fn unpark_thread(&self, num: usize) -> usize {
// Unparker is an Arc internally so this is (comparatively) cheap to do. let len = self.parked_threads.len();
.push(unparker.clone()) { debug!("parked_threads: len is {}", len);
panic!("Failed to park with {:?}", unparker); // Only check threads once
for _ in 0..len {
if let Some(thread) = self.parked_threads.pop() {
thread.wakeup();
}
} }
trace!("parking thread {:?}", std::thread::current().id()); num
parker.park();
}
/// Pops a thread from the parked_threads queue and unparks it.
///
/// Returns true if there were threads to unpark
fn unpark_thread(&self) -> bool {
trace!("parked_threads: len is {}", self.parked_threads.len());
if let Some(unparker) = self.parked_threads.pop() {
debug!("Unparking thread with {:?}", &unparker);
unparker.unpark();
true
} else {
false
}
} }
/// Affinity pinner for blocking pool /// Affinity pinner for blocking pool
@ -372,6 +421,7 @@ impl<Runner: DynamicRunner + Sync + Send> DynamicPoolManager<Runner> {
// Calculates current time window's EMA value (including last sample) // Calculates current time window's EMA value (including last sample)
let curr_ema_frequency = Self::calculate_ema(&freq_queue); let curr_ema_frequency = Self::calculate_ema(&freq_queue);
trace!("Current EMA freq: {}", curr_ema_frequency);
// Adapts the thread count of pool // Adapts the thread count of pool
// //
@ -389,7 +439,7 @@ impl<Runner: DynamicRunner + Sync + Send> DynamicPoolManager<Runner> {
trace!("unparking {} threads", scale); trace!("unparking {} threads", scale);
// It is time to scale the pool! // It is time to scale the pool!
self.provision_threads(scale); self.provision_threads(scale, &self.fences);
} else if (curr_ema_frequency - prev_ema_frequency).abs() < f64::EPSILON } else if (curr_ema_frequency - prev_ema_frequency).abs() < f64::EPSILON
&& current_frequency != 0 && current_frequency != 0
{ {
@ -398,7 +448,7 @@ impl<Runner: DynamicRunner + Sync + Send> DynamicPoolManager<Runner> {
// For unblock the flow we should add up some threads to the pool, but not that many to // For unblock the flow we should add up some threads to the pool, but not that many to
// stagger the program's operation. // stagger the program's operation.
trace!("unparking {} threads", DEFAULT_LOW_WATERMARK); trace!("unparking {} threads", DEFAULT_LOW_WATERMARK);
self.provision_threads(DEFAULT_LOW_WATERMARK as usize); self.provision_threads(DEFAULT_LOW_WATERMARK as usize, &self.fences);
} }
} }
} }
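`calculate_ema` is not part of this hunk; for readers following the scaling logic, an exponential moving average over the sampled frequency window is typically computed along these lines (an illustrative sketch only, the exact coefficient and weighting used by the thread manager may differ):

use std::collections::VecDeque;

// Classic 2 / (N + 1) smoothing factor for a window of roughly 10 samples.
const EMA_COEFFICIENT: f64 = 2.0 / (10.0 + 1.0);

// Fold the sampled task frequencies (oldest first) into one smoothed value.
fn calculate_ema(freq_queue: &VecDeque<u64>) -> f64 {
    freq_queue.iter().fold(0.0, |ema, &sample| {
        EMA_COEFFICIENT * sample as f64 + (1.0 - EMA_COEFFICIENT) * ema
    })
}

fn main() {
    let samples: VecDeque<u64> = vec![0, 4, 8, 16, 8].into_iter().collect();
    // A burst of scheduled tasks pulls the EMA up quickly; quiet intervals let it decay.
    println!("current EMA frequency: {:.2}", calculate_ema(&samples));
}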

View File

@ -0,0 +1,174 @@
use std::marker::PhantomData;
use std::sync::Arc;
use std::time::Duration;
use crossbeam_deque::{Injector, Steal, Stealer, Worker};
use crossbeam_queue::SegQueue;
use crossbeam_utils::sync::{Parker, Unparker};
use lightproc::prelude::LightProc;
pub trait Runnable {
fn run(self);
}
impl Runnable for LightProc {
fn run(self) {
LightProc::run(self)
}
}
#[derive(Debug)]
/// A thread worker pulling tasks from a shared injector queue and executing them
pub(crate) struct WorkerThread<'a, Task> {
/// Shared task queue
task_queue: Arc<Injector<Task>>,
/// This thread's task queue. For efficiency reasons worker threads pull a batch of tasks
/// from the injector queue and work on them instead of pulling them one by one. Should the
/// global queue become empty, worker threads can steal tasks from each other.
tasks: Worker<Task>,
/// Queue of `!Send` tasks that have to be run entirely on this thread and must not be moved
/// to or stolen by other threads.
local_tasks: SegQueue<Task>,
/// Thread parker.
///
/// A worker thread will park when there is no more work it can do. Worker threads can be
/// unparked either by a local task being woken up or by the Executor owning the Injector queue.
parker: Parker,
_marker: PhantomData<&'a ()>,
}
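For readers who have not used crossbeam-deque before, the three queue types referenced by this struct cooperate roughly as follows; a standalone sketch with toy `u32` "tasks" (generic crossbeam-deque usage, not code from this crate):

use std::sync::Arc;
use crossbeam_deque::{Injector, Steal, Worker};

fn main() {
    // Global queue shared by the executor and all workers (the `task_queue` field above).
    let injector: Arc<Injector<u32>> = Arc::new(Injector::new());
    injector.push(1);
    injector.push(2);

    // Per-worker FIFO queue plus a Stealer handle that other workers can use.
    let local: Worker<u32> = Worker::new_fifo();
    let stealer = local.stealer();

    // A worker drains its own queue first, then refills it in a batch from the injector.
    assert!(local.pop().is_none());
    if let Steal::Success(task) = injector.steal_batch_and_pop(&local) {
        println!("running task {}", task);
    }

    // Another (hypothetical) worker could now take the leftovers through `stealer`.
    while let Steal::Success(task) = stealer.steal() {
        println!("stolen task {}", task);
    }
}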
#[derive(Debug)]
pub struct Sleeper<Task> {
stealer: Stealer<Task>,
unparker: Unparker,
}
impl<Task> Sleeper<Task> {
pub fn wakeup(&self) {
self.unparker.unpark();
}
}
impl<'a, T: Runnable + 'a> WorkerThread<'a, T> {
pub fn new(task_queue: Arc<Injector<T>>) -> (WorkerThread<'a, T>, Sleeper<T>) {
let tasks: Worker<T> = Worker::new_fifo();
let stealer = tasks.stealer();
let local_tasks: SegQueue<T> = SegQueue::new();
let parker = Parker::new();
let _marker = PhantomData;
let unparker = parker.unparker().clone();
(
Self { task_queue, tasks, local_tasks, parker, _marker },
Sleeper { stealer, unparker }
)
}
pub fn unparker(&self) -> &Unparker {
self.parker.unparker()
}
/// Run this worker thread "forever" (i.e. until the thread panics or is otherwise killed)
pub fn run(&self, fences: impl Iterator<Item=&'a Stealer<T>>) -> ! {
let fences: Vec<Stealer<T>> = fences
.map(|stealer| stealer.clone())
.collect();
loop {
self.run_inner(&fences);
self.parker.park();
}
}
pub fn run_timeout(&self, fences: impl Iterator<Item=&'a Stealer<T>>, timeout: Duration) -> ! {
let fences: Vec<Stealer<T>> = fences
.map(|stealer| stealer.clone())
.collect();
loop {
self.run_inner(&fences);
self.parker.park_timeout(timeout);
}
}
pub fn run_once(&self, fences: impl Iterator<Item=&'a Stealer<T>>) {
let fences: Vec<Stealer<T>> = fences
.map(|stealer| stealer.clone())
.collect();
self.run_inner(fences);
}
fn run_inner<F: AsRef<[Stealer<T>]>>(&self, fences: F) {
// Continue working until there is no work to do.
'work: while {
// Always run local tasks first since they can't be done by anybody else.
if let Some(task) = self.local_tasks.pop() {
task.run();
continue 'work;
} else if let Some(task) = self.tasks.pop() {
task.run();
continue 'work;
} else {
// Both of our own queues are empty; most likely we were woken up because new work
// arrived on the global queue.
// First try to take work from the global queue.
let mut i = 0;
loop {
match self.task_queue.steal_batch_and_pop(&self.tasks) {
// If we could steal from the global queue do more work.
Steal::Success(task) => {
task.run();
continue 'work;
},
// If there is no more work to steal from the global queue, try other
// workers next
Steal::Empty => break,
// If a race condition occurred, back off exponentially before trying again
Steal::Retry => {
    for _ in 0..(1 << i) {
        core::hint::spin_loop();
    }
    i += 1;
},
}
}
// If the global queue is empty too, steal from the thread with the most work.
// This is only None when there are no stealers installed which, given that we
// exist, *should* never be the case.
while let Some(fence) = select_fence(fences.as_ref().iter()) {
match fence.steal_batch_and_pop(&self.tasks) {
Steal::Success(task) => {
task.run();
continue 'work;
},
// If no other worker has work to do we're done once again.
Steal::Empty => break,
// If another worker is stealing from it at the same time, the selected stealer
// may no longer have the most tasks afterwards, so spin briefly and re-evaluate
// which worker to steal from.
Steal::Retry => core::hint::spin_loop(),
}
}
}
// If we get here we're done and need to park.
false
} {}
}
pub fn schedule_local(&self, task: T) {
self.local_tasks.push(task);
}
}
#[inline(always)]
fn select_fence<'a, T>(fences: impl Iterator<Item=&'a Stealer<T>>) -> Option<&'a Stealer<T>> {
fences.max_by_key(|fence| fence.len())
}

View File

@ -1,8 +1,8 @@
use bastion_executor::blocking; use std::io::Write;
use bastion_executor::run::run; use executor::run::run;
use lightproc::proc_stack::ProcStack;
use std::thread; use std::thread;
use std::time::Duration; use std::time::Duration;
use executor::prelude::{ProcStack, spawn};
#[cfg(feature = "tokio-runtime")] #[cfg(feature = "tokio-runtime")]
mod tokio_tests { mod tokio_tests {
@ -21,18 +21,18 @@ mod no_tokio_tests {
} }
fn run_test() { fn run_test() {
let output = run( let handle = spawn(
blocking::spawn_blocking( async {
async { let duration = Duration::from_millis(1);
let duration = Duration::from_millis(1); thread::sleep(duration);
thread::sleep(duration); //42
42 },
}, );
ProcStack::default(),
),
ProcStack::default(),
)
.unwrap();
assert_eq!(42, output); let output = run(handle, ProcStack {});
println!("{:?}", output);
std::io::stdout().flush();
assert!(output.is_some());
std::thread::sleep(Duration::from_millis(200));
} }

View File

@ -1,11 +1,11 @@
use bastion_executor::blocking; use executor::blocking;
use bastion_executor::run::run; use executor::run::run;
use futures::future::join_all; use futures_util::future::join_all;
use lightproc::proc_stack::ProcStack;
use lightproc::recoverable_handle::RecoverableHandle; use lightproc::recoverable_handle::RecoverableHandle;
use std::thread; use std::thread;
use std::time::Duration; use std::time::Duration;
use std::time::Instant; use std::time::Instant;
use executor::prelude::ProcStack;
// Test for slow joins without task bursts during joins. // Test for slow joins without task bursts during joins.
#[test] #[test]
@ -22,12 +22,11 @@ fn slow_join() {
let duration = Duration::from_millis(1); let duration = Duration::from_millis(1);
thread::sleep(duration); thread::sleep(duration);
}, },
ProcStack::default(),
) )
}) })
.collect::<Vec<RecoverableHandle<()>>>(); .collect::<Vec<RecoverableHandle<()>>>();
run(join_all(handles), ProcStack::default()); run(join_all(handles), ProcStack {});
// Let them join to see how it behaves under different workloads. // Let them join to see how it behaves under different workloads.
let duration = Duration::from_millis(thread_join_time_max); let duration = Duration::from_millis(thread_join_time_max);
@ -41,12 +40,11 @@ fn slow_join() {
let duration = Duration::from_millis(100); let duration = Duration::from_millis(100);
thread::sleep(duration); thread::sleep(duration);
}, },
ProcStack::default(),
) )
}) })
.collect::<Vec<RecoverableHandle<()>>>(); .collect::<Vec<RecoverableHandle<()>>>();
run(join_all(handles), ProcStack::default()); run(join_all(handles), ProcStack {});
// Slow joins shouldn't cause internal slow down // Slow joins shouldn't cause internal slow down
let elapsed = start.elapsed().as_millis() - thread_join_time_max as u128; let elapsed = start.elapsed().as_millis() - thread_join_time_max as u128;
@ -70,12 +68,11 @@ fn slow_join_interrupted() {
let duration = Duration::from_millis(1); let duration = Duration::from_millis(1);
thread::sleep(duration); thread::sleep(duration);
}, },
ProcStack::default(),
) )
}) })
.collect::<Vec<RecoverableHandle<()>>>(); .collect::<Vec<RecoverableHandle<()>>>();
run(join_all(handles), ProcStack::default()); run(join_all(handles), ProcStack {});
// Let them join to see how it behaves under different workloads. // Let them join to see how it behaves under different workloads.
// This time join under the time window. // This time join under the time window.
@ -90,12 +87,11 @@ fn slow_join_interrupted() {
let duration = Duration::from_millis(100); let duration = Duration::from_millis(100);
thread::sleep(duration); thread::sleep(duration);
}, },
ProcStack::default(),
) )
}) })
.collect::<Vec<RecoverableHandle<()>>>(); .collect::<Vec<RecoverableHandle<()>>>();
run(join_all(handles), ProcStack::default()); run(join_all(handles), ProcStack {});
// Slow joins shouldn't cause internal slow down // Slow joins shouldn't cause internal slow down
let elapsed = start.elapsed().as_millis() - thread_join_time_max as u128; let elapsed = start.elapsed().as_millis() - thread_join_time_max as u128;
@ -120,7 +116,6 @@ fn longhauling_task_join() {
let duration = Duration::from_millis(1000); let duration = Duration::from_millis(1000);
thread::sleep(duration); thread::sleep(duration);
}, },
ProcStack::default(),
) )
}) })
.collect::<Vec<RecoverableHandle<()>>>(); .collect::<Vec<RecoverableHandle<()>>>();
@ -137,12 +132,11 @@ fn longhauling_task_join() {
let duration = Duration::from_millis(100); let duration = Duration::from_millis(100);
thread::sleep(duration); thread::sleep(duration);
}, },
ProcStack::default(),
) )
}) })
.collect::<Vec<RecoverableHandle<()>>>(); .collect::<Vec<RecoverableHandle<()>>>();
run(join_all(handles), ProcStack::default()); run(join_all(handles), ProcStack {});
// Slow joins shouldn't cause internal slow down // Slow joins shouldn't cause internal slow down
let elapsed = start.elapsed().as_millis() - thread_join_time_max as u128; let elapsed = start.elapsed().as_millis() - thread_join_time_max as u128;

View File

@ -4,23 +4,21 @@ use lightproc::prelude::*;
use std::future::Future; use std::future::Future;
use std::sync::Arc; use std::sync::Arc;
use std::thread; use std::thread;
use std::thread::JoinHandle;
use std::time::Duration; use std::time::Duration;
fn spawn_on_thread<F, R>(fut: F) -> ProcHandle<R> fn spawn_on_thread<F, R>(fut: F) -> (JoinHandle<()>, ProcHandle<R>)
where where
F: Future<Output = R> + Send + 'static, F: Future<Output = R> + Send + 'static,
R: Send + 'static, R: Send + 'static,
{ {
let (sender, receiver) = channel::unbounded(); let (sender, receiver) = channel::unbounded();
let sender = Arc::new(sender);
let s = Arc::downgrade(&sender);
let future = async move { let future = async move {
let _ = sender;
fut.await fut.await
}; };
let schedule = move |t| s.upgrade().unwrap().send(t).unwrap(); let schedule = move |t| sender.send(t).unwrap();
let (proc, handle) = LightProc::build( let (proc, handle) = LightProc::build(
future, future,
schedule, schedule,
@ -28,19 +26,30 @@ where
proc.schedule(); proc.schedule();
thread::spawn(move || { let join = thread::spawn(move || {
for proc in receiver { for proc in receiver {
println!("Got a task: {:?}", proc);
proc.run(); proc.run();
} }
}); });
handle (join, handle)
} }
fn main() { fn main() {
executor::block_on(spawn_on_thread(async { let (join, handle) = spawn_on_thread(async {
println!("Sleeping!"); println!("Sleeping!");
async_std::task::sleep(Duration::from_secs(1)).await; async_std::task::sleep(Duration::from_millis(100)).await;
println!("Done sleeping"); println!("Done sleeping 1");
})); async_std::task::sleep(Duration::from_millis(100)).await;
println!("Done sleeping 2");
async_std::task::sleep(Duration::from_millis(100)).await;
println!("Done sleeping 3");
async_std::task::sleep(Duration::from_millis(100)).await;
println!("Done sleeping 4");
return 32;
});
let output = executor::block_on(handle);
assert_eq!(output, Some(32));
assert!(join.join().is_ok());
} }

View File

@ -76,10 +76,10 @@ impl LightProc {
/// println!("future panicked!: {}", &reason); /// println!("future panicked!: {}", &reason);
/// }); /// });
/// ``` /// ```
pub fn recoverable<F, R, S>(future: F, schedule: S) -> (Self, RecoverableHandle<R>) pub fn recoverable<'a, F, R, S>(future: F, schedule: S) -> (Self, RecoverableHandle<R>)
where F: Future<Output=R> + 'static, where F: Future<Output=R> + 'a,
R: 'static, R: 'a,
S: Fn(LightProc) + 'static, S: Fn(LightProc) + 'a,
{ {
let recovery_future = AssertUnwindSafe(future).catch_unwind(); let recovery_future = AssertUnwindSafe(future).catch_unwind();
let (proc, handle) = Self::build(recovery_future, schedule); let (proc, handle) = Self::build(recovery_future, schedule);
@ -114,10 +114,10 @@ impl LightProc {
/// schedule_function, /// schedule_function,
/// ); /// );
/// ``` /// ```
pub fn build<F, R, S>(future: F, schedule: S) -> (Self, ProcHandle<R>) pub fn build<'a, F, R, S>(future: F, schedule: S) -> (Self, ProcHandle<R>)
where F: Future<Output=R> + 'static, where F: Future<Output=R> + 'a,
R: 'static, R: 'a,
S: Fn(LightProc) + 'static, S: Fn(LightProc) + 'a,
{ {
let raw_proc = RawProc::allocate(future, schedule); let raw_proc = RawProc::allocate(future, schedule);
let proc = LightProc { raw_proc }; let proc = LightProc { raw_proc };

View File

@ -37,14 +37,16 @@ impl ProcData {
loop { loop {
// If the proc has been completed or closed, it can't be cancelled. // If the proc has been completed or closed, it can't be cancelled.
if state.intersects(COMPLETED | CLOSED) { if state.get_flags().intersects(COMPLETED | CLOSED) {
break; break;
} }
let (flags, references) = state.parts();
let new = State::new(flags | CLOSED, references);
// Mark the proc as closed. // Mark the proc as closed.
match self.state.compare_exchange_weak( match self.state.compare_exchange_weak(
state.into(), state,
(state | CLOSED).into(), new,
Ordering::AcqRel, Ordering::AcqRel,
Ordering::Acquire, Ordering::Acquire,
) { ) {
@ -96,9 +98,9 @@ impl ProcData {
loop { loop {
// Acquire the lock. If we're storing an awaiter, then also set the awaiter flag. // Acquire the lock. If we're storing an awaiter, then also set the awaiter flag.
let state = if new_is_none { let state = if new_is_none {
self.state.fetch_or(LOCKED.into(), Ordering::Acquire) self.state.fetch_or(LOCKED, Ordering::Acquire)
} else { } else {
self.state.fetch_or((LOCKED | AWAITER).into(), Ordering::Acquire) self.state.fetch_or(LOCKED | AWAITER, Ordering::Acquire)
}; };
// If the lock was acquired, break from the loop. // If the lock was acquired, break from the loop.

View File

@ -52,15 +52,16 @@ impl<R> ProcHandle<R> {
loop { loop {
// If the proc has been completed or closed, it can't be cancelled. // If the proc has been completed or closed, it can't be cancelled.
if state.intersects(COMPLETED | CLOSED) { if state.get_flags().intersects(COMPLETED | CLOSED) {
break; break;
} }
// If the proc is not scheduled nor running, we'll need to schedule it. // If the proc is not scheduled nor running, we'll need to schedule it.
let new = if state.intersects(SCHEDULED | RUNNING) { let (flags, references) = state.parts();
(state | SCHEDULED | CLOSED) + 1 let new = if flags.intersects(SCHEDULED | RUNNING) {
State::new(flags | SCHEDULED | CLOSED, references + 1)
} else { } else {
state | CLOSED State::new(flags | CLOSED, references)
}; };
// Mark the proc as closed. // Mark the proc as closed.
@ -73,7 +74,7 @@ impl<R> ProcHandle<R> {
Ok(_) => { Ok(_) => {
// If the proc is not scheduled nor running, schedule it so that its future // If the proc is not scheduled nor running, schedule it so that its future
// gets dropped by the executor. // gets dropped by the executor.
if !state.intersects(SCHEDULED | RUNNING) { if !state.get_flags().intersects(SCHEDULED | RUNNING) {
((*pdata).vtable.schedule)(ptr); ((*pdata).vtable.schedule)(ptr);
} }
@ -142,9 +143,11 @@ impl<R> Future for ProcHandle<R> {
} }
// Since the proc is now completed, mark it as closed in order to grab its output. // Since the proc is now completed, mark it as closed in order to grab its output.
let (flags, references) = state.parts();
let new = State::new(flags | CLOSED, references);
match (*pdata).state.compare_exchange( match (*pdata).state.compare_exchange(
state, state,
state | CLOSED, new,
Ordering::AcqRel, Ordering::AcqRel,
Ordering::Acquire, Ordering::Acquire,
) { ) {
@ -190,8 +193,8 @@ impl<R> Drop for ProcHandle<R> {
// proc. This is a common case so if the handle is not used, the overhead of it is only // proc. This is a common case so if the handle is not used, the overhead of it is only
// one compare-exchange operation. // one compare-exchange operation.
if let Err(mut state) = (*pdata).state.compare_exchange_weak( if let Err(mut state) = (*pdata).state.compare_exchange_weak(
SCHEDULED | HANDLE | REFERENCE, State::new(SCHEDULED | HANDLE, 1),
SCHEDULED | REFERENCE, State::new(SCHEDULED, 1),
Ordering::AcqRel, Ordering::AcqRel,
Ordering::Acquire, Ordering::Acquire,
) { ) {
@ -200,9 +203,10 @@ impl<R> Drop for ProcHandle<R> {
// must be dropped. // must be dropped.
if state.is_completed() && !state.is_closed() { if state.is_completed() && !state.is_closed() {
// Mark the proc as closed in order to grab its output. // Mark the proc as closed in order to grab its output.
let (flags, references) = state.parts();
match (*pdata).state.compare_exchange_weak( match (*pdata).state.compare_exchange_weak(
state, state,
state | CLOSED, State::new(flags | CLOSED, references),
Ordering::AcqRel, Ordering::AcqRel,
Ordering::Acquire, Ordering::Acquire,
) { ) {
@ -211,7 +215,7 @@ impl<R> Drop for ProcHandle<R> {
output = Some((((*pdata).vtable.get_output)(ptr) as *mut R).read()); output = Some((((*pdata).vtable.get_output)(ptr) as *mut R).read());
// Update the state variable because we're continuing the loop. // Update the state variable because we're continuing the loop.
state |= CLOSED; state = State::new(flags | CLOSED, references);
} }
Err(s) => state = s, Err(s) => state = s,
} }
@ -220,9 +224,10 @@ impl<R> Drop for ProcHandle<R> {
// close it and schedule one more time so that its future gets dropped by // close it and schedule one more time so that its future gets dropped by
// the executor. // the executor.
let new = if state.get_refcount() == 0 && !state.is_closed() { let new = if state.get_refcount() == 0 && !state.is_closed() {
SCHEDULED | CLOSED | REFERENCE State::new(SCHEDULED | CLOSED, 1)
} else { } else {
state & !HANDLE let (flags, references) = state.parts();
State::new(flags & !HANDLE, references)
}; };
// Unset the handle flag. // Unset the handle flag.

View File

@ -8,6 +8,7 @@ use crate::state::*;
use std::alloc::{self, Layout}; use std::alloc::{self, Layout};
use std::cell::Cell; use std::cell::Cell;
use std::future::Future; use std::future::Future;
use std::marker::PhantomData;
use std::mem::{self, ManuallyDrop}; use std::mem::{self, ManuallyDrop};
use std::panic::AssertUnwindSafe; use std::panic::AssertUnwindSafe;
use std::pin::Pin; use std::pin::Pin;
@ -17,18 +18,21 @@ use std::sync::atomic::Ordering;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
/// Raw pointers to the fields of a proc. /// Raw pointers to the fields of a proc.
pub(crate) struct RawProc<F, R, S> { pub(crate) struct RawProc<'a, F, R, S> {
pub(crate) pdata: *const ProcData, pub(crate) pdata: *const ProcData,
pub(crate) schedule: *const S, pub(crate) schedule: *const S,
pub(crate) future: *mut F, pub(crate) future: *mut F,
pub(crate) output: *mut R, pub(crate) output: *mut R,
// Make the lifetime 'a of the future invariant
_marker: PhantomData<&'a ()>,
} }
impl<F, R, S> RawProc<F, R, S> impl<'a, F, R, S> RawProc<'a, F, R, S>
where where
F: Future<Output = R> + 'static, F: Future<Output = R> + 'a,
R: 'static, R: 'a,
S: Fn(LightProc) + 'static, S: Fn(LightProc) + 'a,
{ {
/// Allocates a proc with the given `future` and `schedule` function. /// Allocates a proc with the given `future` and `schedule` function.
/// ///
@ -46,10 +50,11 @@ where
let raw = Self::from_ptr(raw_proc.as_ptr()); let raw = Self::from_ptr(raw_proc.as_ptr());
let state = AtomicState::new(State::new(SCHEDULED | HANDLE, 1));
// Write the pdata as the first field of the proc. // Write the pdata as the first field of the proc.
(raw.pdata as *mut ProcData).write(ProcData { (raw.pdata as *mut ProcData).write(ProcData {
state: AtomicState::new(SCHEDULED | HANDLE | REFERENCE), state,
awaiter: Cell::new(None), awaiter: Cell::new(None),
vtable: &ProcVTable { vtable: &ProcVTable {
raw_waker: RawWakerVTable::new( raw_waker: RawWakerVTable::new(
@ -115,6 +120,7 @@ where
schedule: p.add(proc_layout.offset_schedule) as *const S, schedule: p.add(proc_layout.offset_schedule) as *const S,
future: p.add(proc_layout.offset_future) as *mut F, future: p.add(proc_layout.offset_future) as *mut F,
output: p.add(proc_layout.offset_output) as *mut R, output: p.add(proc_layout.offset_output) as *mut R,
_marker: PhantomData,
} }
} }
} }
@ -127,7 +133,7 @@ where
loop { loop {
// If the proc is completed or closed, it can't be woken. // If the proc is completed or closed, it can't be woken.
if state.intersects(COMPLETED | CLOSED) { if state.get_flags().intersects(COMPLETED | CLOSED) {
// Drop the waker. // Drop the waker.
Self::decrement(ptr); Self::decrement(ptr);
break; break;
@ -138,8 +144,8 @@ where
if state.is_scheduled() { if state.is_scheduled() {
// Update the state without actually modifying it. // Update the state without actually modifying it.
match (*raw.pdata).state.compare_exchange_weak( match (*raw.pdata).state.compare_exchange_weak(
state.into(), state,
state.into(), state,
Ordering::AcqRel, Ordering::AcqRel,
Ordering::Acquire, Ordering::Acquire,
) { ) {
@ -151,10 +157,12 @@ where
Err(s) => state = s, Err(s) => state = s,
} }
} else { } else {
let (flags, references) = state.parts();
let new = State::new(flags | SCHEDULED, references);
// Mark the proc as scheduled. // Mark the proc as scheduled.
match (*raw.pdata).state.compare_exchange_weak( match (*raw.pdata).state.compare_exchange_weak(
state, state,
state | SCHEDULED, new,
Ordering::AcqRel, Ordering::AcqRel,
Ordering::Acquire, Ordering::Acquire,
) { ) {
@ -188,7 +196,7 @@ where
loop { loop {
// If the proc is completed or closed, it can't be woken. // If the proc is completed or closed, it can't be woken.
if state.intersects(COMPLETED | CLOSED) { if state.get_flags().intersects(COMPLETED | CLOSED) {
break; break;
} }
@ -206,11 +214,12 @@ where
Err(s) => state = s, Err(s) => state = s,
} }
} else { } else {
let (flags, references) = state.parts();
// If the proc is not scheduled nor running, we'll need to schedule after waking. // If the proc is not scheduled nor running, we'll need to schedule after waking.
let new = if !state.intersects(SCHEDULED | RUNNING) { let new = if !state.get_flags().intersects(SCHEDULED | RUNNING) {
(state | SCHEDULED) + 1 State::new(flags | SCHEDULED, references + 1)
} else { } else {
state | SCHEDULED State::new(flags | SCHEDULED, references)
}; };
// Mark the proc as scheduled. // Mark the proc as scheduled.
@ -222,7 +231,7 @@ where
) { ) {
Ok(_) => { Ok(_) => {
// If the proc is not scheduled nor running, now is the time to schedule. // If the proc is not scheduled nor running, now is the time to schedule.
if !state.intersects(SCHEDULED | RUNNING) { if !state.get_flags().intersects(SCHEDULED | RUNNING) {
// Schedule the proc. // Schedule the proc.
let proc = LightProc { let proc = LightProc {
raw_proc: NonNull::new_unchecked(ptr as *mut ()), raw_proc: NonNull::new_unchecked(ptr as *mut ()),
@ -248,7 +257,7 @@ where
let state = (*raw.pdata).state.fetch_add(1, Ordering::Relaxed); let state = (*raw.pdata).state.fetch_add(1, Ordering::Relaxed);
// If the reference count overflowed, abort. // If the reference count overflowed, abort.
if state.bits() > i64::MAX as u64 { if state.get_refcount() > i32::MAX as u32 {
std::process::abort(); std::process::abort();
} }
@ -264,10 +273,10 @@ where
let raw = Self::from_ptr(ptr); let raw = Self::from_ptr(ptr);
// Decrement the reference count. // Decrement the reference count.
let mut new = (*raw.pdata) let new = (*raw.pdata)
.state .state
.fetch_sub(1, Ordering::AcqRel); .fetch_sub(1, Ordering::AcqRel);
new.set_refcount(new.get_refcount().saturating_sub(1)); let new = new.set_refcount(new.get_refcount().saturating_sub(1));
// If this was the last reference to the proc and the `ProcHandle` has been dropped as // If this was the last reference to the proc and the `ProcHandle` has been dropped as
// well, then destroy the proc. // well, then destroy the proc.
@ -353,16 +362,17 @@ where
return; return;
} }
let (flags, references) = state.parts();
// Mark the proc as unscheduled and running. // Mark the proc as unscheduled and running.
match (*raw.pdata).state.compare_exchange_weak( match (*raw.pdata).state.compare_exchange_weak(
state, state,
(state & !SCHEDULED) | RUNNING, State::new((flags & !SCHEDULED) | RUNNING, references),
Ordering::AcqRel, Ordering::AcqRel,
Ordering::Acquire, Ordering::Acquire,
) { ) {
Ok(_) => { Ok(_) => {
// Update the state because we're continuing with polling the future. // Update our local state because we're continuing with polling the future.
state = (state & !SCHEDULED) | RUNNING; state = State::new((flags & !SCHEDULED) | RUNNING, references);
break; break;
} }
Err(s) => state = s, Err(s) => state = s,
@ -387,12 +397,14 @@ where
// The proc is now completed. // The proc is now completed.
loop { loop {
let (flags, references) = state.parts();
// If the handle is dropped, we'll need to close it and drop the output. // If the handle is dropped, we'll need to close it and drop the output.
let new = if !state.is_handle() { let new_flags = if !state.is_handle() {
(state & !RUNNING & !SCHEDULED) | COMPLETED | CLOSED (flags & !(RUNNING | SCHEDULED)) | COMPLETED | CLOSED
} else { } else {
(state & !RUNNING & !SCHEDULED) | COMPLETED (flags & !(RUNNING | SCHEDULED)) | COMPLETED
}; };
let new = State::new(new_flags, references);
// Mark the proc as not running and completed. // Mark the proc as not running and completed.
match (*raw.pdata).state.compare_exchange_weak( match (*raw.pdata).state.compare_exchange_weak(
@ -430,11 +442,14 @@ where
loop { loop {
// If the proc was closed while running, we'll need to unschedule in case it // If the proc was closed while running, we'll need to unschedule in case it
// was woken and then clean up its resources. // was woken and then clean up its resources.
let new = if state.is_closed() { let (flags, references) = state.parts();
state & !( RUNNING | SCHEDULED ) let flags = if state.is_closed() {
flags & !( RUNNING | SCHEDULED )
} else { } else {
state & !RUNNING flags & !RUNNING
}; };
let new = State::new(flags, references);
// Mark the proc as not running. // Mark the proc as not running.
match (*raw.pdata).state.compare_exchange_weak( match (*raw.pdata).state.compare_exchange_weak(
@@ -472,30 +487,31 @@ where
     }
 }

-impl<F, R, S> Clone for RawProc<F, R, S> {
+impl<'a, F, R, S> Clone for RawProc<'a, F, R, S> {
     fn clone(&self) -> Self {
         Self {
             pdata: self.pdata,
             schedule: self.schedule,
             future: self.future,
             output: self.output,
+            _marker: PhantomData,
         }
     }
 }

-impl<F, R, S> Copy for RawProc<F, R, S> {}
+impl<'a, F, R, S> Copy for RawProc<'a, F, R, S> {}

 /// A guard that closes the proc if polling its future panics.
-struct Guard<F, R, S>(RawProc<F, R, S>)
+struct Guard<'a, F, R, S>(RawProc<'a, F, R, S>)
 where
-    F: Future<Output = R> + 'static,
-    R: 'static,
-    S: Fn(LightProc) + 'static;
+    F: Future<Output = R> + 'a,
+    R: 'a,
+    S: Fn(LightProc) + 'a;

-impl<F, R, S> Drop for Guard<F, R, S>
+impl<'a, F, R, S> Drop for Guard<'a, F, R, S>
 where
-    F: Future<Output = R> + 'static,
-    R: 'static,
-    S: Fn(LightProc) + 'static,
+    F: Future<Output = R> + 'a,
+    R: 'a,
+    S: Fn(LightProc) + 'a,
 {
     fn drop(&mut self) {
         let raw = self.0;
@@ -522,9 +538,11 @@ where
                 }
                 // Mark the proc as not running, not scheduled, and closed.
+                let (flags, references) = state.parts();
+                let new = State::new((flags & !(RUNNING & SCHEDULED)) | CLOSED, references);
                 match (*raw.pdata).state.compare_exchange_weak(
                     state,
-                    (state & !RUNNING & !SCHEDULED) | CLOSED,
+                    new,
                     Ordering::AcqRel,
                     Ordering::Acquire,
                 ) {
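Guard's Drop impl runs during unwinding, so a panic inside polling still transitions the proc to the closed state. A self-contained sketch of that close-on-panic guard pattern (illustrative names and a boolean stand-in for the proc state, not the crate's API):

    struct CloseOnPanic<'a> {
        closed: &'a mut bool,
        defused: bool,
    }

    impl Drop for CloseOnPanic<'_> {
        fn drop(&mut self) {
            // Runs during unwinding if the caller panicked before defusing.
            if !self.defused {
                *self.closed = true;
            }
        }
    }

    fn poll_with_guard(closed: &mut bool, body: impl FnOnce()) {
        let mut guard = CloseOnPanic { closed, defused: false };
        body();                 // if this panics, Drop marks the proc closed
        guard.defused = true;   // normal completion: skip the cleanup
    }

    fn main() {
        let mut closed = false;
        poll_with_guard(&mut closed, || {});
        assert!(!closed);
    }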

@@ -1,3 +1,4 @@
+use std::fmt::{Debug, Formatter};
 use std::sync::atomic::{AtomicU64, Ordering};
 /// Set if the proc is scheduled for running.
@@ -8,7 +9,7 @@ use std::sync::atomic::{AtomicU64, Ordering};
 ///
 /// This flag can't be set when the proc is completed. However, it can be set while the proc is
 /// running, in which case it will be rescheduled as soon as polling finishes.
-pub(crate) const SCHEDULED: State = State::SCHEDULED;
+pub(crate) const SCHEDULED: StateFlags = StateFlags::SCHEDULED;

 /// Set if the proc is running.
 ///
@@ -16,7 +17,7 @@ pub(crate) const SCHEDULED: State = State::SCHEDULED;
 ///
 /// This flag can't be set when the proc is completed. However, it can be in scheduled state while
 /// it is running, in which case it will be rescheduled when it stops being polled.
-pub(crate) const RUNNING: State = State::RUNNING;
+pub(crate) const RUNNING: StateFlags = StateFlags::RUNNING;

 /// Set if the proc has been completed.
 ///
@@ -25,7 +26,7 @@ pub(crate) const RUNNING: State = State::RUNNING;
 /// the proc as stopped.
 ///
 /// This flag can't be set when the proc is scheduled or completed.
-pub(crate) const COMPLETED: State = State::COMPLETED;
+pub(crate) const COMPLETED: StateFlags = StateFlags::COMPLETED;

 /// Set if the proc is closed.
 ///
@@ -36,39 +37,29 @@ pub(crate) const COMPLETED: State = State::COMPLETED;
 /// 2. Its output is awaited by the `ProcHandle`.
 /// 3. It panics while polling the future.
 /// 4. It is completed and the `ProcHandle` is dropped.
-pub(crate) const CLOSED: State = State::CLOSED;
+pub(crate) const CLOSED: StateFlags = StateFlags::CLOSED;

 /// Set if the `ProcHandle` still exists.
 ///
 /// The `ProcHandle` is a special case in that it is only tracked by this flag, while all other
 /// proc references (`LightProc` and `Waker`s) are tracked by the reference count.
-pub(crate) const HANDLE: State = State::HANDLE;
+pub(crate) const HANDLE: StateFlags = StateFlags::HANDLE;

 /// Set if the `ProcHandle` is awaiting the output.
 ///
 /// This flag is set while there is a registered awaiter of type `Waker` inside the proc. When the
 /// proc gets closed or completed, we need to wake the awaiter. This flag can be used as a fast
 /// check that tells us if we need to wake anyone without acquiring the lock inside the proc.
-pub(crate) const AWAITER: State = State::AWAITER;
+pub(crate) const AWAITER: StateFlags = StateFlags::AWAITER;

 /// Set if the awaiter is locked.
 ///
 /// This lock is acquired before a new awaiter is registered or the existing one is woken.
-pub(crate) const LOCKED: State = State::LOCKED;
+pub(crate) const LOCKED: StateFlags = StateFlags::LOCKED;
-/// A single reference.
-///
-/// The lower bits in the state contain various flags representing the proc state, while the upper
-/// bits contain the reference count. The value of `REFERENCE` represents a single reference in the
-/// total reference count.
-///
-/// Note that the reference counter only tracks the `LightProc` and `Waker`s. The `ProcHandle` is
-/// tracked separately by the `HANDLE` flag.
-pub(crate) const REFERENCE: State = State::REFERENCE;
 bitflags::bitflags! {
     #[derive(Default)]
-    pub struct State: u64 {
+    pub struct StateFlags: u32 {
         const SCHEDULED = 1 << 0;
         const RUNNING = 1 << 1;
         const COMPLETED = 1 << 2;
@@ -76,125 +67,115 @@ bitflags::bitflags! {
         const HANDLE = 1 << 4;
         const AWAITER = 1 << 5;
         const LOCKED = 1 << 6;
-        const REFERENCE = 1 << 7;
     }
 }
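For reference, this is roughly how a bitflags 1.x type such as StateFlags behaves once generated; DemoFlags below is an illustrative stand-in, not part of the crate:

    bitflags::bitflags! {
        #[derive(Default)]
        pub struct DemoFlags: u32 {
            const SCHEDULED = 1 << 0;
            const RUNNING   = 1 << 1;
        }
    }

    fn main() {
        let mut flags = DemoFlags::SCHEDULED;
        assert!(flags.contains(DemoFlags::SCHEDULED));
        assert!(!flags.intersects(DemoFlags::RUNNING));

        // Transition: clear SCHEDULED, set RUNNING.
        flags = (flags & !DemoFlags::SCHEDULED) | DemoFlags::RUNNING;
        assert_eq!(flags, DemoFlags::RUNNING);
    }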
+#[repr(packed)]
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
+pub struct State {
+    bytes: [u8; 8]
+}
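The byte-array representation keeps State at exactly eight bytes with no padding, so it can mirror the u64 held in the atomic. A quick illustrative check of that layout assumption, independent of the crate:

    #[repr(packed)]
    #[derive(Copy, Clone)]
    struct Packed { bytes: [u8; 8] }

    fn main() {
        // Same size as the u64 it shadows, and a lossless byte round trip.
        assert_eq!(std::mem::size_of::<Packed>(), std::mem::size_of::<u64>());
        let p = Packed { bytes: 42u64.to_ne_bytes() };
        assert_eq!(u64::from_ne_bytes(p.bytes), 42);
    }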
 impl State {
     #[inline(always)]
-    const fn new(bits: u64) -> Self {
-        unsafe { Self::from_bits_unchecked(bits) }
-    }
-
-    /// Returns `true` if the future is in the pending.
-    #[inline(always)]
-    pub fn is_pending(&self) -> bool {
-        !self.is_completed()
-    }
-
-    bitfield::bitfield_fields! {
-        u64;
-        #[inline(always)]
-        /// A proc is considered to be scheduled whenever its `LightProc` reference exists. It is in scheduled
-        /// state at the moment of creation and when it gets unpaused either by its `ProcHandle` or woken
-        /// by a `Waker`.
-        ///
-        /// This flag can't be set when the proc is completed. However, it can be set while the proc is
-        /// running, in which case it will be rescheduled as soon as polling finishes.
-        pub is_scheduled, set_scheduled: 0;
-
-        #[inline(always)]
-        /// A proc is running state while its future is being polled.
-        ///
-        /// This flag can't be set when the proc is completed. However, it can be in scheduled state while
-        /// it is running, in which case it will be rescheduled when it stops being polled.
-        pub is_running, set_running: 1;
-
-        #[inline(always)]
-        /// Set if the proc has been completed.
-        ///
-        /// This flag is set when polling returns `Poll::Ready`. The output of the future is then stored
-        /// inside the proc until it becomes stopped. In fact, `ProcHandle` picks the output up by marking
-        /// the proc as stopped.
-        ///
-        /// This flag can't be set when the proc is scheduled or completed.
-        pub is_completed, set_completed: 2;
-
-        #[inline(always)]
-        /// Set if the proc is closed.
-        ///
-        /// If a proc is closed, that means its either cancelled or its output has been consumed by the
-        /// `ProcHandle`. A proc becomes closed when:
-        ///
-        /// 1. It gets cancelled by `LightProc::cancel()` or `ProcHandle::cancel()`.
-        /// 2. Its output is awaited by the `ProcHandle`.
-        /// 3. It panics while polling the future.
-        /// 4. It is completed and the `ProcHandle` is dropped.
-        pub is_closed, set_closed: 3;
-
-        #[inline(always)]
-        /// Set if the `ProcHandle` still exists.
-        ///
-        /// The `ProcHandle` is a special case in that it is only tracked by this flag, while all other
-        /// proc references (`LightProc` and `Waker`s) are tracked by the reference count.
-        pub is_handle, set_handle: 4;
-
-        #[inline(always)]
-        /// Set if the `ProcHandle` is awaiting the output.
-        ///
-        /// This flag is set while there is a registered awaiter of type `Waker` inside the proc. When the
-        /// proc gets closed or completed, we need to wake the awaiter. This flag can be used as a fast
-        /// check that tells us if we need to wake anyone without acquiring the lock inside the proc.
-        pub is_awaiter, set_awaiter: 5;
-
-        #[inline(always)]
-        /// Set if the awaiter is locked.
-        ///
-        /// This lock is acquired before a new awaiter is registered or the existing one is woken.
-        pub is_locked, set_locked: 6;
-
-        #[inline(always)]
-        /// The lower bits in the state contain various flags representing the proc state, while the upper
-        /// bits contain the reference count.
-        /// Note that the reference counter only tracks the `LightProc` and `Waker`s. The `ProcHandle` is
-        /// tracked separately by the `HANDLE` flag.
-        pub get_refcount, set_refcount: 63, 7;
-    }
+    pub const fn new(flags: StateFlags, references: u32) -> Self {
+        let [a,b,c,d] = references.to_ne_bytes();
+        let [e,f,g,h] = flags.bits.to_ne_bytes();
+        Self::from_bytes([a,b,c,d,e,f,g,h])
+    }
+
+    #[inline(always)]
+    pub const fn parts(self: Self) -> (StateFlags, u32) {
+        let [a,b,c,d,e,f,g,h] = self.bytes;
+        let refcount = u32::from_ne_bytes([a,b,c,d]);
+        let state = unsafe {
+            StateFlags::from_bits_unchecked(u32::from_ne_bytes([e,f,g,h]))
+        };
+        (state, refcount)
+    }
+
+    #[inline(always)]
+    /// The lower bits in the state contain various flags representing the proc state, while the upper
+    /// bits contain the reference count.
+    /// Note that the reference counter only tracks the `LightProc` and `Waker`s. The `ProcHandle` is
+    /// tracked separately by the `HANDLE` flag.
+    pub const fn get_refcount(self) -> u32 {
+        let [a,b,c,d,_,_,_,_] = self.bytes;
+        u32::from_ne_bytes([a,b,c,d])
+    }
+
+    #[inline(always)]
+    #[must_use]
+    pub const fn set_refcount(self, refcount: u32) -> Self {
+        let [a, b, c, d] = refcount.to_ne_bytes();
+        let [_, _, _, _, e, f, g, h] = self.bytes;
+        Self::from_bytes([a, b, c, d, e, f, g, h])
+    }
+
+    #[inline(always)]
+    pub const fn get_flags(self) -> StateFlags {
+        let [_, _, _, _, e, f, g, h] = self.bytes;
+        unsafe { StateFlags::from_bits_unchecked(u32::from_ne_bytes([e,f,g,h])) }
+    }
+
+    #[inline(always)]
+    const fn from_bytes(bytes: [u8; 8]) -> Self {
+        Self { bytes }
+    }
+
+    #[inline(always)]
+    const fn into_u64(self) -> u64 {
+        u64::from_ne_bytes(self.bytes)
+    }
+
+    #[inline(always)]
+    const fn from_u64(value: u64) -> Self {
+        Self::from_bytes(value.to_ne_bytes())
+    }
+
+    #[inline(always)]
+    pub const fn is_awaiter(&self) -> bool {
+        self.get_flags().contains(AWAITER)
+    }
+
+    #[inline(always)]
+    pub const fn is_closed(&self) -> bool {
+        self.get_flags().contains(CLOSED)
+    }
+
+    #[inline(always)]
+    pub const fn is_locked(&self) -> bool {
+        self.get_flags().contains(LOCKED)
+    }
+
+    #[inline(always)]
+    pub const fn is_scheduled(&self) -> bool {
+        self.get_flags().contains(SCHEDULED)
+    }
+
+    #[inline(always)]
+    pub const fn is_completed(&self) -> bool {
+        self.get_flags().contains(COMPLETED)
+    }
+
+    #[inline(always)]
+    pub const fn is_handle(&self) -> bool {
+        self.get_flags().contains(HANDLE)
+    }
+
+    #[inline(always)]
+    pub const fn is_running(&self) -> bool {
+        self.get_flags().contains(RUNNING)
+    }
 }

-impl std::ops::Add<u64> for State {
-    type Output = State;
-
-    fn add(mut self, rhs: u64) -> Self::Output {
-        self.set_refcount(self.get_refcount() + rhs);
-        self
-    }
-}
-
-impl std::ops::Sub<u64> for State {
-    type Output = State;
-
-    fn sub(mut self, rhs: u64) -> Self::Output {
-        self.set_refcount(self.get_refcount() - rhs);
-        self
-    }
-}
-
-impl<T> bitfield::BitRange<T> for State
-    where u64: bitfield::BitRange<T>
-{
-    fn bit_range(&self, msb: usize, lsb: usize) -> T {
-        self.bits.bit_range(msb, lsb)
-    }
-    fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) {
-        self.bits.set_bit_range(msb, lsb, value)
-    }
-}
-
-impl Into<usize> for State {
-    fn into(self) -> usize {
-        self.bits as usize
-    }
-}
+impl Debug for State {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("State")
+            .field("flags", &self.get_flags())
+            .field("references", &self.get_refcount())
+            .finish()
+    }
+}
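The intent of the new/parts/get_refcount/get_flags/set_refcount methods above is a lossless round trip between a (flags, reference count) pair and the eight packed bytes. A self-contained sketch of that property, using a plain u32 in place of StateFlags:

    fn pack(flags: u32, refs: u32) -> [u8; 8] {
        // Refcount bytes first, flag bytes second, in native byte order.
        let [a, b, c, d] = refs.to_ne_bytes();
        let [e, f, g, h] = flags.to_ne_bytes();
        [a, b, c, d, e, f, g, h]
    }

    fn unpack(bytes: [u8; 8]) -> (u32, u32) {
        let [a, b, c, d, e, f, g, h] = bytes;
        (u32::from_ne_bytes([e, f, g, h]), u32::from_ne_bytes([a, b, c, d]))
    }

    fn main() {
        let (flags, refs) = (0b0101, 3);
        assert_eq!(unpack(pack(flags, refs)), (flags, refs));
    }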
@@ -206,19 +187,19 @@ pub struct AtomicState {
 impl AtomicState {
     #[inline(always)]
     pub const fn new(v: State) -> Self {
-        let inner = AtomicU64::new(v.bits);
+        let inner = AtomicU64::new(v.into_u64());
         Self { inner }
     }

     #[inline(always)]
     pub fn load(&self, order: Ordering) -> State {
-        State::new(self.inner.load(order))
+        State::from_u64(self.inner.load(order))
     }

     #[inline(always)]
     #[allow(dead_code)]
-    pub fn store(&self, val: State, order: Ordering) {
-        self.inner.store(val.bits, order)
+    pub fn store(&self, state: State, order: Ordering) {
+        self.inner.store(state.into_u64(), order)
     }

     pub fn compare_exchange(
@@ -229,9 +210,9 @@ impl AtomicState {
         failure: Ordering
     ) -> Result<State, State>
     {
-        self.inner.compare_exchange(current.bits, new.bits, success, failure)
-            .map(|u| State::new(u))
-            .map_err(|u| State::new(u))
+        self.inner.compare_exchange(current.into_u64(), new.into_u64(), success, failure)
+            .map(|u| State::from_u64(u))
+            .map_err(|u| State::from_u64(u))
     }

     pub fn compare_exchange_weak(
@@ -242,27 +223,35 @@ impl AtomicState {
         failure: Ordering
     ) -> Result<State, State>
     {
-        self.inner.compare_exchange_weak(current.bits, new.bits, success, failure)
-            .map(|u| State::new(u))
-            .map_err(|u| State::new(u))
+        self.inner.compare_exchange_weak(current.into_u64(), new.into_u64(), success, failure)
+            .map(|u| State::from_u64(u))
+            .map_err(|u| State::from_u64(u))
     }

-    pub fn fetch_or(&self, val: State, order: Ordering) -> State {
-        State::new(self.inner.fetch_or(val.bits, order))
+    pub fn fetch_or(&self, val: StateFlags, order: Ordering) -> State {
+        let [a,b,c,d] = val.bits.to_ne_bytes();
+        let store = u64::from_ne_bytes([0,0,0,0,a,b,c,d]);
+        State::from_u64(self.inner.fetch_or(store, order))
     }

-    pub fn fetch_and(&self, val: State, order: Ordering) -> State {
-        State::new(self.inner.fetch_and(val.bits, order))
+    pub fn fetch_and(&self, val: StateFlags, order: Ordering) -> State {
+        let [a,b,c,d] = val.bits.to_ne_bytes();
+        let store = u64::from_ne_bytes([!0,!0,!0,!0,a,b,c,d]);
+        State::from_u64(self.inner.fetch_and(store, order))
     }

     // FIXME: Do this properly
-    pub fn fetch_add(&self, val: u64, order: Ordering) -> State {
-        State::new(self.inner.fetch_add(val << 7, order))
+    pub fn fetch_add(&self, val: u32, order: Ordering) -> State {
+        let [a,b,c,d] = val.to_ne_bytes();
+        let store = u64::from_ne_bytes([a,b,c,d,0,0,0,0]);
+        State::from_u64(self.inner.fetch_add(store, order))
    }

     // FIXME: Do this properly
-    pub fn fetch_sub(&self, val: u64, order: Ordering) -> State {
-        State::new(self.inner.fetch_sub(val << 7, order))
+    pub fn fetch_sub(&self, val: u32, order: Ordering) -> State {
+        let [a,b,c,d] = val.to_ne_bytes();
+        let store = u64::from_ne_bytes([a,b,c,d,0,0,0,0]);
+        State::from_u64(self.inner.fetch_sub(store, order))
     }
 }
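The fetch_add/fetch_sub rewrite relies on the operand having non-zero bytes only where the reference count lives, so the addition cannot disturb the flag bytes unless the count overflows into them, which is what the FIXME comments and the earlier abort-on-overflow check guard against. An illustrative standalone check of that byte placement, using the same native-endian packing as above:

    use std::sync::atomic::{AtomicU64, Ordering};

    fn main() {
        // First four bytes: refcount = 1. Last four bytes: flags = 0b101.
        let flags: u32 = 0b101;
        let packed = u64::from_ne_bytes({
            let [a, b, c, d] = 1u32.to_ne_bytes();
            let [e, f, g, h] = flags.to_ne_bytes();
            [a, b, c, d, e, f, g, h]
        });
        let state = AtomicU64::new(packed);

        // fetch_add with only the refcount bytes set bumps the count by one.
        let add = u64::from_ne_bytes({
            let [a, b, c, d] = 1u32.to_ne_bytes();
            [a, b, c, d, 0, 0, 0, 0]
        });
        state.fetch_add(add, Ordering::Relaxed);

        let [a, b, c, d, e, f, g, h] = state.load(Ordering::Relaxed).to_ne_bytes();
        assert_eq!(u32::from_ne_bytes([a, b, c, d]), 2);      // refcount incremented
        assert_eq!(u32::from_ne_bytes([e, f, g, h]), flags);  // flags untouched
    }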
@@ -279,112 +268,66 @@ mod tests {
     #[test]
     fn test_is_scheduled_returns_true() {
         let state = SCHEDULED;
-        assert_eq!(state.is_scheduled(), true);
-        let mut state2 = State::default();
-        state2.set_scheduled(true);
+        assert!(state.contains(SCHEDULED));
+        let mut state2 = StateFlags::default();
+        state2 |= SCHEDULED;
         assert_eq!(state, state2)
     }

     #[test]
-    fn test_is_scheduled_returns_false() {
-        let state = State::default();
-        assert_eq!(state.is_scheduled(), false);
-    }
-
-    #[test]
-    fn test_is_running_returns_true() {
-        let state = RUNNING;
-        assert_eq!(state.is_running(), true);
-    }
-
-    #[test]
-    fn test_is_running_returns_false() {
-        let state = State::default();
-        assert_eq!(state.is_running(), false);
-    }
-
-    #[test]
-    fn test_is_completed_returns_true() {
-        let state = COMPLETED;
-        assert_eq!(state.is_completed(), true);
-    }
-
-    #[test]
-    fn test_is_completed_returns_false() {
-        let state = State::default();
-        assert_eq!(state.is_completed(), false);
-    }
-
-    #[test]
-    fn test_is_closed_returns_true() {
-        let state = CLOSED;
-        assert_eq!(state.is_closed(), true);
-    }
-
-    #[test]
-    fn test_is_closed_returns_false() {
-        let state = State::default();
-        assert_eq!(state.is_closed(), false);
-    }
-
-    #[test]
-    fn test_is_handle_returns_true() {
-        let state = HANDLE;
-        assert_eq!(state.is_handle(), true);
-    }
-
-    #[test]
-    fn test_is_handle_returns_false() {
-        let state = State::default();
-        assert_eq!(state.is_handle(), false);
-    }
-
-    #[test]
-    fn test_is_awaiter_returns_true() {
-        let state = AWAITER;
-        assert_eq!(state.is_awaiter(), true);
-    }
-
-    #[test]
-    fn test_is_awaiter_returns_false() {
-        let state = State::default();
-        assert_eq!(state.is_awaiter(), false);
-    }
-
-    #[test]
-    fn test_is_locked_returns_true() {
-        let state = LOCKED;
-        assert_eq!(state.is_locked(), true);
-    }
-
-    #[test]
-    fn test_is_locked_returns_false() {
-        let state = State::default();
-        assert_eq!(state.is_locked(), false);
-    }
-
-    #[test]
-    fn test_is_pending_returns_true() {
-        let state = State::default();
-        assert_eq!(state.is_pending(), true);
-    }
-
-    #[test]
-    fn test_is_pending_returns_false() {
-        let state = COMPLETED;
-        assert_eq!(state.is_pending(), false);
-    }
+    fn flags_work() {
+        let flags = SCHEDULED;
+        assert_eq!(flags, SCHEDULED);
+
+        let flags = SCHEDULED | RUNNING;
+        assert_eq!(flags, SCHEDULED | RUNNING);
+
+        let flags = RUNNING | AWAITER | COMPLETED;
+        assert_eq!(flags, RUNNING | AWAITER | COMPLETED);
+    }

     #[test]
     fn test_add_sub_refcount() {
-        let state = State::default();
+        let state = State::new(StateFlags::default(), 0);
         assert_eq!(state.get_refcount(), 0);
-        let state = state + 5;
+        let state = state.set_refcount(5);
         assert_eq!(state.get_refcount(), 5);
-        let mut state = state - 2;
+        let state = state.set_refcount(3);
         assert_eq!(state.get_refcount(), 3);
-        state.set_refcount(1);
+        let state = state.set_refcount(1);
         assert_eq!(state.get_refcount(), 1);
     }
+
+    #[test]
+    fn test_mixed_refcount() {
+        let flags = SCHEDULED | RUNNING | AWAITER;
+        let state = State::new(flags, 0);
+        println!("{:?}", state);
+        assert_eq!(state.get_refcount(), 0);
+        let state = state.set_refcount(5);
+        println!("{:?}", state);
+        assert_eq!(state.get_refcount(), 5);
+        let (mut flags, references) = state.parts();
+        assert_eq!(references, 5);
+        flags &= !AWAITER;
+        let state = State::new(flags, references);
+        println!("{:?}", state);
+        assert_eq!(state.get_refcount(), 5);
+        let state = state.set_refcount(3);
+        println!("{:?}", state);
+        assert_eq!(state.get_refcount(), 3);
+        let state = state.set_refcount(1);
+        println!("{:?}", state);
+        assert_eq!(state.get_refcount(), 1);
+        assert_eq!(state.get_flags(), SCHEDULED | RUNNING);
+    }
 }