diff --git a/Cargo.lock b/Cargo.lock index 7fdc83f..e17a3fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -19,7 +19,7 @@ checksum = "372baaa5d3a422d8816b513bcdb2c120078c8614f7ecbcc3baf34a1634bbbe2e" dependencies = [ "abnf", "indexmap", - "itertools", + "itertools 0.9.0", "pretty", ] @@ -70,6 +70,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "anyhow" +version = "1.0.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62e1f47f7dc0422027a4e370dd4548d4d66b26782e513e98dca1e689e058a80e" + [[package]] name = "api" version = "0.1.0" @@ -91,6 +97,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +[[package]] +name = "arrayvec" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" + [[package]] name = "async-channel" version = "1.6.1" @@ -127,6 +139,22 @@ dependencies = [ "futures-lite", ] +[[package]] +name = "async-global-executor" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6" +dependencies = [ + "async-channel", + "async-executor", + "async-io", + "async-mutex", + "blocking", + "futures-lite", + "num_cpus", + "once_cell", +] + [[package]] name = "async-io" version = "1.6.0" @@ -155,6 +183,15 @@ dependencies = [ "event-listener", ] +[[package]] +name = "async-mutex" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" +dependencies = [ + "event-listener", +] + [[package]] name = "async-native-tls" version = "0.3.3" @@ -211,12 +248,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8056f1455169ab86dd47b47391e4ab0cbd25410a70e9fe675544f49bafaf952" dependencies = [ "async-channel", + "async-global-executor", + "async-io", "async-lock", - "crossbeam-utils", + "crossbeam-utils 0.8.5", "futures-channel", "futures-core", "futures-io", + "futures-lite", + "gloo-timers", + "kv-log-macro", + "log", "memchr", + "num_cpus", "once_cell", "pin-project-lite", "pin-utils", @@ -303,6 +347,12 @@ dependencies = [ "which", ] +[[package]] +name = "bitfield" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46afbd2983a5d5a7bd740ccb198caf5b82f45c40c09c0eed36052d91cb92e719" + [[package]] name = "bitflags" version = "1.3.2" @@ -316,7 +366,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" dependencies = [ "arrayref", - "arrayvec", + "arrayvec 0.5.2", "constant_time_eq", ] @@ -364,6 +414,18 @@ dependencies = [ "once_cell", ] +[[package]] +name = "bstr" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" +dependencies = [ + "lazy_static", + "memchr", + "regex-automata", + "serde", +] + [[package]] name = "bumpalo" version = "3.8.0" @@ -445,6 +507,15 @@ dependencies = [ "capnp", ] +[[package]] +name = "cast" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a" +dependencies = [ + "rustc_version", +] + [[package]] name = "cc" version = "1.0.71" 
@@ -552,6 +623,126 @@ dependencies = [ "libc", ] +[[package]] +name = "criterion" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10" +dependencies = [ + "atty", + "cast", + "clap", + "criterion-plot", + "csv", + "itertools 0.10.1", + "lazy_static", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_cbor", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57" +dependencies = [ + "cast", + "itertools 0.10.1", +] + +[[package]] +name = "crossbeam" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch 0.9.5", + "crossbeam-queue", + "crossbeam-utils 0.8.5", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-utils 0.8.5", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-epoch 0.9.5", + "crossbeam-utils 0.8.5", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +dependencies = [ + "autocfg", + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", + "lazy_static", + "maybe-uninit", + "memoffset 0.5.6", + "scopeguard", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-utils 0.8.5", + "lazy_static", + "memoffset 0.6.4", + "scopeguard", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b10ddc024425c88c2ad148c1b0fd53f4c6d38db9697c9f1588381212fa657c9" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-utils 0.8.5", +] + +[[package]] +name = "crossbeam-utils" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +dependencies = [ + "autocfg", + "cfg-if 0.1.10", + "lazy_static", +] + [[package]] name = "crossbeam-utils" version = "0.8.5" @@ -562,6 +753,28 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "csv" +version = "1.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" +dependencies = [ + "bstr", + "csv-core", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +dependencies = [ + "memchr", +] + 
[[package]] name = "ctor" version = "0.1.21" @@ -582,7 +795,7 @@ dependencies = [ "annotate-snippets", "elsa", "hex", - "itertools", + "itertools 0.9.0", "lazy_static", "once_cell", "percent-encoding", @@ -609,7 +822,7 @@ dependencies = [ [[package]] name = "diflouroborane" -version = "0.3.0" +version = "0.4.0" dependencies = [ "api", "async-channel", @@ -622,6 +835,7 @@ dependencies = [ "chrono", "clap", "erased-serde", + "executor", "futures-signals", "futures-test", "intmap", @@ -629,6 +843,7 @@ dependencies = [ "lazy_static", "libc", "lmdb-rkv", + "pin-utils", "ptr_meta", "rand", "rkyv", @@ -643,7 +858,7 @@ dependencies = [ "smol", "tempfile", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.2.25", "uuid", ] @@ -720,6 +935,33 @@ version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" +[[package]] +name = "executor" +version = "0.3.0" +dependencies = [ + "arrayvec 0.7.2", + "async-std", + "criterion", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch 0.9.5", + "crossbeam-queue", + "crossbeam-utils 0.8.5", + "futures-timer", + "futures-util", + "lazy_static", + "lever", + "libc", + "lightproc", + "num_cpus", + "once_cell", + "pin-utils", + "rand", + "slab", + "tracing", + "tracing-subscriber 0.3.1", +] + [[package]] name = "fake-simd" version = "0.1.2" @@ -887,6 +1129,12 @@ dependencies = [ "pin-utils", ] +[[package]] +name = "futures-timer" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" + [[package]] name = "futures-util" version = "0.3.17" @@ -955,6 +1203,19 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +[[package]] +name = "gloo-timers" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47204a46aaff920a1ea58b11d03dec6f704287d27561724a4631e450654a891f" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "gsasl-sys" version = "0.2.3" @@ -1070,6 +1331,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "0.4.8" @@ -1085,6 +1355,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -1097,13 +1376,29 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" +[[package]] +name = "lever" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4894ef6dbd1b26dbd7284530c227aab005a57b939427ace2d4d0cc62c23fb05b" +dependencies = [ + "anyhow", + "crossbeam-epoch 0.8.2", + "itertools 0.9.0", + "lazy_static", + "log", + "parking_lot", + "slice-group-by", + "thiserror", +] + [[package]] name = "lexical-core" version = "0.7.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe" dependencies = [ - "arrayvec", + "arrayvec 0.5.2", "bitflags", "cfg-if 1.0.0", "ryu", @@ -1112,9 +1407,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.105" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "869d572136620d55835903746bcb5cdc54cb2851fd0aeec53220b4bb65ef3013" +checksum = "8521a1b57e76b1ec69af7599e75e38e7b7fad6610f037db8c79b127201b5d119" [[package]] name = "libloading" @@ -1126,6 +1421,20 @@ dependencies = [ "winapi", ] +[[package]] +name = "lightproc" +version = "0.3.0" +dependencies = [ + "async-std", + "bitfield", + "bitflags", + "crossbeam", + "crossbeam-utils 0.8.5", + "futures-executor", + "lazy_static", + "pin-utils", +] + [[package]] name = "lmdb-rkv" version = "0.14.0" @@ -1149,6 +1458,15 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "lock_api" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" +dependencies = [ + "scopeguard", +] + [[package]] name = "log" version = "0.4.14" @@ -1156,6 +1474,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ "cfg-if 1.0.0", + "value-bag", ] [[package]] @@ -1179,12 +1498,36 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" + [[package]] name = "memchr" version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +[[package]] +name = "memoffset" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +dependencies = [ + "autocfg", +] + +[[package]] +name = "memoffset" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" +dependencies = [ + "autocfg", +] + [[package]] name = "native-tls" version = "0.2.8" @@ -1233,12 +1576,28 @@ dependencies = [ "autocfg", ] +[[package]] +name = "num_cpus" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +dependencies = [ + "hermit-abi", + "libc", +] + [[package]] name = "once_cell" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + [[package]] name = "opaque-debug" version = "0.2.3" @@ -1290,6 +1649,31 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +[[package]] +name = "parking_lot" +version = "0.11.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +dependencies = [ + "cfg-if 1.0.0", + "instant", + "libc", + "redox_syscall", + "smallvec", + "winapi", +] + [[package]] name = "peeking_take_while" version = "0.1.2" @@ -1405,6 +1789,34 @@ version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12295df4f294471248581bc09bef3c38a5e46f1e36d6a37353621a0c6c357e1f" +[[package]] +name = "plotters" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c" + +[[package]] +name = "plotters-svg" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9" +dependencies = [ + "plotters-backend", +] + [[package]] name = "polling" version = "2.1.0" @@ -1529,6 +1941,31 @@ dependencies = [ "rand_core", ] +[[package]] +name = "rayon" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +dependencies = [ + "autocfg", + "crossbeam-deque", + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-utils 0.8.5", + "lazy_static", + "num_cpus", +] + [[package]] name = "redox_syscall" version = "0.2.10" @@ -1672,7 +2109,7 @@ dependencies = [ "base64", "blake2b_simd", "constant_time_eq", - "crossbeam-utils", + "crossbeam-utils 0.8.5", ] [[package]] @@ -1681,6 +2118,15 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + [[package]] name = "ryu" version = "1.0.5" @@ -1706,6 +2152,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + [[package]] name = "sdk" version = "0.1.0" @@ -1739,6 +2191,12 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" + [[package]] name = "serde" version = "1.0.130" @@ -1858,6 +2316,12 @@ version = "0.4.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" +[[package]] +name = "slice-group-by" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f7474f0b646d228360ab62ed974744617bc869d959eac8403bfa3665931a7fb" + [[package]] name = "smallvec" version = "1.7.0" @@ -1992,6 +2456,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "1.5.0" @@ -2082,6 +2556,20 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "tracing-subscriber" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80a4ddde70311d8da398062ecf6fc2c309337de6b0f77d6c27aff8d53f6fca52" +dependencies = [ + "ansi_term 0.12.1", + "sharded-slab", + "smallvec", + "thread_local", + "tracing-core", + "tracing-log", +] + [[package]] name = "typed-arena" version = "1.7.0" @@ -2149,6 +2637,16 @@ dependencies = [ "serde", ] +[[package]] +name = "value-bag" +version = "1.0.0-alpha.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79923f7731dc61ebfba3633098bf3ac533bbd35ccd8c57e7088d9a5eebe0263f" +dependencies = [ + "ctor", + "version_check", +] + [[package]] name = "vcpkg" version = "0.2.15" diff --git a/Cargo.toml b/Cargo.toml index 1223920..bfb2c49 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "diflouroborane" -version = "0.3.0" +version = "0.4.0" authors = [ "dequbed " , "Kai Jan Kriegel " , "Joseph Langosch " @@ -9,6 +9,7 @@ authors = [ "dequbed " license = "LGPL-3.0" edition = "2021" publish = false +readme = "README.md" [profile.release] opt-level = 3 @@ -29,8 +30,10 @@ uuid = { version = "0.8.2", features = ["serde", "v4"] } async-trait = "0.1.51" async-native-tls = "0.3" intmap = "0.7" +pin-utils = "0.1.0" # Runtime +executor = { path = "runtime/executor" } smol = "1.2.5" # Catch&Handle POSIX process signals @@ -81,4 +84,4 @@ tempfile = "3.2" bincode = "1.3" [workspace] -members = ["modules/*", "api"] +members = ["modules/*", "api"] \ No newline at end of file diff --git a/bffhd/lib.rs b/bffhd/lib.rs index c526f7d..0531fb3 100644 --- a/bffhd/lib.rs +++ b/bffhd/lib.rs @@ -22,23 +22,3 @@ pub mod resource; pub mod state; /// Varints pub mod varint; - -use intmap::IntMap; -use resource::ResourceDriver; - -#[derive(Debug)] -struct InitiatorDriver; -#[derive(Debug)] -struct ActorDriver; - -#[derive(Debug)] -struct System { - resources: IntMap, - initiators: IntMap, - actors: IntMap, -} - -#[derive(Debug)] -struct Accountant { - -} \ No newline at end of file diff --git a/runtime/asyncio/Cargo.toml b/runtime/asyncio/Cargo.toml deleted file mode 100644 index 55a9cac..0000000 --- a/runtime/asyncio/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "asyncio" -version = "0.1.0" -edition = "2021" -description = "io_uring-first async I/O implementation" -readme = "README.md" -publish = false - -[dependencies] -static_assertions = "1.1" -libc = "0.2" -nix = "0.23" -bitflags = "1.3" -ptr_meta = "0.1" - -# SegQueue for task waiting on CQE or SQE being available again. -crossbeam-queue = "0.3" - -# AsyncRead, AsyncWrite, AsyncSeek and related traits -futures-io = "0.3" - -[dev-dependencies] -# As Mr. 
Torgue would put it: THE MOST EXTREME F*CKING ASYNC FUNCTION RUNNNER! EXPLOSIONS! -extreme = "666.666.666666" -futures-lite = "1.12" \ No newline at end of file diff --git a/runtime/asyncio/examples/future.rs b/runtime/asyncio/examples/future.rs deleted file mode 100644 index 94d3e98..0000000 --- a/runtime/asyncio/examples/future.rs +++ /dev/null @@ -1,63 +0,0 @@ -use std::fs::File; -use std::future::Future; -use std::io; -use std::os::unix::prelude::AsRawFd; -use std::sync::Arc; -use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; -use asyncio::ctypes::IORING_OP; -use asyncio::io_uring::IoUring; - -use futures_lite::io::AsyncReadExt; - -pub fn drive(iouring: &IoUring, mut f: impl Future>) -> io::Result { - static VTABLE: RawWakerVTable = RawWakerVTable::new( - |clone_me| unsafe { - let arc = Arc::from_raw(clone_me); - std::mem::forget(arc.clone()); - RawWaker::new(Arc::into_raw(arc) as *const (), &VTABLE) - }, - |wake_me| unsafe { Arc::from_raw(wake_me); }, - |wake_by_ref_me| unsafe {}, - |drop_me| unsafe { drop(Arc::from_raw(drop_me)) }, - ); - - let mut f = unsafe { std::pin::Pin::new_unchecked(&mut f) }; - let park = Arc::new(()); - let sender = Arc::into_raw(park.clone()); - let raw_waker = RawWaker::new(sender as *const _, &VTABLE); - let waker = unsafe { Waker::from_raw(raw_waker) }; - let mut cx = Context::from_waker(&waker); - - loop { - match f.as_mut().poll(&mut cx) { - Poll::Ready(t) => return t, - Poll::Pending => { - iouring.handle_completions(); - match iouring.submit_wait() { - Ok(_) => {} - Err(e) => return Err(e), - } - } - } - } -} - -fn main() { - let file = File::open("/tmp/poem").unwrap(); - let fd = file.as_raw_fd(); - - let mut ring: &'static IoUring = Box::leak(Box::new(IoUring::setup(4).unwrap())); - - let mut async_file = asyncio::fs::File::new(fd, ring); - - let mut buf = Box::new([0u8; 4096]); - - let f = async move { - let len = async_file.read(&mut buf[..]).await?; - println!("Read {} bytes:", len); - let str = unsafe { std::str::from_utf8_unchecked(&buf[..len]) }; - println!("{}", str); - Ok(()) - }; - drive(ring, f); -} \ No newline at end of file diff --git a/runtime/asyncio/examples/raw.rs b/runtime/asyncio/examples/raw.rs deleted file mode 100644 index 0845fe7..0000000 --- a/runtime/asyncio/examples/raw.rs +++ /dev/null @@ -1,54 +0,0 @@ -use std::fs::File; -use std::os::unix::prelude::AsRawFd; -use asyncio::ctypes::IORING_OP; -use asyncio::io_uring::IoUring; - - -fn main() { - let file = File::open("/tmp/poem").unwrap(); - let fd = file.as_raw_fd(); - - let ring = IoUring::setup(4).unwrap(); - let mut cqes = ring.cqes(); - - let buf = Box::new([0u8; 4096]); - ring.try_prepare(3, |mut sqes| { - let mut sqe = sqes.next().unwrap(); - sqe.set_opcode(IORING_OP::READ); - sqe.set_address(buf.as_ptr() as u64); - sqe.set_fd(fd); - sqe.set_len(4096); - - let mut sqe = sqes.next().unwrap(); - sqe.set_opcode(IORING_OP::NOP); - sqe.set_userdata(0xCAFEBABE); - - let mut sqe = sqes.next().unwrap(); - sqe.set_opcode(IORING_OP::NOP); - sqe.set_userdata(0xDEADBEEF); - }).unwrap(); - let mut amt = 0; - while amt == 0 { - amt = ring.submit().unwrap(); - } - println!("{}", amt); - - for _ in 0..3 { - let mut cqe = None; - while cqe.is_none() { - cqe = cqes.next(); - } - let cqe = cqe.unwrap(); - println!("{:?}", cqe); - if cqe.user_data == 0xCAFEBABE { - println!("cafebabe"); - } else if cqe.user_data == 0xDEADBEEF { - println!("deadbeef"); - } - - if let Ok(len) = cqe.result() { - let out = unsafe { std::str::from_utf8_unchecked(&buf[0..len as usize]) }; - 
println!("{}", out); - } - } -} \ No newline at end of file diff --git a/runtime/asyncio/gen.rs b/runtime/asyncio/gen.rs deleted file mode 100644 index 9d4ff67..0000000 --- a/runtime/asyncio/gen.rs +++ /dev/null @@ -1,1768 +0,0 @@ -/* automatically generated by rust-bindgen 0.59.1 */ - -#[repr(C)] -#[derive(Default)] -pub struct __IncompleteArrayField(::std::marker::PhantomData, [T; 0]); -impl __IncompleteArrayField { - #[inline] - pub const fn new() -> Self { - __IncompleteArrayField(::std::marker::PhantomData, []) - } - #[inline] - pub fn as_ptr(&self) -> *const T { - self as *const _ as *const T - } - #[inline] - pub fn as_mut_ptr(&mut self) -> *mut T { - self as *mut _ as *mut T - } - #[inline] - pub unsafe fn as_slice(&self, len: usize) -> &[T] { - ::std::slice::from_raw_parts(self.as_ptr(), len) - } - #[inline] - pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { - ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len) - } -} -impl ::std::fmt::Debug for __IncompleteArrayField { - fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - fmt.write_str("__IncompleteArrayField") - } -} -pub type __kernel_sighandler_t = - ::std::option::Option; -pub type __kernel_key_t = ::std::os::raw::c_int; -pub type __kernel_mqd_t = ::std::os::raw::c_int; -pub type __kernel_old_uid_t = ::std::os::raw::c_ushort; -pub type __kernel_old_gid_t = ::std::os::raw::c_ushort; -pub type __kernel_old_dev_t = ::std::os::raw::c_ulong; -pub type __kernel_long_t = ::std::os::raw::c_long; -pub type __kernel_ulong_t = ::std::os::raw::c_ulong; -pub type __kernel_ino_t = __kernel_ulong_t; -pub type __kernel_mode_t = ::std::os::raw::c_uint; -pub type __kernel_pid_t = ::std::os::raw::c_int; -pub type __kernel_ipc_pid_t = ::std::os::raw::c_int; -pub type __kernel_uid_t = ::std::os::raw::c_uint; -pub type __kernel_gid_t = ::std::os::raw::c_uint; -pub type __kernel_suseconds_t = __kernel_long_t; -pub type __kernel_daddr_t = ::std::os::raw::c_int; -pub type __kernel_uid32_t = ::std::os::raw::c_uint; -pub type __kernel_gid32_t = ::std::os::raw::c_uint; -pub type __kernel_size_t = __kernel_ulong_t; -pub type __kernel_ssize_t = __kernel_long_t; -pub type __kernel_ptrdiff_t = __kernel_long_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct __kernel_fsid_t { - pub val: [::std::os::raw::c_int; 2usize], -} -#[test] -fn bindgen_test_layout___kernel_fsid_t() { - assert_eq!( - ::std::mem::size_of::<__kernel_fsid_t>(), - 8usize, - concat!("Size of: ", stringify!(__kernel_fsid_t)) - ); - assert_eq!( - ::std::mem::align_of::<__kernel_fsid_t>(), - 4usize, - concat!("Alignment of ", stringify!(__kernel_fsid_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<__kernel_fsid_t>())).val as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__kernel_fsid_t), - "::", - stringify!(val) - ) - ); -} -pub type __kernel_off_t = __kernel_long_t; -pub type __kernel_loff_t = ::std::os::raw::c_longlong; -pub type __kernel_old_time_t = __kernel_long_t; -pub type __kernel_time_t = __kernel_long_t; -pub type __kernel_time64_t = ::std::os::raw::c_longlong; -pub type __kernel_clock_t = __kernel_long_t; -pub type __kernel_timer_t = ::std::os::raw::c_int; -pub type __kernel_clockid_t = ::std::os::raw::c_int; -pub type __kernel_caddr_t = *mut ::std::os::raw::c_char; -pub type __kernel_uid16_t = ::std::os::raw::c_ushort; -pub type __kernel_gid16_t = ::std::os::raw::c_ushort; -pub type __le16 = __u16; -pub type __be16 = __u16; -pub type __le32 = __u32; -pub type __be32 = __u32; -pub type 
__le64 = __u64; -pub type __be64 = __u64; -pub type __sum16 = __u16; -pub type __wsum = __u32; -pub type __poll_t = ::std::os::raw::c_uint; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct fscrypt_policy_v1 { - pub version: __u8, - pub contents_encryption_mode: __u8, - pub filenames_encryption_mode: __u8, - pub flags: __u8, - pub master_key_descriptor: [__u8; 8usize], -} -#[test] -fn bindgen_test_layout_fscrypt_policy_v1() { - assert_eq!( - ::std::mem::size_of::(), - 12usize, - concat!("Size of: ", stringify!(fscrypt_policy_v1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!("Alignment of ", stringify!(fscrypt_policy_v1)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).version as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v1), - "::", - stringify!(version) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).contents_encryption_mode as *const _ - as usize - }, - 1usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v1), - "::", - stringify!(contents_encryption_mode) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).filenames_encryption_mode as *const _ - as usize - }, - 2usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v1), - "::", - stringify!(filenames_encryption_mode) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 3usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v1), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).master_key_descriptor as *const _ as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v1), - "::", - stringify!(master_key_descriptor) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct fscrypt_key { - pub mode: __u32, - pub raw: [__u8; 64usize], - pub size: __u32, -} -#[test] -fn bindgen_test_layout_fscrypt_key() { - assert_eq!( - ::std::mem::size_of::(), - 72usize, - concat!("Size of: ", stringify!(fscrypt_key)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(fscrypt_key)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).mode as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key), - "::", - stringify!(mode) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).raw as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key), - "::", - stringify!(raw) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).size as *const _ as usize }, - 68usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key), - "::", - stringify!(size) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct fscrypt_policy_v2 { - pub version: __u8, - pub contents_encryption_mode: __u8, - pub filenames_encryption_mode: __u8, - pub flags: __u8, - pub __reserved: [__u8; 4usize], - pub master_key_identifier: [__u8; 16usize], -} -#[test] -fn bindgen_test_layout_fscrypt_policy_v2() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(fscrypt_policy_v2)) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!("Alignment of ", stringify!(fscrypt_policy_v2)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).version as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v2), - "::", - stringify!(version) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::())).contents_encryption_mode as *const _ - as usize - }, - 1usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v2), - "::", - stringify!(contents_encryption_mode) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).filenames_encryption_mode as *const _ - as usize - }, - 2usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v2), - "::", - stringify!(filenames_encryption_mode) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 3usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v2), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).__reserved as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v2), - "::", - stringify!(__reserved) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).master_key_identifier as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v2), - "::", - stringify!(master_key_identifier) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct fscrypt_get_policy_ex_arg { - pub policy_size: __u64, - pub policy: fscrypt_get_policy_ex_arg__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union fscrypt_get_policy_ex_arg__bindgen_ty_1 { - pub version: __u8, - pub v1: fscrypt_policy_v1, - pub v2: fscrypt_policy_v2, -} -#[test] -fn bindgen_test_layout_fscrypt_get_policy_ex_arg__bindgen_ty_1() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!( - "Size of: ", - stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1) - ) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!( - "Alignment of ", - stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).version as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1), - "::", - stringify!(version) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).v1 as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1), - "::", - stringify!(v1) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).v2 as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1), - "::", - stringify!(v2) - ) - ); -} -#[test] -fn bindgen_test_layout_fscrypt_get_policy_ex_arg() { - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(fscrypt_get_policy_ex_arg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(fscrypt_get_policy_ex_arg)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).policy_size as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_policy_ex_arg), - "::", - stringify!(policy_size) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).policy as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_policy_ex_arg), - "::", - stringify!(policy) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct fscrypt_key_specifier { - pub type_: __u32, - pub __reserved: __u32, - pub u: fscrypt_key_specifier__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union fscrypt_key_specifier__bindgen_ty_1 { - pub __reserved: [__u8; 32usize], - pub descriptor: [__u8; 8usize], - pub identifier: [__u8; 16usize], -} 
-#[test] -fn bindgen_test_layout_fscrypt_key_specifier__bindgen_ty_1() { - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(fscrypt_key_specifier__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!( - "Alignment of ", - stringify!(fscrypt_key_specifier__bindgen_ty_1) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).__reserved as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key_specifier__bindgen_ty_1), - "::", - stringify!(__reserved) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).descriptor as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key_specifier__bindgen_ty_1), - "::", - stringify!(descriptor) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).identifier as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key_specifier__bindgen_ty_1), - "::", - stringify!(identifier) - ) - ); -} -#[test] -fn bindgen_test_layout_fscrypt_key_specifier() { - assert_eq!( - ::std::mem::size_of::(), - 40usize, - concat!("Size of: ", stringify!(fscrypt_key_specifier)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(fscrypt_key_specifier)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).type_ as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key_specifier), - "::", - stringify!(type_) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).__reserved as *const _ as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key_specifier), - "::", - stringify!(__reserved) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).u as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key_specifier), - "::", - stringify!(u) - ) - ); -} -#[repr(C)] -#[derive(Debug)] -pub struct fscrypt_provisioning_key_payload { - pub type_: __u32, - pub __reserved: __u32, - pub raw: __IncompleteArrayField<__u8>, -} -#[test] -fn bindgen_test_layout_fscrypt_provisioning_key_payload() { - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(fscrypt_provisioning_key_payload)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!( - "Alignment of ", - stringify!(fscrypt_provisioning_key_payload) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).type_ as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_provisioning_key_payload), - "::", - stringify!(type_) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).__reserved as *const _ - as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_provisioning_key_payload), - "::", - stringify!(__reserved) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).raw as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_provisioning_key_payload), - "::", - stringify!(raw) - ) - ); -} -#[repr(C)] -pub struct fscrypt_add_key_arg { - pub key_spec: fscrypt_key_specifier, - pub raw_size: __u32, - pub key_id: __u32, - pub __reserved: [__u32; 8usize], - pub raw: __IncompleteArrayField<__u8>, -} -#[test] -fn bindgen_test_layout_fscrypt_add_key_arg() { - assert_eq!( - ::std::mem::size_of::(), - 80usize, - concat!("Size of: ", stringify!(fscrypt_add_key_arg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", 
stringify!(fscrypt_add_key_arg)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).key_spec as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_add_key_arg), - "::", - stringify!(key_spec) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).raw_size as *const _ as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_add_key_arg), - "::", - stringify!(raw_size) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).key_id as *const _ as usize }, - 44usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_add_key_arg), - "::", - stringify!(key_id) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).__reserved as *const _ as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_add_key_arg), - "::", - stringify!(__reserved) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).raw as *const _ as usize }, - 80usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_add_key_arg), - "::", - stringify!(raw) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct fscrypt_remove_key_arg { - pub key_spec: fscrypt_key_specifier, - pub removal_status_flags: __u32, - pub __reserved: [__u32; 5usize], -} -#[test] -fn bindgen_test_layout_fscrypt_remove_key_arg() { - assert_eq!( - ::std::mem::size_of::(), - 64usize, - concat!("Size of: ", stringify!(fscrypt_remove_key_arg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(fscrypt_remove_key_arg)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).key_spec as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_remove_key_arg), - "::", - stringify!(key_spec) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).removal_status_flags as *const _ - as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_remove_key_arg), - "::", - stringify!(removal_status_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).__reserved as *const _ as usize - }, - 44usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_remove_key_arg), - "::", - stringify!(__reserved) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct fscrypt_get_key_status_arg { - pub key_spec: fscrypt_key_specifier, - pub __reserved: [__u32; 6usize], - pub status: __u32, - pub status_flags: __u32, - pub user_count: __u32, - pub __out_reserved: [__u32; 13usize], -} -#[test] -fn bindgen_test_layout_fscrypt_get_key_status_arg() { - assert_eq!( - ::std::mem::size_of::(), - 128usize, - concat!("Size of: ", stringify!(fscrypt_get_key_status_arg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(fscrypt_get_key_status_arg)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).key_spec as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_key_status_arg), - "::", - stringify!(key_spec) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).__reserved as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_key_status_arg), - "::", - stringify!(__reserved) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).status as *const _ as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_key_status_arg), - "::", - stringify!(status) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).status_flags as *const _ as usize - }, - 68usize, - concat!( - "Offset of 
field: ", - stringify!(fscrypt_get_key_status_arg), - "::", - stringify!(status_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).user_count as *const _ as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_key_status_arg), - "::", - stringify!(user_count) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).__out_reserved as *const _ - as usize - }, - 76usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_key_status_arg), - "::", - stringify!(__out_reserved) - ) - ); -} -pub const fsconfig_command_FSCONFIG_SET_FLAG: fsconfig_command = 0; -pub const fsconfig_command_FSCONFIG_SET_STRING: fsconfig_command = 1; -pub const fsconfig_command_FSCONFIG_SET_BINARY: fsconfig_command = 2; -pub const fsconfig_command_FSCONFIG_SET_PATH: fsconfig_command = 3; -pub const fsconfig_command_FSCONFIG_SET_PATH_EMPTY: fsconfig_command = 4; -pub const fsconfig_command_FSCONFIG_SET_FD: fsconfig_command = 5; -pub const fsconfig_command_FSCONFIG_CMD_CREATE: fsconfig_command = 6; -pub const fsconfig_command_FSCONFIG_CMD_RECONFIGURE: fsconfig_command = 7; -pub type fsconfig_command = ::std::os::raw::c_uint; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct mount_attr { - pub attr_set: __u64, - pub attr_clr: __u64, - pub propagation: __u64, - pub userns_fd: __u64, -} -#[test] -fn bindgen_test_layout_mount_attr() { - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(mount_attr)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(mount_attr)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).attr_set as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(mount_attr), - "::", - stringify!(attr_set) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).attr_clr as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(mount_attr), - "::", - stringify!(attr_clr) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).propagation as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(mount_attr), - "::", - stringify!(propagation) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).userns_fd as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(mount_attr), - "::", - stringify!(userns_fd) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct file_clone_range { - pub src_fd: __s64, - pub src_offset: __u64, - pub src_length: __u64, - pub dest_offset: __u64, -} -#[test] -fn bindgen_test_layout_file_clone_range() { - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(file_clone_range)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(file_clone_range)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).src_fd as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(file_clone_range), - "::", - stringify!(src_fd) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).src_offset as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(file_clone_range), - "::", - stringify!(src_offset) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).src_length as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(file_clone_range), - "::", - stringify!(src_length) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).dest_offset as *const _ as usize }, - 24usize, - 
concat!( - "Offset of field: ", - stringify!(file_clone_range), - "::", - stringify!(dest_offset) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct fstrim_range { - pub start: __u64, - pub len: __u64, - pub minlen: __u64, -} -#[test] -fn bindgen_test_layout_fstrim_range() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(fstrim_range)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(fstrim_range)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).start as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fstrim_range), - "::", - stringify!(start) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).len as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(fstrim_range), - "::", - stringify!(len) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).minlen as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(fstrim_range), - "::", - stringify!(minlen) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct file_dedupe_range_info { - pub dest_fd: __s64, - pub dest_offset: __u64, - pub bytes_deduped: __u64, - pub status: __s32, - pub reserved: __u32, -} -#[test] -fn bindgen_test_layout_file_dedupe_range_info() { - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(file_dedupe_range_info)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(file_dedupe_range_info)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).dest_fd as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range_info), - "::", - stringify!(dest_fd) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).dest_offset as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range_info), - "::", - stringify!(dest_offset) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).bytes_deduped as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range_info), - "::", - stringify!(bytes_deduped) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).status as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range_info), - "::", - stringify!(status) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).reserved as *const _ as usize }, - 28usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range_info), - "::", - stringify!(reserved) - ) - ); -} -#[repr(C)] -#[derive(Debug)] -pub struct file_dedupe_range { - pub src_offset: __u64, - pub src_length: __u64, - pub dest_count: __u16, - pub reserved1: __u16, - pub reserved2: __u32, - pub info: __IncompleteArrayField, -} -#[test] -fn bindgen_test_layout_file_dedupe_range() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(file_dedupe_range)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(file_dedupe_range)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).src_offset as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range), - "::", - stringify!(src_offset) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).src_length as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range), - "::", - stringify!(src_length) - ) - 
); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).dest_count as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range), - "::", - stringify!(dest_count) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).reserved1 as *const _ as usize }, - 18usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range), - "::", - stringify!(reserved1) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).reserved2 as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range), - "::", - stringify!(reserved2) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).info as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range), - "::", - stringify!(info) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct files_stat_struct { - pub nr_files: ::std::os::raw::c_ulong, - pub nr_free_files: ::std::os::raw::c_ulong, - pub max_files: ::std::os::raw::c_ulong, -} -#[test] -fn bindgen_test_layout_files_stat_struct() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(files_stat_struct)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(files_stat_struct)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).nr_files as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(files_stat_struct), - "::", - stringify!(nr_files) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).nr_free_files as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(files_stat_struct), - "::", - stringify!(nr_free_files) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).max_files as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(files_stat_struct), - "::", - stringify!(max_files) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct inodes_stat_t { - pub nr_inodes: ::std::os::raw::c_long, - pub nr_unused: ::std::os::raw::c_long, - pub dummy: [::std::os::raw::c_long; 5usize], -} -#[test] -fn bindgen_test_layout_inodes_stat_t() { - assert_eq!( - ::std::mem::size_of::(), - 56usize, - concat!("Size of: ", stringify!(inodes_stat_t)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(inodes_stat_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).nr_inodes as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(inodes_stat_t), - "::", - stringify!(nr_inodes) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).nr_unused as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(inodes_stat_t), - "::", - stringify!(nr_unused) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).dummy as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(inodes_stat_t), - "::", - stringify!(dummy) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct fsxattr { - pub fsx_xflags: __u32, - pub fsx_extsize: __u32, - pub fsx_nextents: __u32, - pub fsx_projid: __u32, - pub fsx_cowextsize: __u32, - pub fsx_pad: [::std::os::raw::c_uchar; 8usize], -} -#[test] -fn bindgen_test_layout_fsxattr() { - assert_eq!( - ::std::mem::size_of::(), - 28usize, - concat!("Size of: ", stringify!(fsxattr)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(fsxattr)) - ); - assert_eq!( - unsafe { 
&(*(::std::ptr::null::())).fsx_xflags as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fsxattr), - "::", - stringify!(fsx_xflags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fsx_extsize as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(fsxattr), - "::", - stringify!(fsx_extsize) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fsx_nextents as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(fsxattr), - "::", - stringify!(fsx_nextents) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fsx_projid as *const _ as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(fsxattr), - "::", - stringify!(fsx_projid) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fsx_cowextsize as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(fsxattr), - "::", - stringify!(fsx_cowextsize) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fsx_pad as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(fsxattr), - "::", - stringify!(fsx_pad) - ) - ); -} -pub const IORING_CQE_BUFFER_SHIFT: ::std::os::raw::c_uint = 16; -pub type _bindgen_ty_3 = ::std::os::raw::c_uint; -pub type _bindgen_ty_4 = ::std::os::raw::c_uint; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct io_uring_files_update { - pub offset: __u32, - pub resv: __u32, - pub fds: __u64, -} -#[test] -fn bindgen_test_layout_io_uring_files_update() { - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(io_uring_files_update)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_files_update)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).offset as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_files_update), - "::", - stringify!(offset) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_files_update), - "::", - stringify!(resv) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fds as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(io_uring_files_update), - "::", - stringify!(fds) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct io_uring_rsrc_update { - pub offset: __u32, - pub resv: __u32, - pub data: __u64, -} -#[test] -fn bindgen_test_layout_io_uring_rsrc_update() { - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(io_uring_rsrc_update)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_rsrc_update)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).offset as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_rsrc_update), - "::", - stringify!(offset) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_rsrc_update), - "::", - stringify!(resv) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).data as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(io_uring_rsrc_update), - "::", - stringify!(data) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct io_uring_probe_op { - pub op: __u8, - pub resv: __u8, - pub flags: __u16, - pub resv2: __u32, -} -#[test] -fn 
bindgen_test_layout_io_uring_probe_op() { - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(io_uring_probe_op)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(io_uring_probe_op)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).op as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe_op), - "::", - stringify!(op) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, - 1usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe_op), - "::", - stringify!(resv) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 2usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe_op), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe_op), - "::", - stringify!(resv2) - ) - ); -} -#[repr(C)] -#[derive(Debug)] -pub struct io_uring_probe { - pub last_op: __u8, - pub ops_len: __u8, - pub resv: __u16, - pub resv2: [__u32; 3usize], - pub ops: __IncompleteArrayField, -} -#[test] -fn bindgen_test_layout_io_uring_probe() { - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(io_uring_probe)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(io_uring_probe)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).last_op as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe), - "::", - stringify!(last_op) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ops_len as *const _ as usize }, - 1usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe), - "::", - stringify!(ops_len) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, - 2usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe), - "::", - stringify!(resv) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe), - "::", - stringify!(resv2) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ops as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe), - "::", - stringify!(ops) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct io_uring_restriction { - pub opcode: __u16, - pub __bindgen_anon_1: io_uring_restriction__bindgen_ty_1, - pub resv: __u8, - pub resv2: [__u32; 3usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union io_uring_restriction__bindgen_ty_1 { - pub register_op: __u8, - pub sqe_op: __u8, - pub sqe_flags: __u8, -} -#[test] -fn bindgen_test_layout_io_uring_restriction__bindgen_ty_1() { - assert_eq!( - ::std::mem::size_of::(), - 1usize, - concat!("Size of: ", stringify!(io_uring_restriction__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!( - "Alignment of ", - stringify!(io_uring_restriction__bindgen_ty_1) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).register_op as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_restriction__bindgen_ty_1), - "::", - stringify!(register_op) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).sqe_op as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - 
stringify!(io_uring_restriction__bindgen_ty_1), - "::", - stringify!(sqe_op) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).sqe_flags as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_restriction__bindgen_ty_1), - "::", - stringify!(sqe_flags) - ) - ); -} -#[test] -fn bindgen_test_layout_io_uring_restriction() { - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(io_uring_restriction)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(io_uring_restriction)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).opcode as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_restriction), - "::", - stringify!(opcode) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, - 3usize, - concat!( - "Offset of field: ", - stringify!(io_uring_restriction), - "::", - stringify!(resv) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_restriction), - "::", - stringify!(resv2) - ) - ); -} -pub const IORING_RESTRICTION_REGISTER_OP: ::std::os::raw::c_uint = 0; -pub const IORING_RESTRICTION_SQE_OP: ::std::os::raw::c_uint = 1; -pub const IORING_RESTRICTION_SQE_FLAGS_ALLOWED: ::std::os::raw::c_uint = 2; -pub const IORING_RESTRICTION_SQE_FLAGS_REQUIRED: ::std::os::raw::c_uint = 3; -pub const IORING_RESTRICTION_LAST: ::std::os::raw::c_uint = 4; -pub type _bindgen_ty_5 = ::std::os::raw::c_uint; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct io_uring_getevents_arg { - pub sigmask: __u64, - pub sigmask_sz: __u32, - pub pad: __u32, - pub ts: __u64, -} -#[test] -fn bindgen_test_layout_io_uring_getevents_arg() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(io_uring_getevents_arg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_getevents_arg)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).sigmask as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_getevents_arg), - "::", - stringify!(sigmask) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).sigmask_sz as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(io_uring_getevents_arg), - "::", - stringify!(sigmask_sz) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).pad as *const _ as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(io_uring_getevents_arg), - "::", - stringify!(pad) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ts as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(io_uring_getevents_arg), - "::", - stringify!(ts) - ) - ); -} diff --git a/runtime/asyncio/gen2.rs b/runtime/asyncio/gen2.rs deleted file mode 100644 index 1d37efd..0000000 --- a/runtime/asyncio/gen2.rs +++ /dev/null @@ -1,3040 +0,0 @@ -/* automatically generated by rust-bindgen 0.59.1 */ - -#[repr(C)] -#[derive(Default)] -pub struct __IncompleteArrayField(::std::marker::PhantomData, [T; 0]); -impl __IncompleteArrayField { - #[inline] - pub const fn new() -> Self { - __IncompleteArrayField(::std::marker::PhantomData, []) - } - #[inline] - pub fn as_ptr(&self) -> *const T { - self as *const _ as *const T - } - #[inline] - pub fn as_mut_ptr(&mut self) -> *mut T { - self as *mut _ as *mut T - } - #[inline] - pub unsafe 
fn as_slice(&self, len: usize) -> &[T] {
-        ::std::slice::from_raw_parts(self.as_ptr(), len)
-    }
-    #[inline]
-    pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
-        ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
-    }
-}
-impl<T> ::std::fmt::Debug for __IncompleteArrayField<T> {
-    fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
-        fmt.write_str("__IncompleteArrayField")
-    }
-}
-pub const NR_OPEN: u32 = 1024;
-pub const NGROUPS_MAX: u32 = 65536;
-pub const ARG_MAX: u32 = 131072;
-pub const LINK_MAX: u32 = 127;
-pub const MAX_CANON: u32 = 255;
-pub const MAX_INPUT: u32 = 255;
-pub const NAME_MAX: u32 = 255;
-pub const PATH_MAX: u32 = 4096;
-pub const PIPE_BUF: u32 = 4096;
-pub const XATTR_NAME_MAX: u32 = 255;
-pub const XATTR_SIZE_MAX: u32 = 65536;
-pub const XATTR_LIST_MAX: u32 = 65536;
-pub const RTSIG_MAX: u32 = 32;
-pub const _IOC_NRBITS: u32 = 8;
-pub const _IOC_TYPEBITS: u32 = 8;
-pub const _IOC_SIZEBITS: u32 = 14;
-pub const _IOC_DIRBITS: u32 = 2;
-pub const _IOC_NRMASK: u32 = 255;
-pub const _IOC_TYPEMASK: u32 = 255;
-pub const _IOC_SIZEMASK: u32 = 16383;
-pub const _IOC_DIRMASK: u32 = 3;
-pub const _IOC_NRSHIFT: u32 = 0;
-pub const _IOC_TYPESHIFT: u32 = 8;
-pub const _IOC_SIZESHIFT: u32 = 16;
-pub const _IOC_DIRSHIFT: u32 = 30;
-pub const _IOC_NONE: u32 = 0;
-pub const _IOC_WRITE: u32 = 1;
-pub const _IOC_READ: u32 = 2;
-pub const IOC_IN: u32 = 1073741824;
-pub const IOC_OUT: u32 = 2147483648;
-pub const IOC_INOUT: u32 = 3221225472;
-pub const IOCSIZE_MASK: u32 = 1073676288;
-pub const IOCSIZE_SHIFT: u32 = 16;
-pub const __BITS_PER_LONG: u32 = 64;
-pub const __FD_SETSIZE: u32 = 1024;
-pub const FSCRYPT_POLICY_FLAGS_PAD_4: u32 = 0;
-pub const FSCRYPT_POLICY_FLAGS_PAD_8: u32 = 1;
-pub const FSCRYPT_POLICY_FLAGS_PAD_16: u32 = 2;
-pub const FSCRYPT_POLICY_FLAGS_PAD_32: u32 = 3;
-pub const FSCRYPT_POLICY_FLAGS_PAD_MASK: u32 = 3;
-pub const FSCRYPT_POLICY_FLAG_DIRECT_KEY: u32 = 4;
-pub const FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64: u32 = 8;
-pub const FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32: u32 = 16;
-pub const FSCRYPT_MODE_AES_256_XTS: u32 = 1;
-pub const FSCRYPT_MODE_AES_256_CTS: u32 = 4;
-pub const FSCRYPT_MODE_AES_128_CBC: u32 = 5;
-pub const FSCRYPT_MODE_AES_128_CTS: u32 = 6;
-pub const FSCRYPT_MODE_ADIANTUM: u32 = 9;
-pub const FSCRYPT_POLICY_V1: u32 = 0;
-pub const FSCRYPT_KEY_DESCRIPTOR_SIZE: u32 = 8;
-pub const FSCRYPT_KEY_DESC_PREFIX: &'static [u8; 9usize] = b"fscrypt:\0";
-pub const FSCRYPT_KEY_DESC_PREFIX_SIZE: u32 = 8;
-pub const FSCRYPT_MAX_KEY_SIZE: u32 = 64;
-pub const FSCRYPT_POLICY_V2: u32 = 2;
-pub const FSCRYPT_KEY_IDENTIFIER_SIZE: u32 = 16;
-pub const FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: u32 = 1;
-pub const FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: u32 = 2;
-pub const FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY: u32 = 1;
-pub const FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS: u32 = 2;
-pub const FSCRYPT_KEY_STATUS_ABSENT: u32 = 1;
-pub const FSCRYPT_KEY_STATUS_PRESENT: u32 = 2;
-pub const FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED: u32 = 3;
-pub const FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF: u32 = 1;
-pub const FS_KEY_DESCRIPTOR_SIZE: u32 = 8;
-pub const FS_POLICY_FLAGS_PAD_4: u32 = 0;
-pub const FS_POLICY_FLAGS_PAD_8: u32 = 1;
-pub const FS_POLICY_FLAGS_PAD_16: u32 = 2;
-pub const FS_POLICY_FLAGS_PAD_32: u32 = 3;
-pub const FS_POLICY_FLAGS_PAD_MASK: u32 = 3;
-pub const FS_POLICY_FLAG_DIRECT_KEY: u32 = 4;
-pub const FS_POLICY_FLAGS_VALID: u32 = 7;
-pub const FS_ENCRYPTION_MODE_INVALID: u32 = 0;
-pub const FS_ENCRYPTION_MODE_AES_256_XTS: u32 = 1;
-pub const FS_ENCRYPTION_MODE_AES_256_GCM: u32 = 2;
-pub const FS_ENCRYPTION_MODE_AES_256_CBC: u32 = 3;
-pub const FS_ENCRYPTION_MODE_AES_256_CTS: u32 = 4;
-pub const FS_ENCRYPTION_MODE_AES_128_CBC: u32 = 5;
-pub const FS_ENCRYPTION_MODE_AES_128_CTS: u32 = 6;
-pub const FS_ENCRYPTION_MODE_SPECK128_256_XTS: u32 = 7;
-pub const FS_ENCRYPTION_MODE_SPECK128_256_CTS: u32 = 8;
-pub const FS_ENCRYPTION_MODE_ADIANTUM: u32 = 9;
-pub const FS_KEY_DESC_PREFIX: &'static [u8; 9usize] = b"fscrypt:\0";
-pub const FS_KEY_DESC_PREFIX_SIZE: u32 = 8;
-pub const FS_MAX_KEY_SIZE: u32 = 64;
-pub const MS_RDONLY: u32 = 1;
-pub const MS_NOSUID: u32 = 2;
-pub const MS_NODEV: u32 = 4;
-pub const MS_NOEXEC: u32 = 8;
-pub const MS_SYNCHRONOUS: u32 = 16;
-pub const MS_REMOUNT: u32 = 32;
-pub const MS_MANDLOCK: u32 = 64;
-pub const MS_DIRSYNC: u32 = 128;
-pub const MS_NOSYMFOLLOW: u32 = 256;
-pub const MS_NOATIME: u32 = 1024;
-pub const MS_NODIRATIME: u32 = 2048;
-pub const MS_BIND: u32 = 4096;
-pub const MS_MOVE: u32 = 8192;
-pub const MS_REC: u32 = 16384;
-pub const MS_VERBOSE: u32 = 32768;
-pub const MS_SILENT: u32 = 32768;
-pub const MS_POSIXACL: u32 = 65536;
-pub const MS_UNBINDABLE: u32 = 131072;
-pub const MS_PRIVATE: u32 = 262144;
-pub const MS_SLAVE: u32 = 524288;
-pub const MS_SHARED: u32 = 1048576;
-pub const MS_RELATIME: u32 = 2097152;
-pub const MS_KERNMOUNT: u32 = 4194304;
-pub const MS_I_VERSION: u32 = 8388608;
-pub const MS_STRICTATIME: u32 = 16777216;
-pub const MS_LAZYTIME: u32 = 33554432;
-pub const MS_SUBMOUNT: u32 = 67108864;
-pub const MS_NOREMOTELOCK: u32 = 134217728;
-pub const MS_NOSEC: u32 = 268435456;
-pub const MS_BORN: u32 = 536870912;
-pub const MS_ACTIVE: u32 = 1073741824;
-pub const MS_NOUSER: u32 = 2147483648;
-pub const MS_RMT_MASK: u32 = 41943121;
-pub const MS_MGC_VAL: u32 = 3236757504;
-pub const MS_MGC_MSK: u32 = 4294901760;
-pub const OPEN_TREE_CLONE: u32 = 1;
-pub const MOVE_MOUNT_F_SYMLINKS: u32 = 1;
-pub const MOVE_MOUNT_F_AUTOMOUNTS: u32 = 2;
-pub const MOVE_MOUNT_F_EMPTY_PATH: u32 = 4;
-pub const MOVE_MOUNT_T_SYMLINKS: u32 = 16;
-pub const MOVE_MOUNT_T_AUTOMOUNTS: u32 = 32;
-pub const MOVE_MOUNT_T_EMPTY_PATH: u32 = 64;
-pub const MOVE_MOUNT__MASK: u32 = 119;
-pub const FSOPEN_CLOEXEC: u32 = 1;
-pub const FSPICK_CLOEXEC: u32 = 1;
-pub const FSPICK_SYMLINK_NOFOLLOW: u32 = 2;
-pub const FSPICK_NO_AUTOMOUNT: u32 = 4;
-pub const FSPICK_EMPTY_PATH: u32 = 8;
-pub const FSMOUNT_CLOEXEC: u32 = 1;
-pub const MOUNT_ATTR_RDONLY: u32 = 1;
-pub const MOUNT_ATTR_NOSUID: u32 = 2;
-pub const MOUNT_ATTR_NODEV: u32 = 4;
-pub const MOUNT_ATTR_NOEXEC: u32 = 8;
-pub const MOUNT_ATTR__ATIME: u32 = 112;
-pub const MOUNT_ATTR_RELATIME: u32 = 0;
-pub const MOUNT_ATTR_NOATIME: u32 = 16;
-pub const MOUNT_ATTR_STRICTATIME: u32 = 32;
-pub const MOUNT_ATTR_NODIRATIME: u32 = 128;
-pub const MOUNT_ATTR_IDMAP: u32 = 1048576;
-pub const MOUNT_ATTR_SIZE_VER0: u32 = 32;
-pub const INR_OPEN_CUR: u32 = 1024;
-pub const INR_OPEN_MAX: u32 = 4096;
-pub const BLOCK_SIZE_BITS: u32 = 10;
-pub const BLOCK_SIZE: u32 = 1024;
-pub const SEEK_SET: u32 = 0;
-pub const SEEK_CUR: u32 = 1;
-pub const SEEK_END: u32 = 2;
-pub const SEEK_DATA: u32 = 3;
-pub const SEEK_HOLE: u32 = 4;
-pub const SEEK_MAX: u32 = 4;
-pub const RENAME_NOREPLACE: u32 = 1;
-pub const RENAME_EXCHANGE: u32 = 2;
-pub const RENAME_WHITEOUT: u32 = 4;
-pub const FILE_DEDUPE_RANGE_SAME: u32 = 0;
-pub const FILE_DEDUPE_RANGE_DIFFERS: u32 = 1;
-pub const NR_FILE: u32 = 8192;
-pub const FS_XFLAG_REALTIME: u32 = 1;
-pub const FS_XFLAG_PREALLOC: u32 = 2;
-pub const FS_XFLAG_IMMUTABLE: u32 = 8;
-pub const FS_XFLAG_APPEND: u32 = 16;
-pub const FS_XFLAG_SYNC: u32 = 32;
-pub const FS_XFLAG_NOATIME: u32 = 64;
-pub const FS_XFLAG_NODUMP: u32 = 128;
-pub const FS_XFLAG_RTINHERIT: u32 = 256;
-pub const FS_XFLAG_PROJINHERIT: u32 = 512;
-pub const FS_XFLAG_NOSYMLINKS: u32 = 1024;
-pub const FS_XFLAG_EXTSIZE: u32 = 2048;
-pub const FS_XFLAG_EXTSZINHERIT: u32 = 4096;
-pub const FS_XFLAG_NODEFRAG: u32 = 8192;
-pub const FS_XFLAG_FILESTREAM: u32 = 16384;
-pub const FS_XFLAG_DAX: u32 = 32768;
-pub const FS_XFLAG_COWEXTSIZE: u32 = 65536;
-pub const FS_XFLAG_HASATTR: u32 = 2147483648;
-pub const BMAP_IOCTL: u32 = 1;
-pub const FSLABEL_MAX: u32 = 256;
-pub const FS_SECRM_FL: u32 = 1;
-pub const FS_UNRM_FL: u32 = 2;
-pub const FS_COMPR_FL: u32 = 4;
-pub const FS_SYNC_FL: u32 = 8;
-pub const FS_IMMUTABLE_FL: u32 = 16;
-pub const FS_APPEND_FL: u32 = 32;
-pub const FS_NODUMP_FL: u32 = 64;
-pub const FS_NOATIME_FL: u32 = 128;
-pub const FS_DIRTY_FL: u32 = 256;
-pub const FS_COMPRBLK_FL: u32 = 512;
-pub const FS_NOCOMP_FL: u32 = 1024;
-pub const FS_ENCRYPT_FL: u32 = 2048;
-pub const FS_BTREE_FL: u32 = 4096;
-pub const FS_INDEX_FL: u32 = 4096;
-pub const FS_IMAGIC_FL: u32 = 8192;
-pub const FS_JOURNAL_DATA_FL: u32 = 16384;
-pub const FS_NOTAIL_FL: u32 = 32768;
-pub const FS_DIRSYNC_FL: u32 = 65536;
-pub const FS_TOPDIR_FL: u32 = 131072;
-pub const FS_HUGE_FILE_FL: u32 = 262144;
-pub const FS_EXTENT_FL: u32 = 524288;
-pub const FS_VERITY_FL: u32 = 1048576;
-pub const FS_EA_INODE_FL: u32 = 2097152;
-pub const FS_EOFBLOCKS_FL: u32 = 4194304;
-pub const FS_NOCOW_FL: u32 = 8388608;
-pub const FS_DAX_FL: u32 = 33554432;
-pub const FS_INLINE_DATA_FL: u32 = 268435456;
-pub const FS_PROJINHERIT_FL: u32 = 536870912;
-pub const FS_CASEFOLD_FL: u32 = 1073741824;
-pub const FS_RESERVED_FL: u32 = 2147483648;
-pub const FS_FL_USER_VISIBLE: u32 = 253951;
-pub const FS_FL_USER_MODIFIABLE: u32 = 229631;
-pub const SYNC_FILE_RANGE_WAIT_BEFORE: u32 = 1;
-pub const SYNC_FILE_RANGE_WRITE: u32 = 2;
-pub const SYNC_FILE_RANGE_WAIT_AFTER: u32 = 4;
-pub const SYNC_FILE_RANGE_WRITE_AND_WAIT: u32 = 7;
-pub const IORING_SETUP_IOPOLL: u32 = 1;
-pub const IORING_SETUP_SQPOLL: u32 = 2;
-pub const IORING_SETUP_SQ_AFF: u32 = 4;
-pub const IORING_SETUP_CQSIZE: u32 = 8;
-pub const IORING_SETUP_CLAMP: u32 = 16;
-pub const IORING_SETUP_ATTACH_WQ: u32 = 32;
-pub const IORING_SETUP_R_DISABLED: u32 = 64;
-pub const IORING_FSYNC_DATASYNC: u32 = 1;
-pub const IORING_TIMEOUT_ABS: u32 = 1;
-pub const IORING_TIMEOUT_UPDATE: u32 = 2;
-pub const SPLICE_F_FD_IN_FIXED: u32 = 2147483648;
-pub const IORING_CQE_F_BUFFER: u32 = 1;
-pub const IORING_SQ_NEED_WAKEUP: u32 = 1;
-pub const IORING_SQ_CQ_OVERFLOW: u32 = 2;
-pub const IORING_CQ_EVENTFD_DISABLED: u32 = 1;
-pub const IORING_ENTER_GETEVENTS: u32 = 1;
-pub const IORING_ENTER_SQ_WAKEUP: u32 = 2;
-pub const IORING_ENTER_SQ_WAIT: u32 = 4;
-pub const IORING_ENTER_EXT_ARG: u32 = 8;
-pub const IORING_FEAT_SINGLE_MMAP: u32 = 1;
-pub const IORING_FEAT_NODROP: u32 = 2;
-pub const IORING_FEAT_SUBMIT_STABLE: u32 = 4;
-pub const IORING_FEAT_RW_CUR_POS: u32 = 8;
-pub const IORING_FEAT_CUR_PERSONALITY: u32 = 16;
-pub const IORING_FEAT_FAST_POLL: u32 = 32;
-pub const IORING_FEAT_POLL_32BITS: u32 = 64;
-pub const IORING_FEAT_SQPOLL_NONFIXED: u32 = 128;
-pub const IORING_FEAT_EXT_ARG: u32 = 256;
-pub const IORING_FEAT_NATIVE_WORKERS: u32 = 512;
-pub const IORING_REGISTER_FILES_SKIP: i32 = -2;
-pub const IO_URING_OP_SUPPORTED: u32 = 1;
-pub type __s8 = ::std::os::raw::c_schar;
-pub type __u8
= ::std::os::raw::c_uchar; -pub type __s16 = ::std::os::raw::c_short; -pub type __u16 = ::std::os::raw::c_ushort; -pub type __s32 = ::std::os::raw::c_int; -pub type __u32 = ::std::os::raw::c_uint; -pub type __s64 = ::std::os::raw::c_longlong; -pub type __u64 = ::std::os::raw::c_ulonglong; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct __kernel_fd_set { - pub fds_bits: [::std::os::raw::c_ulong; 16usize], -} -#[test] -fn bindgen_test_layout___kernel_fd_set() { - assert_eq!( - ::std::mem::size_of::<__kernel_fd_set>(), - 128usize, - concat!("Size of: ", stringify!(__kernel_fd_set)) - ); - assert_eq!( - ::std::mem::align_of::<__kernel_fd_set>(), - 8usize, - concat!("Alignment of ", stringify!(__kernel_fd_set)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<__kernel_fd_set>())).fds_bits as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__kernel_fd_set), - "::", - stringify!(fds_bits) - ) - ); -} -pub type __kernel_sighandler_t = - ::std::option::Option; -pub type __kernel_key_t = ::std::os::raw::c_int; -pub type __kernel_mqd_t = ::std::os::raw::c_int; -pub type __kernel_old_uid_t = ::std::os::raw::c_ushort; -pub type __kernel_old_gid_t = ::std::os::raw::c_ushort; -pub type __kernel_old_dev_t = ::std::os::raw::c_ulong; -pub type __kernel_long_t = ::std::os::raw::c_long; -pub type __kernel_ulong_t = ::std::os::raw::c_ulong; -pub type __kernel_ino_t = __kernel_ulong_t; -pub type __kernel_mode_t = ::std::os::raw::c_uint; -pub type __kernel_pid_t = ::std::os::raw::c_int; -pub type __kernel_ipc_pid_t = ::std::os::raw::c_int; -pub type __kernel_uid_t = ::std::os::raw::c_uint; -pub type __kernel_gid_t = ::std::os::raw::c_uint; -pub type __kernel_suseconds_t = __kernel_long_t; -pub type __kernel_daddr_t = ::std::os::raw::c_int; -pub type __kernel_uid32_t = ::std::os::raw::c_uint; -pub type __kernel_gid32_t = ::std::os::raw::c_uint; -pub type __kernel_size_t = __kernel_ulong_t; -pub type __kernel_ssize_t = __kernel_long_t; -pub type __kernel_ptrdiff_t = __kernel_long_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct __kernel_fsid_t { - pub val: [::std::os::raw::c_int; 2usize], -} -#[test] -fn bindgen_test_layout___kernel_fsid_t() { - assert_eq!( - ::std::mem::size_of::<__kernel_fsid_t>(), - 8usize, - concat!("Size of: ", stringify!(__kernel_fsid_t)) - ); - assert_eq!( - ::std::mem::align_of::<__kernel_fsid_t>(), - 4usize, - concat!("Alignment of ", stringify!(__kernel_fsid_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<__kernel_fsid_t>())).val as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__kernel_fsid_t), - "::", - stringify!(val) - ) - ); -} -pub type __kernel_off_t = __kernel_long_t; -pub type __kernel_loff_t = ::std::os::raw::c_longlong; -pub type __kernel_old_time_t = __kernel_long_t; -pub type __kernel_time_t = __kernel_long_t; -pub type __kernel_time64_t = ::std::os::raw::c_longlong; -pub type __kernel_clock_t = __kernel_long_t; -pub type __kernel_timer_t = ::std::os::raw::c_int; -pub type __kernel_clockid_t = ::std::os::raw::c_int; -pub type __kernel_caddr_t = *mut ::std::os::raw::c_char; -pub type __kernel_uid16_t = ::std::os::raw::c_ushort; -pub type __kernel_gid16_t = ::std::os::raw::c_ushort; -pub type __le16 = __u16; -pub type __be16 = __u16; -pub type __le32 = __u32; -pub type __be32 = __u32; -pub type __le64 = __u64; -pub type __be64 = __u64; -pub type __sum16 = __u16; -pub type __wsum = __u32; -pub type __poll_t = ::std::os::raw::c_uint; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct 
fscrypt_policy_v1 { - pub version: __u8, - pub contents_encryption_mode: __u8, - pub filenames_encryption_mode: __u8, - pub flags: __u8, - pub master_key_descriptor: [__u8; 8usize], -} -#[test] -fn bindgen_test_layout_fscrypt_policy_v1() { - assert_eq!( - ::std::mem::size_of::(), - 12usize, - concat!("Size of: ", stringify!(fscrypt_policy_v1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!("Alignment of ", stringify!(fscrypt_policy_v1)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).version as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v1), - "::", - stringify!(version) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).contents_encryption_mode as *const _ - as usize - }, - 1usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v1), - "::", - stringify!(contents_encryption_mode) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).filenames_encryption_mode as *const _ - as usize - }, - 2usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v1), - "::", - stringify!(filenames_encryption_mode) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 3usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v1), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).master_key_descriptor as *const _ as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v1), - "::", - stringify!(master_key_descriptor) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct fscrypt_key { - pub mode: __u32, - pub raw: [__u8; 64usize], - pub size: __u32, -} -#[test] -fn bindgen_test_layout_fscrypt_key() { - assert_eq!( - ::std::mem::size_of::(), - 72usize, - concat!("Size of: ", stringify!(fscrypt_key)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(fscrypt_key)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).mode as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key), - "::", - stringify!(mode) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).raw as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key), - "::", - stringify!(raw) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).size as *const _ as usize }, - 68usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key), - "::", - stringify!(size) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct fscrypt_policy_v2 { - pub version: __u8, - pub contents_encryption_mode: __u8, - pub filenames_encryption_mode: __u8, - pub flags: __u8, - pub __reserved: [__u8; 4usize], - pub master_key_identifier: [__u8; 16usize], -} -#[test] -fn bindgen_test_layout_fscrypt_policy_v2() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(fscrypt_policy_v2)) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!("Alignment of ", stringify!(fscrypt_policy_v2)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).version as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v2), - "::", - stringify!(version) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).contents_encryption_mode as *const _ - as usize - }, - 1usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v2), - "::", - stringify!(contents_encryption_mode) - ) - ); - 
assert_eq!( - unsafe { - &(*(::std::ptr::null::())).filenames_encryption_mode as *const _ - as usize - }, - 2usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v2), - "::", - stringify!(filenames_encryption_mode) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 3usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v2), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).__reserved as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v2), - "::", - stringify!(__reserved) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).master_key_identifier as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_policy_v2), - "::", - stringify!(master_key_identifier) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct fscrypt_get_policy_ex_arg { - pub policy_size: __u64, - pub policy: fscrypt_get_policy_ex_arg__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union fscrypt_get_policy_ex_arg__bindgen_ty_1 { - pub version: __u8, - pub v1: fscrypt_policy_v1, - pub v2: fscrypt_policy_v2, -} -#[test] -fn bindgen_test_layout_fscrypt_get_policy_ex_arg__bindgen_ty_1() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!( - "Size of: ", - stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1) - ) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!( - "Alignment of ", - stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).version as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1), - "::", - stringify!(version) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).v1 as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1), - "::", - stringify!(v1) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).v2 as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1), - "::", - stringify!(v2) - ) - ); -} -#[test] -fn bindgen_test_layout_fscrypt_get_policy_ex_arg() { - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(fscrypt_get_policy_ex_arg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(fscrypt_get_policy_ex_arg)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).policy_size as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_policy_ex_arg), - "::", - stringify!(policy_size) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).policy as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_policy_ex_arg), - "::", - stringify!(policy) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct fscrypt_key_specifier { - pub type_: __u32, - pub __reserved: __u32, - pub u: fscrypt_key_specifier__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union fscrypt_key_specifier__bindgen_ty_1 { - pub __reserved: [__u8; 32usize], - pub descriptor: [__u8; 8usize], - pub identifier: [__u8; 16usize], -} -#[test] -fn bindgen_test_layout_fscrypt_key_specifier__bindgen_ty_1() { - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(fscrypt_key_specifier__bindgen_ty_1)) - ); - assert_eq!( - 
::std::mem::align_of::(), - 1usize, - concat!( - "Alignment of ", - stringify!(fscrypt_key_specifier__bindgen_ty_1) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).__reserved as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key_specifier__bindgen_ty_1), - "::", - stringify!(__reserved) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).descriptor as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key_specifier__bindgen_ty_1), - "::", - stringify!(descriptor) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).identifier as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key_specifier__bindgen_ty_1), - "::", - stringify!(identifier) - ) - ); -} -#[test] -fn bindgen_test_layout_fscrypt_key_specifier() { - assert_eq!( - ::std::mem::size_of::(), - 40usize, - concat!("Size of: ", stringify!(fscrypt_key_specifier)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(fscrypt_key_specifier)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).type_ as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key_specifier), - "::", - stringify!(type_) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).__reserved as *const _ as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key_specifier), - "::", - stringify!(__reserved) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).u as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_key_specifier), - "::", - stringify!(u) - ) - ); -} -#[repr(C)] -#[derive(Debug)] -pub struct fscrypt_provisioning_key_payload { - pub type_: __u32, - pub __reserved: __u32, - pub raw: __IncompleteArrayField<__u8>, -} -#[test] -fn bindgen_test_layout_fscrypt_provisioning_key_payload() { - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(fscrypt_provisioning_key_payload)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!( - "Alignment of ", - stringify!(fscrypt_provisioning_key_payload) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).type_ as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_provisioning_key_payload), - "::", - stringify!(type_) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).__reserved as *const _ - as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_provisioning_key_payload), - "::", - stringify!(__reserved) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).raw as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_provisioning_key_payload), - "::", - stringify!(raw) - ) - ); -} -#[repr(C)] -pub struct fscrypt_add_key_arg { - pub key_spec: fscrypt_key_specifier, - pub raw_size: __u32, - pub key_id: __u32, - pub __reserved: [__u32; 8usize], - pub raw: __IncompleteArrayField<__u8>, -} -#[test] -fn bindgen_test_layout_fscrypt_add_key_arg() { - assert_eq!( - ::std::mem::size_of::(), - 80usize, - concat!("Size of: ", stringify!(fscrypt_add_key_arg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(fscrypt_add_key_arg)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).key_spec as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_add_key_arg), - "::", - 
stringify!(key_spec) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).raw_size as *const _ as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_add_key_arg), - "::", - stringify!(raw_size) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).key_id as *const _ as usize }, - 44usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_add_key_arg), - "::", - stringify!(key_id) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).__reserved as *const _ as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_add_key_arg), - "::", - stringify!(__reserved) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).raw as *const _ as usize }, - 80usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_add_key_arg), - "::", - stringify!(raw) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct fscrypt_remove_key_arg { - pub key_spec: fscrypt_key_specifier, - pub removal_status_flags: __u32, - pub __reserved: [__u32; 5usize], -} -#[test] -fn bindgen_test_layout_fscrypt_remove_key_arg() { - assert_eq!( - ::std::mem::size_of::(), - 64usize, - concat!("Size of: ", stringify!(fscrypt_remove_key_arg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(fscrypt_remove_key_arg)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).key_spec as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_remove_key_arg), - "::", - stringify!(key_spec) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).removal_status_flags as *const _ - as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_remove_key_arg), - "::", - stringify!(removal_status_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).__reserved as *const _ as usize - }, - 44usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_remove_key_arg), - "::", - stringify!(__reserved) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct fscrypt_get_key_status_arg { - pub key_spec: fscrypt_key_specifier, - pub __reserved: [__u32; 6usize], - pub status: __u32, - pub status_flags: __u32, - pub user_count: __u32, - pub __out_reserved: [__u32; 13usize], -} -#[test] -fn bindgen_test_layout_fscrypt_get_key_status_arg() { - assert_eq!( - ::std::mem::size_of::(), - 128usize, - concat!("Size of: ", stringify!(fscrypt_get_key_status_arg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(fscrypt_get_key_status_arg)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).key_spec as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_key_status_arg), - "::", - stringify!(key_spec) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).__reserved as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_key_status_arg), - "::", - stringify!(__reserved) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).status as *const _ as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_key_status_arg), - "::", - stringify!(status) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).status_flags as *const _ as usize - }, - 68usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_key_status_arg), - "::", - stringify!(status_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).user_count as *const _ as usize - }, - 72usize, - concat!( - 
"Offset of field: ", - stringify!(fscrypt_get_key_status_arg), - "::", - stringify!(user_count) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).__out_reserved as *const _ - as usize - }, - 76usize, - concat!( - "Offset of field: ", - stringify!(fscrypt_get_key_status_arg), - "::", - stringify!(__out_reserved) - ) - ); -} -pub const fsconfig_command_FSCONFIG_SET_FLAG: fsconfig_command = 0; -pub const fsconfig_command_FSCONFIG_SET_STRING: fsconfig_command = 1; -pub const fsconfig_command_FSCONFIG_SET_BINARY: fsconfig_command = 2; -pub const fsconfig_command_FSCONFIG_SET_PATH: fsconfig_command = 3; -pub const fsconfig_command_FSCONFIG_SET_PATH_EMPTY: fsconfig_command = 4; -pub const fsconfig_command_FSCONFIG_SET_FD: fsconfig_command = 5; -pub const fsconfig_command_FSCONFIG_CMD_CREATE: fsconfig_command = 6; -pub const fsconfig_command_FSCONFIG_CMD_RECONFIGURE: fsconfig_command = 7; -pub type fsconfig_command = ::std::os::raw::c_uint; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct mount_attr { - pub attr_set: __u64, - pub attr_clr: __u64, - pub propagation: __u64, - pub userns_fd: __u64, -} -#[test] -fn bindgen_test_layout_mount_attr() { - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(mount_attr)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(mount_attr)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).attr_set as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(mount_attr), - "::", - stringify!(attr_set) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).attr_clr as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(mount_attr), - "::", - stringify!(attr_clr) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).propagation as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(mount_attr), - "::", - stringify!(propagation) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).userns_fd as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(mount_attr), - "::", - stringify!(userns_fd) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct file_clone_range { - pub src_fd: __s64, - pub src_offset: __u64, - pub src_length: __u64, - pub dest_offset: __u64, -} -#[test] -fn bindgen_test_layout_file_clone_range() { - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(file_clone_range)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(file_clone_range)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).src_fd as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(file_clone_range), - "::", - stringify!(src_fd) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).src_offset as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(file_clone_range), - "::", - stringify!(src_offset) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).src_length as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(file_clone_range), - "::", - stringify!(src_length) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).dest_offset as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(file_clone_range), - "::", - stringify!(dest_offset) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct fstrim_range { - pub start: __u64, - pub len: 
__u64, - pub minlen: __u64, -} -#[test] -fn bindgen_test_layout_fstrim_range() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(fstrim_range)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(fstrim_range)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).start as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fstrim_range), - "::", - stringify!(start) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).len as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(fstrim_range), - "::", - stringify!(len) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).minlen as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(fstrim_range), - "::", - stringify!(minlen) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct file_dedupe_range_info { - pub dest_fd: __s64, - pub dest_offset: __u64, - pub bytes_deduped: __u64, - pub status: __s32, - pub reserved: __u32, -} -#[test] -fn bindgen_test_layout_file_dedupe_range_info() { - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(file_dedupe_range_info)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(file_dedupe_range_info)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).dest_fd as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range_info), - "::", - stringify!(dest_fd) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).dest_offset as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range_info), - "::", - stringify!(dest_offset) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).bytes_deduped as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range_info), - "::", - stringify!(bytes_deduped) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).status as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range_info), - "::", - stringify!(status) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).reserved as *const _ as usize }, - 28usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range_info), - "::", - stringify!(reserved) - ) - ); -} -#[repr(C)] -#[derive(Debug)] -pub struct file_dedupe_range { - pub src_offset: __u64, - pub src_length: __u64, - pub dest_count: __u16, - pub reserved1: __u16, - pub reserved2: __u32, - pub info: __IncompleteArrayField, -} -#[test] -fn bindgen_test_layout_file_dedupe_range() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(file_dedupe_range)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(file_dedupe_range)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).src_offset as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range), - "::", - stringify!(src_offset) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).src_length as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range), - "::", - stringify!(src_length) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).dest_count as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range), - "::", - stringify!(dest_count) - ) - ); 
- assert_eq!( - unsafe { &(*(::std::ptr::null::())).reserved1 as *const _ as usize }, - 18usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range), - "::", - stringify!(reserved1) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).reserved2 as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range), - "::", - stringify!(reserved2) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).info as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(file_dedupe_range), - "::", - stringify!(info) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct files_stat_struct { - pub nr_files: ::std::os::raw::c_ulong, - pub nr_free_files: ::std::os::raw::c_ulong, - pub max_files: ::std::os::raw::c_ulong, -} -#[test] -fn bindgen_test_layout_files_stat_struct() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(files_stat_struct)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(files_stat_struct)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).nr_files as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(files_stat_struct), - "::", - stringify!(nr_files) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).nr_free_files as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(files_stat_struct), - "::", - stringify!(nr_free_files) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).max_files as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(files_stat_struct), - "::", - stringify!(max_files) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct inodes_stat_t { - pub nr_inodes: ::std::os::raw::c_long, - pub nr_unused: ::std::os::raw::c_long, - pub dummy: [::std::os::raw::c_long; 5usize], -} -#[test] -fn bindgen_test_layout_inodes_stat_t() { - assert_eq!( - ::std::mem::size_of::(), - 56usize, - concat!("Size of: ", stringify!(inodes_stat_t)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(inodes_stat_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).nr_inodes as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(inodes_stat_t), - "::", - stringify!(nr_inodes) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).nr_unused as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(inodes_stat_t), - "::", - stringify!(nr_unused) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).dummy as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(inodes_stat_t), - "::", - stringify!(dummy) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct fsxattr { - pub fsx_xflags: __u32, - pub fsx_extsize: __u32, - pub fsx_nextents: __u32, - pub fsx_projid: __u32, - pub fsx_cowextsize: __u32, - pub fsx_pad: [::std::os::raw::c_uchar; 8usize], -} -#[test] -fn bindgen_test_layout_fsxattr() { - assert_eq!( - ::std::mem::size_of::(), - 28usize, - concat!("Size of: ", stringify!(fsxattr)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(fsxattr)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fsx_xflags as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(fsxattr), - "::", - stringify!(fsx_xflags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fsx_extsize as 
*const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(fsxattr), - "::", - stringify!(fsx_extsize) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fsx_nextents as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(fsxattr), - "::", - stringify!(fsx_nextents) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fsx_projid as *const _ as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(fsxattr), - "::", - stringify!(fsx_projid) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fsx_cowextsize as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(fsxattr), - "::", - stringify!(fsx_cowextsize) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fsx_pad as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(fsxattr), - "::", - stringify!(fsx_pad) - ) - ); -} -pub type __kernel_rwf_t = ::std::os::raw::c_int; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct io_uring_sqe { - pub opcode: __u8, - pub flags: __u8, - pub ioprio: __u16, - pub fd: __s32, - pub __bindgen_anon_1: io_uring_sqe__bindgen_ty_1, - pub __bindgen_anon_2: io_uring_sqe__bindgen_ty_2, - pub len: __u32, - pub __bindgen_anon_3: io_uring_sqe__bindgen_ty_3, - pub user_data: __u64, - pub __bindgen_anon_4: io_uring_sqe__bindgen_ty_4, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union io_uring_sqe__bindgen_ty_1 { - pub off: __u64, - pub addr2: __u64, -} -#[test] -fn bindgen_test_layout_io_uring_sqe__bindgen_ty_1() { - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_1)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).off as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_1), - "::", - stringify!(off) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).addr2 as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_1), - "::", - stringify!(addr2) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union io_uring_sqe__bindgen_ty_2 { - pub addr: __u64, - pub splice_off_in: __u64, -} -#[test] -fn bindgen_test_layout_io_uring_sqe__bindgen_ty_2() { - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_2)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_2)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).addr as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_2), - "::", - stringify!(addr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).splice_off_in as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_2), - "::", - stringify!(splice_off_in) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union io_uring_sqe__bindgen_ty_3 { - pub rw_flags: __kernel_rwf_t, - pub fsync_flags: __u32, - pub poll_events: __u16, - pub poll32_events: __u32, - pub sync_range_flags: __u32, - pub msg_flags: __u32, - pub timeout_flags: __u32, - pub accept_flags: __u32, - pub cancel_flags: __u32, - pub open_flags: __u32, - pub statx_flags: __u32, - pub fadvise_advice: __u32, - pub splice_flags: __u32, - pub rename_flags: __u32, - pub unlink_flags: 
__u32, -} -#[test] -fn bindgen_test_layout_io_uring_sqe__bindgen_ty_3() { - assert_eq!( - ::std::mem::size_of::(), - 4usize, - concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_3)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_3)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).rw_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(rw_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).fsync_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(fsync_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).poll_events as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(poll_events) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).poll32_events as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(poll32_events) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).sync_range_flags as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(sync_range_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).msg_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(msg_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).timeout_flags as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(timeout_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).accept_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(accept_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).cancel_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(cancel_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).open_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(open_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).statx_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(statx_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).fadvise_advice as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(fadvise_advice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).splice_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(splice_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).rename_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(rename_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).unlink_flags as *const _ as usize - }, - 0usize, - concat!( - 
"Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(unlink_flags) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union io_uring_sqe__bindgen_ty_4 { - pub __bindgen_anon_1: io_uring_sqe__bindgen_ty_4__bindgen_ty_1, - pub __pad2: [__u64; 3usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct io_uring_sqe__bindgen_ty_4__bindgen_ty_1 { - pub __bindgen_anon_1: io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1, - pub personality: __u16, - pub splice_fd_in: __s32, -} -#[repr(C, packed)] -#[derive(Copy, Clone)] -pub union io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1 { - pub buf_index: __u16, - pub buf_group: __u16, -} -#[test] -fn bindgen_test_layout_io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1() { - assert_eq!( - ::std::mem::size_of::(), - 2usize, - concat!( - "Size of: ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1) - ) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!( - "Alignment of ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())) - .buf_index as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1), - "::", - stringify!(buf_index) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())) - .buf_group as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1), - "::", - stringify!(buf_group) - ) - ); -} -#[test] -fn bindgen_test_layout_io_uring_sqe__bindgen_ty_4__bindgen_ty_1() { - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!( - "Size of: ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1) - ) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!( - "Alignment of ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).personality - as *const _ as usize - }, - 2usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1), - "::", - stringify!(personality) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).splice_fd_in - as *const _ as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1), - "::", - stringify!(splice_fd_in) - ) - ); -} -#[test] -fn bindgen_test_layout_io_uring_sqe__bindgen_ty_4() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_4)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_4)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).__pad2 as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_4), - "::", - stringify!(__pad2) - ) - ); -} -#[test] -fn bindgen_test_layout_io_uring_sqe() { - assert_eq!( - ::std::mem::size_of::(), - 64usize, - concat!("Size of: ", stringify!(io_uring_sqe)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_sqe)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).opcode as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe), - "::", - stringify!(opcode) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 1usize, - concat!( - "Offset of field: ", - 
stringify!(io_uring_sqe), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ioprio as *const _ as usize }, - 2usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe), - "::", - stringify!(ioprio) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fd as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe), - "::", - stringify!(fd) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).len as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe), - "::", - stringify!(len) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).user_data as *const _ as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe), - "::", - stringify!(user_data) - ) - ); -} -pub const IOSQE_FIXED_FILE_BIT: ::std::os::raw::c_uint = 0; -pub const IOSQE_IO_DRAIN_BIT: ::std::os::raw::c_uint = 1; -pub const IOSQE_IO_LINK_BIT: ::std::os::raw::c_uint = 2; -pub const IOSQE_IO_HARDLINK_BIT: ::std::os::raw::c_uint = 3; -pub const IOSQE_ASYNC_BIT: ::std::os::raw::c_uint = 4; -pub const IOSQE_BUFFER_SELECT_BIT: ::std::os::raw::c_uint = 5; -pub type _bindgen_ty_1 = ::std::os::raw::c_uint; -pub const IORING_OP_NOP: ::std::os::raw::c_uint = 0; -pub const IORING_OP_READV: ::std::os::raw::c_uint = 1; -pub const IORING_OP_WRITEV: ::std::os::raw::c_uint = 2; -pub const IORING_OP_FSYNC: ::std::os::raw::c_uint = 3; -pub const IORING_OP_READ_FIXED: ::std::os::raw::c_uint = 4; -pub const IORING_OP_WRITE_FIXED: ::std::os::raw::c_uint = 5; -pub const IORING_OP_POLL_ADD: ::std::os::raw::c_uint = 6; -pub const IORING_OP_POLL_REMOVE: ::std::os::raw::c_uint = 7; -pub const IORING_OP_SYNC_FILE_RANGE: ::std::os::raw::c_uint = 8; -pub const IORING_OP_SENDMSG: ::std::os::raw::c_uint = 9; -pub const IORING_OP_RECVMSG: ::std::os::raw::c_uint = 10; -pub const IORING_OP_TIMEOUT: ::std::os::raw::c_uint = 11; -pub const IORING_OP_TIMEOUT_REMOVE: ::std::os::raw::c_uint = 12; -pub const IORING_OP_ACCEPT: ::std::os::raw::c_uint = 13; -pub const IORING_OP_ASYNC_CANCEL: ::std::os::raw::c_uint = 14; -pub const IORING_OP_LINK_TIMEOUT: ::std::os::raw::c_uint = 15; -pub const IORING_OP_CONNECT: ::std::os::raw::c_uint = 16; -pub const IORING_OP_FALLOCATE: ::std::os::raw::c_uint = 17; -pub const IORING_OP_OPENAT: ::std::os::raw::c_uint = 18; -pub const IORING_OP_CLOSE: ::std::os::raw::c_uint = 19; -pub const IORING_OP_FILES_UPDATE: ::std::os::raw::c_uint = 20; -pub const IORING_OP_STATX: ::std::os::raw::c_uint = 21; -pub const IORING_OP_READ: ::std::os::raw::c_uint = 22; -pub const IORING_OP_WRITE: ::std::os::raw::c_uint = 23; -pub const IORING_OP_FADVISE: ::std::os::raw::c_uint = 24; -pub const IORING_OP_MADVISE: ::std::os::raw::c_uint = 25; -pub const IORING_OP_SEND: ::std::os::raw::c_uint = 26; -pub const IORING_OP_RECV: ::std::os::raw::c_uint = 27; -pub const IORING_OP_OPENAT2: ::std::os::raw::c_uint = 28; -pub const IORING_OP_EPOLL_CTL: ::std::os::raw::c_uint = 29; -pub const IORING_OP_SPLICE: ::std::os::raw::c_uint = 30; -pub const IORING_OP_PROVIDE_BUFFERS: ::std::os::raw::c_uint = 31; -pub const IORING_OP_REMOVE_BUFFERS: ::std::os::raw::c_uint = 32; -pub const IORING_OP_TEE: ::std::os::raw::c_uint = 33; -pub const IORING_OP_SHUTDOWN: ::std::os::raw::c_uint = 34; -pub const IORING_OP_RENAMEAT: ::std::os::raw::c_uint = 35; -pub const IORING_OP_UNLINKAT: ::std::os::raw::c_uint = 36; -pub const IORING_OP_LAST: ::std::os::raw::c_uint = 37; -pub type _bindgen_ty_2 = ::std::os::raw::c_uint; 
-#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct io_uring_cqe { - pub user_data: __u64, - pub res: __s32, - pub flags: __u32, -} -#[test] -fn bindgen_test_layout_io_uring_cqe() { - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(io_uring_cqe)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_cqe)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).user_data as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_cqe), - "::", - stringify!(user_data) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).res as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(io_uring_cqe), - "::", - stringify!(res) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(io_uring_cqe), - "::", - stringify!(flags) - ) - ); -} -pub const IORING_CQE_BUFFER_SHIFT: ::std::os::raw::c_uint = 16; -pub type _bindgen_ty_3 = ::std::os::raw::c_uint; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct io_sqring_offsets { - pub head: __u32, - pub tail: __u32, - pub ring_mask: __u32, - pub ring_entries: __u32, - pub flags: __u32, - pub dropped: __u32, - pub array: __u32, - pub resv1: __u32, - pub resv2: __u64, -} -#[test] -fn bindgen_test_layout_io_sqring_offsets() { - assert_eq!( - ::std::mem::size_of::(), - 40usize, - concat!("Size of: ", stringify!(io_sqring_offsets)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_sqring_offsets)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).head as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(head) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).tail as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(tail) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ring_mask as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(ring_mask) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ring_entries as *const _ as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(ring_entries) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).dropped as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(dropped) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).array as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(array) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv1 as *const _ as usize }, - 28usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(resv1) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(resv2) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct io_cqring_offsets { - pub head: __u32, - pub tail: __u32, 
- pub ring_mask: __u32, - pub ring_entries: __u32, - pub overflow: __u32, - pub cqes: __u32, - pub flags: __u32, - pub resv1: __u32, - pub resv2: __u64, -} -#[test] -fn bindgen_test_layout_io_cqring_offsets() { - assert_eq!( - ::std::mem::size_of::(), - 40usize, - concat!("Size of: ", stringify!(io_cqring_offsets)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_cqring_offsets)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).head as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(head) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).tail as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(tail) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ring_mask as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(ring_mask) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ring_entries as *const _ as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(ring_entries) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).overflow as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(overflow) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).cqes as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(cqes) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv1 as *const _ as usize }, - 28usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(resv1) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(resv2) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct io_uring_params { - pub sq_entries: __u32, - pub cq_entries: __u32, - pub flags: __u32, - pub sq_thread_cpu: __u32, - pub sq_thread_idle: __u32, - pub features: __u32, - pub wq_fd: __u32, - pub resv: [__u32; 3usize], - pub sq_off: io_sqring_offsets, - pub cq_off: io_cqring_offsets, -} -#[test] -fn bindgen_test_layout_io_uring_params() { - assert_eq!( - ::std::mem::size_of::(), - 120usize, - concat!("Size of: ", stringify!(io_uring_params)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_params)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).sq_entries as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(sq_entries) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).cq_entries as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(cq_entries) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).sq_thread_cpu as *const _ as usize }, - 12usize, - 
concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(sq_thread_cpu) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).sq_thread_idle as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(sq_thread_idle) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).features as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(features) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).wq_fd as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(wq_fd) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, - 28usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(resv) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).sq_off as *const _ as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(sq_off) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).cq_off as *const _ as usize }, - 80usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(cq_off) - ) - ); -} -pub const IORING_REGISTER_BUFFERS: ::std::os::raw::c_uint = 0; -pub const IORING_UNREGISTER_BUFFERS: ::std::os::raw::c_uint = 1; -pub const IORING_REGISTER_FILES: ::std::os::raw::c_uint = 2; -pub const IORING_UNREGISTER_FILES: ::std::os::raw::c_uint = 3; -pub const IORING_REGISTER_EVENTFD: ::std::os::raw::c_uint = 4; -pub const IORING_UNREGISTER_EVENTFD: ::std::os::raw::c_uint = 5; -pub const IORING_REGISTER_FILES_UPDATE: ::std::os::raw::c_uint = 6; -pub const IORING_REGISTER_EVENTFD_ASYNC: ::std::os::raw::c_uint = 7; -pub const IORING_REGISTER_PROBE: ::std::os::raw::c_uint = 8; -pub const IORING_REGISTER_PERSONALITY: ::std::os::raw::c_uint = 9; -pub const IORING_UNREGISTER_PERSONALITY: ::std::os::raw::c_uint = 10; -pub const IORING_REGISTER_RESTRICTIONS: ::std::os::raw::c_uint = 11; -pub const IORING_REGISTER_ENABLE_RINGS: ::std::os::raw::c_uint = 12; -pub const IORING_REGISTER_LAST: ::std::os::raw::c_uint = 13; -pub type _bindgen_ty_4 = ::std::os::raw::c_uint; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct io_uring_files_update { - pub offset: __u32, - pub resv: __u32, - pub fds: __u64, -} -#[test] -fn bindgen_test_layout_io_uring_files_update() { - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(io_uring_files_update)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_files_update)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).offset as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_files_update), - "::", - stringify!(offset) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_files_update), - "::", - stringify!(resv) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fds as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(io_uring_files_update), - "::", - stringify!(fds) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct io_uring_rsrc_update { - pub offset: __u32, - pub resv: __u32, - pub data: __u64, -} -#[test] -fn bindgen_test_layout_io_uring_rsrc_update() { - assert_eq!( - 
::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(io_uring_rsrc_update)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_rsrc_update)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).offset as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_rsrc_update), - "::", - stringify!(offset) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_rsrc_update), - "::", - stringify!(resv) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).data as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(io_uring_rsrc_update), - "::", - stringify!(data) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct io_uring_probe_op { - pub op: __u8, - pub resv: __u8, - pub flags: __u16, - pub resv2: __u32, -} -#[test] -fn bindgen_test_layout_io_uring_probe_op() { - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(io_uring_probe_op)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(io_uring_probe_op)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).op as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe_op), - "::", - stringify!(op) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, - 1usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe_op), - "::", - stringify!(resv) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 2usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe_op), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe_op), - "::", - stringify!(resv2) - ) - ); -} -#[repr(C)] -#[derive(Debug)] -pub struct io_uring_probe { - pub last_op: __u8, - pub ops_len: __u8, - pub resv: __u16, - pub resv2: [__u32; 3usize], - pub ops: __IncompleteArrayField, -} -#[test] -fn bindgen_test_layout_io_uring_probe() { - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(io_uring_probe)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(io_uring_probe)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).last_op as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe), - "::", - stringify!(last_op) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ops_len as *const _ as usize }, - 1usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe), - "::", - stringify!(ops_len) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, - 2usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe), - "::", - stringify!(resv) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe), - "::", - stringify!(resv2) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ops as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(io_uring_probe), - "::", - stringify!(ops) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct io_uring_restriction { - pub opcode: __u16, - pub 
__bindgen_anon_1: io_uring_restriction__bindgen_ty_1, - pub resv: __u8, - pub resv2: [__u32; 3usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union io_uring_restriction__bindgen_ty_1 { - pub register_op: __u8, - pub sqe_op: __u8, - pub sqe_flags: __u8, -} -#[test] -fn bindgen_test_layout_io_uring_restriction__bindgen_ty_1() { - assert_eq!( - ::std::mem::size_of::(), - 1usize, - concat!("Size of: ", stringify!(io_uring_restriction__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!( - "Alignment of ", - stringify!(io_uring_restriction__bindgen_ty_1) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).register_op as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_restriction__bindgen_ty_1), - "::", - stringify!(register_op) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).sqe_op as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_restriction__bindgen_ty_1), - "::", - stringify!(sqe_op) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).sqe_flags as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_restriction__bindgen_ty_1), - "::", - stringify!(sqe_flags) - ) - ); -} -#[test] -fn bindgen_test_layout_io_uring_restriction() { - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(io_uring_restriction)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(io_uring_restriction)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).opcode as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_restriction), - "::", - stringify!(opcode) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, - 3usize, - concat!( - "Offset of field: ", - stringify!(io_uring_restriction), - "::", - stringify!(resv) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_restriction), - "::", - stringify!(resv2) - ) - ); -} -pub const IORING_RESTRICTION_REGISTER_OP: ::std::os::raw::c_uint = 0; -pub const IORING_RESTRICTION_SQE_OP: ::std::os::raw::c_uint = 1; -pub const IORING_RESTRICTION_SQE_FLAGS_ALLOWED: ::std::os::raw::c_uint = 2; -pub const IORING_RESTRICTION_SQE_FLAGS_REQUIRED: ::std::os::raw::c_uint = 3; -pub const IORING_RESTRICTION_LAST: ::std::os::raw::c_uint = 4; -pub type _bindgen_ty_5 = ::std::os::raw::c_uint; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct io_uring_getevents_arg { - pub sigmask: __u64, - pub sigmask_sz: __u32, - pub pad: __u32, - pub ts: __u64, -} -#[test] -fn bindgen_test_layout_io_uring_getevents_arg() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(io_uring_getevents_arg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_getevents_arg)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).sigmask as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_getevents_arg), - "::", - stringify!(sigmask) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).sigmask_sz as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(io_uring_getevents_arg), - "::", - stringify!(sigmask_sz) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).pad as *const _ as usize }, - 12usize, - 
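Aside on the layout tests above: bindgen 0.59 derives each field offset by dereferencing a null pointer inside `unsafe`. For comparison only (nothing in this diff depends on it), the same checks can be written with the `memoffset` crate; the struct below is a hypothetical stand-in mirroring `io_uring_getevents_arg`, and `memoffset` is an assumed dev-dependency, not one added by this change.

// Hypothetical stand-in mirroring io_uring_getevents_arg from the deleted
// bindings; memoffset is an assumed dev-dependency.
use memoffset::offset_of;

#[repr(C)]
#[derive(Debug, Copy, Clone, Default)]
struct GetEventsArg {
    sigmask: u64,
    sigmask_sz: u32,
    pad: u32,
    ts: u64,
}

#[test]
fn layout_getevents_arg() {
    assert_eq!(std::mem::size_of::<GetEventsArg>(), 24);
    assert_eq!(std::mem::align_of::<GetEventsArg>(), 8);
    assert_eq!(offset_of!(GetEventsArg, sigmask), 0);
    assert_eq!(offset_of!(GetEventsArg, sigmask_sz), 8);
    assert_eq!(offset_of!(GetEventsArg, pad), 12);
    assert_eq!(offset_of!(GetEventsArg, ts), 16);
}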
concat!( - "Offset of field: ", - stringify!(io_uring_getevents_arg), - "::", - stringify!(pad) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ts as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(io_uring_getevents_arg), - "::", - stringify!(ts) - ) - ); -} diff --git a/runtime/asyncio/io_uring.h b/runtime/asyncio/io_uring.h deleted file mode 100644 index 8a63099..0000000 --- a/runtime/asyncio/io_uring.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/runtime/asyncio/src/cancellation.rs b/runtime/asyncio/src/cancellation.rs deleted file mode 100644 index 36614d5..0000000 --- a/runtime/asyncio/src/cancellation.rs +++ /dev/null @@ -1,149 +0,0 @@ -use std::ptr; -use std::any::Any; -use std::ffi::CString; -use ptr_meta::DynMetadata; - -/// Cancellation callback to clean up I/O resources -/// -/// This allows IO actions to properly cancel and have their resources cleaned up without having to -/// worry about the current state of the io_uring queues. -pub struct Cancellation { - data: *mut (), - metadata: usize, - drop: unsafe fn (*mut (), usize), -} - -pub unsafe trait Cancel { - fn into_raw(self) -> (*mut (), usize); - unsafe fn drop_raw(ptr: *mut (), metadata: usize); -} - -pub unsafe trait CancelNarrow { - fn into_narrow_raw(self) -> *mut (); - unsafe fn drop_narrow_raw(ptr: *mut ()); -} - -unsafe impl Cancel for T { - fn into_raw(self) -> (*mut (), usize) { - (T::into_narrow_raw(self), 0) - } - - unsafe fn drop_raw(ptr: *mut (), _: usize) { - T::drop_narrow_raw(ptr) - } -} - -unsafe impl CancelNarrow for Box { - fn into_narrow_raw(self) -> *mut () { - Box::into_raw(self) as *mut () - } - - unsafe fn drop_narrow_raw(ptr: *mut ()) { - drop(Box::from_raw(ptr)) - } -} - -unsafe impl Cancel for Box<[T]> { - fn into_raw(self) -> (*mut (), usize) { - let len = self.len(); - (Box::into_raw(self) as *mut (), len) - } - - unsafe fn drop_raw(ptr: *mut (), metadata: usize) { - drop(Vec::from_raw_parts(ptr, metadata, metadata)) - } -} - -// Cancel impl for panics -unsafe impl Cancel for Box { - fn into_raw(self) -> (*mut (), usize) { - let ptr = Box::into_raw(self); - let metadata = ptr_meta::metadata(ptr as *mut dyn Any); - let metadata = unsafe { - // SAFETY: None. I happen to know that metadata is always exactly `usize`-sized for this - // type but only `std` can guarantee it. - std::mem::transmute::<_, usize>(metadata) - }; - (ptr as *mut(), metadata) - } - - unsafe fn drop_raw(ptr: *mut (), metadata: usize) { - let boxed: Box = unsafe { - let metadata = - // SAFETY: We did it the other way around so this is safe if the previous step was. - std::mem::transmute::<_, DynMetadata>(metadata); - - // We can then (safely) construct a fat pointer from the metadata and data address - let ptr = ptr_meta::from_raw_parts_mut(ptr, metadata); - - // SAFETY: We know the pointer is valid since `Self::into_raw` took ownership and the - // vtable was extracted from this known good reference. 
- Box::from_raw(ptr) - }; - drop(boxed) - } -} - -unsafe impl CancelNarrow for CString { - fn into_narrow_raw(self) -> *mut () { - self.into_raw() as *mut () - } - - unsafe fn drop_narrow_raw(ptr: *mut ()) { - drop(CString::from_raw(ptr as *mut libc::c_char)); - } -} - -unsafe impl CancelNarrow for () { - fn into_narrow_raw(self) -> *mut () { - ptr::null_mut() - } - - unsafe fn drop_narrow_raw(_: *mut ()) {} -} - -unsafe impl Cancel for (T, F) - where T: CancelNarrow, - F: CancelNarrow, -{ - fn into_raw(self) -> (*mut (), usize) { - let (t, f) = self; - let (t, _) = t.into_raw(); - let (f, _) = f.into_raw(); - (t, f as usize) - } - - unsafe fn drop_raw(t: *mut (), f: usize) { - T::drop_raw(t, 0); - F::drop_raw(f as *mut (), 0); - } -} - -impl Cancellation { - pub fn new(cancel: T) -> Self { - let (data, metadata) = cancel.into_raw(); - Self { data, metadata, drop: T::drop_raw } - } -} - -impl Drop for Cancellation { - fn drop(&mut self) { - unsafe { - (self.drop)(self.data, self.metadata) - } - } -} - -impl From for Cancellation { - fn from(cancel: T) -> Self { - Cancellation::new(cancel) - } -} - -impl From> for Cancellation - where Cancellation: From -{ - fn from(option: Option) -> Self { - option.map_or(Cancellation::new(()), Cancellation::from) - } -} diff --git a/runtime/asyncio/src/completion.rs b/runtime/asyncio/src/completion.rs deleted file mode 100644 index 637b3f7..0000000 --- a/runtime/asyncio/src/completion.rs +++ /dev/null @@ -1,82 +0,0 @@ -use std::cell::Cell; -use std::io; -use std::marker::PhantomData; -use std::mem::ManuallyDrop; -use std::task::Waker; -use crate::cancellation::Cancellation; - -// TODO: Completions for linked requests? How would you handle having multiple results? In one -// Completion struct or using multiple? If the latter, prepare needs to set user_data -// for all intermediary SQE explicitly. -pub struct Completion { - state: ManuallyDrop>>, -} - -enum State { - Submitted(Waker), - Completed(io::Result), - Cancelled(Cancellation), - Empty, -} - -impl Completion { - pub fn new(waker: Waker) -> Self { - Self { - state: ManuallyDrop::new(Box::new(Cell::new(State::Submitted(waker)))), - } - } - - pub(crate) unsafe fn from_raw(ptr: u64) -> Self { - let ptr = ptr as usize as *mut Cell; - let state = ManuallyDrop::new(Box::from_raw(ptr)); - Self { - state, - } - } - - pub fn addr(&self) -> u64 { - self.state.as_ptr() as *const _ as usize as u64 - } - - pub fn check(self, waker: &Waker) -> Result, Self> { - match self.state.replace(State::Empty) { - State::Submitted(old_waker) => { - // If the given waker wakes a different task than the one we were constructed - // with we must replace our waker. 
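For reference, a minimal usage sketch of the `Cancellation` machinery deleted above, assuming the generic parameters that this rendering stripped (`CancelNarrow for Box<T>`, `Cancel for Box<[T]>`). Ownership of an I/O buffer moves into the `Cancellation`, and the allocation is only released when the `Cancellation` itself is dropped, i.e. once the kernel can no longer write into it.

// Illustrative only: uses the Cancellation/Cancel API from the deleted
// cancellation.rs, with the stripped generic parameters assumed as above.
use crate::cancellation::Cancellation;

// Hand a read buffer to the cancellation machinery instead of freeing it
// while the kernel may still complete the cancelled read into it.
fn cancel_pending_read(buf: Box<[u8]>) -> Cancellation {
    Cancellation::from(buf)
}

// Option<T> also converts: None becomes the no-op `()` cancellation.
fn maybe_cancel(buf: Option<Box<[u8]>>) -> Cancellation {
    Cancellation::from(buf)
}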
- if !old_waker.will_wake(waker) { - self.state.replace(State::Submitted(waker.clone())); - } else { - self.state.replace(State::Submitted(old_waker)); - } - Err(self) - }, - State::Completed(result) => { - Ok(result) - }, - _ => unreachable!(), - } - } - - pub fn cancel(self, callback: Cancellation) { - match self.state.replace(State::Cancelled(callback)) { - State::Completed(_) => { - drop(self.state); - }, - State::Submitted(_) => { - }, - _ => unreachable!(), - } - } - - pub fn complete(self, result: io::Result) { - match self.state.replace(State::Completed(result)) { - State::Submitted(waker) => { - waker.wake(); - }, - State::Cancelled(callback) => { - drop(callback); - }, - _ => unreachable!(), - } - } -} \ No newline at end of file diff --git a/runtime/asyncio/src/cq.rs b/runtime/asyncio/src/cq.rs deleted file mode 100644 index 1092b3c..0000000 --- a/runtime/asyncio/src/cq.rs +++ /dev/null @@ -1,300 +0,0 @@ -use std::cell::UnsafeCell; -use std::os::unix::prelude::RawFd; -use std::pin::Pin; -use std::ptr::NonNull; -use std::sync::atomic::{AtomicU32, compiler_fence, Ordering}; -use std::task::{Context, Poll, Waker}; -use crossbeam_queue::SegQueue; -use nix::sys::mman::munmap; -use crate::completion::Completion; -use crate::cqe::CQE; -use crate::ctypes::{CQOffsets, IORING_CQ}; - -#[derive(Debug)] -pub struct CQ { - /// Head of the completion queue. Moved by the program to indicate that it has consumed - /// completions. - /// - /// While it's important that the kernel sees the same value as the userspace program the - /// main problem that can happen otherwise is that the kernel assumes it lost completions - /// which we already successfully pulled from the queue. - head: &'static AtomicU32, - - /// Tail of the completion queue. Moved by the kernel when new completions are stored. - /// - /// Since this is modified by the kernel we should use atomic operations to read it, making - /// sure both the kernel and any program have a consistent view of its contents. - tail: &'static AtomicU32, - - /// A cached version of `tail` which additionally counts reserved slots for future - /// completions, i.e. slots that the kernel will fill in the future. - predicted_tail: UnsafeCell, - - ring_mask: u32, - num_entries: u32, - flags: &'static AtomicU32, - entries: &'static [CQE], - - waiters: SegQueue, - - // cq_ptr is set to `None` if we used a single mmap for both SQ and CQ. - cq_ptr: *mut libc::c_void, - cq_map_size: usize, -} - -impl Drop for CQ { - fn drop(&mut self) { - if !self.cq_ptr.is_null() { - unsafe { munmap(self.cq_ptr, self.cq_map_size) }; - } - } -} - -impl CQ { - pub unsafe fn new(ptr: *mut libc::c_void, - offs: CQOffsets, - cq_entries: u32, - split_mmap: bool, - cq_map_size: usize, - ) -> Self { - // Sanity check the pointer and offsets. If these fail we were probably passed an - // offsets from an uninitialized parameter struct. - assert!(!ptr.is_null()); - assert_ne!(offs.head, offs.tail); - - // Eagerly extract static values. Since they won't ever change again there's no reason to - // not read them now. 
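The `Completion` above is a small state machine shared between the submitting future and the CQE reaper. A standalone sketch of that handoff follows; the names are illustrative, not the deleted crate's API.

// Standalone sketch of the waker/result handoff that completion.rs implements.
use std::cell::Cell;
use std::task::Waker;

enum Slot {
    Waiting(Waker),
    Done(i32),
    Empty,
}

struct Handoff {
    slot: Cell<Slot>,
}

impl Handoff {
    // Called from the future's poll(): either take the result or park the waker.
    fn check(&self, waker: &Waker) -> Option<i32> {
        match self.slot.replace(Slot::Empty) {
            Slot::Done(res) => Some(res),
            Slot::Waiting(old) if old.will_wake(waker) => {
                self.slot.set(Slot::Waiting(old));
                None
            }
            _ => {
                self.slot.set(Slot::Waiting(waker.clone()));
                None
            }
        }
    }

    // Called when the corresponding CQE is reaped: store the result and wake.
    fn complete(&self, res: i32) {
        if let Slot::Waiting(waker) = self.slot.replace(Slot::Done(res)) {
            waker.wake();
        }
    }
}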
- let ring_mask = *(ptr.offset(offs.ring_mask as isize).cast()); - let num_entries = *(ptr.offset(offs.ring_entries as isize).cast()); - - let head: &AtomicU32 = &*(ptr.offset(offs.head as isize).cast()); - let tail: &AtomicU32 = &*(ptr.offset(offs.tail as isize).cast()); - let predicted_tail = UnsafeCell::new(head.load(Ordering::Acquire)); - let flags: &AtomicU32 = &*(ptr.offset(offs.flags as isize).cast()); - let entries = std::slice::from_raw_parts( - ptr.offset(offs.cqes as isize).cast(), - cq_entries as usize - ); - - Self { - head, - predicted_tail, - tail, - ring_mask, - num_entries, - flags, - - entries, - - waiters: SegQueue::new(), - - // Only store a pointer if we used a separate mmap() syscall for the CQ - cq_ptr: if split_mmap { ptr } else { std::ptr::null_mut() }, - cq_map_size, - } - } - - #[inline(always)] - fn predicted_tail(&self) -> &mut u32 { - unsafe { &mut *self.predicted_tail.get() } - } - - #[inline(always)] - /// Currently used + reserved slots - pub fn used(&self) -> u32 { - let tail = *self.predicted_tail(); - let head = self.head.load(Ordering::Relaxed); - compiler_fence(Ordering::Acquire); - tail.wrapping_sub(head) - } - - #[inline(always)] - /// Amount of available slots taking reservations into account. - pub fn available(&self) -> u32 { - self.num_entries - self.used() - } - - /// Try to reserve a number of CQ slots to make sure that - pub fn try_reserve(&self, count: u32) -> bool { - if self.available() >= count { - let tail = self.predicted_tail(); - *tail = (*tail).wrapping_add(count); - true - } else { - false - } - } - - pub fn poll_reserve(self: Pin<&mut Self>, ctx: &mut Context<'_>, count: u32) -> Poll<()> { - if self.available() >= count { - Poll::Ready(()) - } else { - self.waiters.push(ctx.waker().clone()); - Poll::Pending - } - } - - pub fn get_next(&self) -> Option<&CQE> { - let tail = self.tail.load(Ordering::Relaxed); - let head = self.head.load(Ordering::Relaxed); - if tail == head { - None - } else { - compiler_fence(Ordering::Acquire); - self.head.fetch_add(1, Ordering::Release); - let index = (head & self.ring_mask) as usize; - Some(&self.entries[index]) - } - } - - pub fn ready(&self) -> u32 { - let tail = self.tail.load(Ordering::Relaxed); - let head = self.head.load(Ordering::Relaxed); - compiler_fence(Ordering::Acquire); - tail.wrapping_sub(head) - } - - pub fn handle(&self, handler: impl Fn(&CQE)) { - let tail = self.tail.load(Ordering::Relaxed); - let head = self.head.load(Ordering::Relaxed); - - for i in head..tail { - let index = (i & self.ring_mask) as usize; - let cqe = &self.entries[index]; - handler(cqe); - } - - compiler_fence(Ordering::Acquire); - self.head.store(tail, Ordering::Release); - } - - #[cfg(test)] - fn test_insert_cqe(&self, cqe: impl Iterator) { - let head = self.head.load(Ordering::Relaxed); - let mut tail = self.tail.load(Ordering::Acquire); - unsafe { - for entry in cqe { - let index = (tail & self.ring_mask) as usize; - // Yes, this is absolutely not safe or defined behaviour in the first place. This - // function must *never* be used outside simple testing setups. 
- let ptr = &self.entries[index] as *const _ as *mut CQE; - ptr.write(entry); - tail += 1; - - // If we would overflow, crash instead - assert!((tail - head) <= self.num_entries, "test_insert_cqe overflowed the buffer"); - } - } - self.tail.store(tail, Ordering::Release); - } -} - -mod tests { - use std::sync::atomic::AtomicU64; - use super::*; - - fn gen_cq(num_entries: u32) -> CQ { - let head = Box::leak(Box::new(AtomicU32::new(0))); - let tail = Box::leak(Box::new(AtomicU32::new(0))); - let flags = Box::leak(Box::new(AtomicU32::new(0))); - let entries = Box::leak((0..num_entries).map(|_| CQE::default()).collect()); - - CQ { - head, - tail, - predicted_tail: UnsafeCell::new(0), - ring_mask: num_entries - 1, - num_entries, - flags, - entries, - cq_ptr: std::ptr::null_mut(), - cq_map_size: 0, - waiters: SegQueue::new(), - } - } - - #[test] - fn test_test_insert_cqe() { - let cq = gen_cq(4); - cq.test_insert_cqe([ - CQE { - user_data: 1, - .. Default::default() - }, - CQE { - user_data: 2, - .. Default::default() - }, - CQE { - user_data: 3, - .. Default::default() - }, - CQE { - user_data: 4, - .. Default::default() - }, - ].into_iter()); - println!("{:?}", cq.entries); - for i in 0..4 { - assert_eq!(cq.entries[i].user_data, (i+1) as u64); - } - } - - #[test] - #[should_panic] - fn test_test_insert_cqe_overflow() { - let cq = gen_cq(2); - cq.test_insert_cqe([ - CQE { - user_data: 1, - .. Default::default() - }, - CQE { - user_data: 2, - .. Default::default() - }, - CQE { - user_data: 3, - .. Default::default() - }, - CQE { - user_data: 4, - .. Default::default() - }, - ].into_iter()); - println!("{:?}", cq.entries); - } - - #[test] - fn test_cq_reserve_insert() { - let cq = gen_cq(4); - assert_eq!(cq.tail.load(Ordering::Relaxed), 0); - assert_eq!(cq.head.load(Ordering::Relaxed), 0); - assert_eq!(*cq.predicted_tail(), 0); - - cq.try_reserve(2); - assert_eq!(cq.tail.load(Ordering::Relaxed), 0); - assert_eq!(*cq.predicted_tail(), 2); - - cq.test_insert_cqe([ - CQE { - user_data: 1, - .. Default::default() - }, - CQE { - user_data: 2, - .. 
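A standalone sketch of the ring arithmetic `CQ` relies on: `num_entries` is a power of two, head and tail are free-running `u32` counters, so slot indices come from masking and fill levels from wrapping subtraction.

// Standalone sketch of the ring arithmetic used by CQ above.
fn ring_demo() {
    let entries: u32 = 4;
    let mask = entries - 1;

    let head: u32 = u32::MAX - 1;          // counters are allowed to wrap around
    let tail: u32 = head.wrapping_add(3);  // kernel has published 3 completions

    // Fill level is the wrapping distance between tail and head.
    let used = tail.wrapping_sub(head);
    assert_eq!(used, 3);
    assert!(used <= entries);

    // Slot of the next completion to consume: mask, don't modulo.
    let index = (head & mask) as usize;
    assert_eq!(index, 2); // (u32::MAX - 1) & 3 == 2
}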
Default::default() - }, - ].into_iter()); - - assert_eq!(cq.head.load(Ordering::Relaxed), 0); - assert_eq!(cq.tail.load(Ordering::Relaxed), 2); - assert_eq!(*cq.predicted_tail(), 2); - - let mut o = AtomicU64::new(1); - cq.handle(|cqe| { - assert_eq!(cqe.user_data, o.fetch_add(1, Ordering::Relaxed)) - }); - assert_eq!(o.load(Ordering::Relaxed), 3); - } -} \ No newline at end of file diff --git a/runtime/asyncio/src/cqe.rs b/runtime/asyncio/src/cqe.rs deleted file mode 100644 index f5c3e70..0000000 --- a/runtime/asyncio/src/cqe.rs +++ /dev/null @@ -1,137 +0,0 @@ -use std::io; -use std::ptr::NonNull; -use std::sync::atomic::Ordering; -use crate::cq::CQ; -use crate::io_uring::{IoUring}; - -#[repr(C)] -#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] -/// Completion Queue Event -pub struct CQE { - pub user_data: u64, - pub(crate) res: i32, - pub flags: IOCQE, -} - -impl CQE { - pub fn raw_result(&self) -> i32 { - self.res - } - - pub fn result(&self) -> io::Result { - if self.res < 0 { - let err = io::Error::from_raw_os_error(-self.res); - Err(err) - } else { - Ok(self.res) - } - } -} - -pub struct CQEs<'a> { - cq: &'a CQ, - ready: u32, -} - -impl<'a> CQEs<'a> { - pub fn new(cq: &'a CQ) -> Self { - Self { cq, ready: 0 } - } - - fn get(&mut self) -> Option { - self.cq.get_next().map(|cqe| *cqe) - } - - fn ready(&mut self) -> u32 { - self.cq.ready() - } -} - -impl<'a> Iterator for CQEs<'a> { - type Item = CQE; - - fn next(&mut self) -> Option { - if self.ready == 0 { - self.ready = self.ready(); - if self.ready == 0 { - return None; - } - } - - self.ready -= 1; - self.get() - } -} - -bitflags::bitflags! { - #[derive(Default)] - #[repr(C)] - pub struct IOCQE: u32 { - const F_BUFFER = 1; - const F_MORE = 1 << 1; - } -} -static_assertions::assert_eq_size!(u32, IOCQE); - -mod tests { - use super::*; - - #[test] - fn test_result_into_std() { - let cqe = CQE { res: 0, .. Default::default() }; - assert_eq!(cqe.result().unwrap(), 0); - let cqe = CQE { res: 42567, .. Default::default() }; - assert_eq!(cqe.result().unwrap(), 42567); - - let cqe = CQE { res: -32, .. Default::default() }; - assert_eq!(cqe.result().unwrap_err().kind(), io::ErrorKind::BrokenPipe); - - let cqe = CQE { res: -2, .. 
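For reference, the errno convention that `CQE::result()` above implements, as a self-contained example: a negative `res` in a completion is a negated errno value, anything else is the operation's return value.

use std::io;

fn cqe_result(res: i32) -> io::Result<i32> {
    if res < 0 {
        // The kernel reports failures as -errno in the CQE's res field.
        Err(io::Error::from_raw_os_error(-res))
    } else {
        Ok(res)
    }
}

fn main() {
    assert_eq!(cqe_result(42).unwrap(), 42);
    // -ENOENT (-2) surfaces as io::ErrorKind::NotFound, as the deleted test checks.
    assert_eq!(cqe_result(-2).unwrap_err().kind(), io::ErrorKind::NotFound);
}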
Default::default() }; - assert_eq!(cqe.result().unwrap_err().kind(), io::ErrorKind::NotFound); - } - - #[test] - fn test_layout_io_uring_cqe() { - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(io_uring_cqe)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_cqe)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).user_data as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_cqe), - "::", - stringify!(user_data) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).res as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(io_uring_cqe), - "::", - stringify!(res) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(io_uring_cqe), - "::", - stringify!(flags) - ) - ); - } - -} \ No newline at end of file diff --git a/runtime/asyncio/src/ctypes.rs b/runtime/asyncio/src/ctypes.rs deleted file mode 100644 index 547dd75..0000000 --- a/runtime/asyncio/src/ctypes.rs +++ /dev/null @@ -1,1164 +0,0 @@ -#![allow(non_camel_case_types)] - -// Generated using bindgen-0.59.1 and then cleaned up by hand - -use std::fmt::{Debug, Formatter}; -use std::os::unix::prelude::RawFd; -use libc::{c_ulong, c_long, c_uint, c_int}; - -#[repr(C)] -#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] -/// Parameters for the io_uring_setup syscall. -/// -/// Except for `flags`, `sq_thread_cpu`, `sq_thread_idle` and `wq_fd` this is filled entirely by -/// the kernel. -pub struct Params { - /// Number of entries in the submission queue - pub sq_entries: u32, - /// Number of entries in the completion queue - pub cq_entries: u32, - /// Setup Flags passed to the kernel - pub flags: IORING_SETUP, - /// If `!= 0` this will pin the kernel thread for submission queue polling to a given CPU - pub sq_thread_cpu: u32, - /// Timeout for the submission queue polling kernel thread - pub sq_thread_idle: u32, - /// Bitflags of features available in the current context (i.e. as that uid with that kernel) - pub features: IORING_FEAT, - /// file descriptor for a previous io_uring instance to share kernel async backend. To use - /// this you also need to set [`IORING_SETUP::ATTACH_WQ`]. - pub wq_fd: u32, - - // reserved - _resv: [u32; 3], - - /// Submission Queue offsets - pub sq_off: SQOffsets, - /// Completion Queue offsets - pub cq_off: CQOffsets, -} - -impl Params { - pub fn new(flags: IORING_SETUP) -> Self - { - Self { - flags, - .. Default::default() - } - } -} - -#[repr(C)] -#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] -/// Submission Queue offsets -/// -/// These are offsets (on top of [`IORING_OFF_SQ_RING`]) into the fd returned by `io_uring_setup` -/// at which relevant parts of information are stored. io_uring assumes this file to be mmap()ed -/// into the process memory space, thus allowing communication with the kernel using this shared -/// memory. -pub struct SQOffsets { - pub head: u32, - pub tail: u32, - pub ring_mask: u32, - pub ring_entries: u32, - pub flags: u32, - pub dropped: u32, - pub array: u32, - pub resv1: u32, - pub resv2: u64, -} - -#[repr(C)] -#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] -/// Completion Queue offsets -/// -/// These are offsets (on top of [`IORING_OFF_SQ_RING`]) into the fd returned by `io_uring_setup` -/// at which relevant parts of information are stored. 
io_uring assumes this file to be mmap()ed -/// into the process memory space, thus allowing communication with the kernel using this shared -/// memory. -pub struct CQOffsets { - pub head: u32, - pub tail: u32, - pub ring_mask: u32, - pub ring_entries: u32, - pub overflow: u32, - pub cqes: u32, - pub flags: u32, - pub resv1: u32, - pub resv2: u64, -} - -#[repr(C)] -#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] -/// Submission Queue Event -/// -/// This struct describes the I/O action that the kernel should execute on the programs behalf. -/// Every SQE will generate a [`CQE`] reply on the completion queue when the action has been -/// completed (successfully or not) which will contain the same `user_data` value. Usually -/// `user_data` is set to a pointer value, e.g. to a [`std::task::Waker`] allowing a task -/// blocking on this I/O action to be woken up. -pub struct io_uring_sqe { - /// Type of operation for this SQE - pub opcode: IORING_OP, - pub flags: IOSQE, - pub ioprio: u16, - pub fd: RawFd, - pub offset: u64, - pub address: u64, - pub len: i32, - pub op_flags: SQEOpFlags, - pub user_data: u64, - pub personality: pers_buf_pad, -} - -#[repr(C)] -#[derive(Eq, Copy, Clone)] -pub union SQEOpFlags { - pub rw_flags: c_int, - pub fsync_flags: FSYNC_FLAGS, - pub poll_events: u16, - pub poll32_events: u32, - pub sync_range_flags: u32, - pub msg_flags: u32, - pub timeout_flags: TIMEOUT_FLAGS, - pub accept_flags: u32, - pub cancel_flags: u32, - pub open_flags: u32, - pub statx_flags: u32, - pub fadvise_advice: u32, - pub splice_flags: u32, - pub rename_flags: u32, - pub unlink_flags: u32, -} -static_assertions::assert_eq_size!(u32, SQEOpFlags); - -impl PartialEq for SQEOpFlags { - fn eq(&self, other: &Self) -> bool { - unsafe { self.rw_flags == other.rw_flags } - } -} - -impl Default for SQEOpFlags { - fn default() -> Self { - Self { rw_flags: 0 } - } -} - -impl Debug for SQEOpFlags { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - unsafe { - f.debug_struct("union ioop_flags") - .field("raw", &self.rw_flags) - .field("fsync_flags", &self.fsync_flags) - .field("timeout_flags", &self.timeout_flags) - .finish() - } - } -} - -#[repr(C)] -#[derive(Eq, Copy, Clone)] -pub union pers_buf_pad { - pub personality: personality, - pub __pad2: [u64; 3], -} - -impl PartialEq for pers_buf_pad { - fn eq(&self, other: &Self) -> bool { - unsafe { self.personality == other.personality } - } -} - -impl Default for pers_buf_pad { - fn default() -> Self { - Self { personality: personality::default() } - } -} - -impl Debug for pers_buf_pad { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - unsafe { - f.debug_struct("union pers_buf_pad") - .field("personality", &self.personality) - .finish() - } - } -} - -#[repr(C)] -#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] -pub struct personality { - pub buffer: buffer_selection, - pub personality: u16, - pub splice_fd_in: i32, -} -#[repr(C, packed)] -#[derive(Eq, Copy, Clone)] -pub union buffer_selection { - pub buf_index: u16, - pub buf_group: u16, -} - -impl Debug for buffer_selection { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.write_str("personality_buffer_selection") - } -} - -impl PartialEq for buffer_selection { - fn eq(&self, other: &Self) -> bool { - unsafe { self.buf_index == other.buf_index } - } -} - -impl Default for buffer_selection { - fn default() -> Self { - Self { buf_index: 0 } - } -} - -bitflags::bitflags! 
{ - #[derive(Default)] - #[repr(C)] - /// Available features - pub struct IORING_FEAT: u32 { - /// SQ, CQ and CQE can be mapped using a single mmap(), reducing the required mmap()s - /// from three to two. - const SINGLE_MMAP = 1; - const NODROP = 2; - const SUBMIT_STABLE = 4; - const RW_CUR_POS = 8; - const CUR_PERSONALITY = 16; - const FAST_POLL = 32; - const POLL_32BITS = 64; - const SQPOLL_NONFIXED = 128; - const EXT_ARG = 256; - const NATIVE_WORKERS = 512; - const RSRC_TAGS = 1024; - } - - #[derive(Default)] - #[repr(C)] - pub struct IORING_SETUP: u32 { - const IOPOLL = 1; - const SQPOLL = 2; - const SQ_AFF = 4; - const CQSIZE = 8; - const CLAMP = 16; - /// Attach to an existing io_uring async backend kernel-side. This allows sharing - /// resources while setting up several independent rings - const ATTACH_WQ = 32; - /// Disable the io_uring async backend on creation. This allows registering - /// resources but prevents submitting and polling - const R_DISABLED = 64; - } - - #[derive(Default)] - #[repr(C)] - pub struct IORING_SQ: u32 { - /// The Kernel Submission Queue thread was stopped and needs to be waked up again. - const NEED_WAKEUP = 1; - /// The Completion Queue as overflown and completions were dropped. - const CQ_OVERFLOW = 2; - } - - #[derive(Default)] - #[repr(C)] - pub struct IORING_CQ: u32 { - const EVENTFD_DISABLED = 1; - } - - #[derive(Default)] - #[repr(C)] - pub struct IORING_ENTER: u32 { - /// If this flag is set, then the system call will wait for the specified number of - /// events in `min_complete` before returning. This flag can be set along with `to_submit` - /// to both submit and complete events in a single system call. - const GETEVENTS = 1; - /// If the io_uring was created with [`IORING_SETUP::SQPOLL`] then this flag asks the kernel - /// to wake up the kernel SQ thread. - const SQ_WAKEUP = 2; - /// When the io_uring was created with [`IORING_SETUP::SQPOLL`] it's impossible to know - /// for an application when the kernel as consumed an SQ event. If this flag is set - /// io_uring_enter will block until at least one SQE was consumed and can be re-used. - const SQ_WAIT = 4; - /// Setting this flags allows passing extra arguments to recent enough kernel versions - /// (>= 5.11). - /// This allows passing arguments other than a [`libc::sigset_t`] to `io_uring_enter` - const EXT_ARG = 8; - } - - #[derive(Default)] - #[repr(C)] - pub struct IOSQE: u8 { - /// If set a passed `fd` is not a fd but instead an index into the array of file - /// descriptors registered using [`io_uring_register`](crate::syscall::io_uring_register). - const FIXED_FILE = 1 << 0; - - /// When this flag is specified, the SQE will not be started before previously submitted - /// `SQEs` have completed, and new `SQEs` will not be started before this one completes. - /// Available since 5.2. - const IO_DRAIN = 1 << 1; - - - /// When this flag is specified, it forms a link with the next [`SQE`] in the submission - /// ring. That next `SQE` will not be started before this one completes. This, in effect, - /// forms a chain of `SQEs`, which can be arbitrarily long. The tail of the chain is - /// denoted by the first `SQE` that does not have this flag set. This flag has no effect on - /// previous `SQE` submissions, nor does it impact `SQEs` that are outside of the chain - /// tail. This means that multiple chains can be executing in parallel, or chains and - /// individual `SQEs`. Only members inside the chain are serialized. 
A chain of `SQEs` will - /// be broken, if any request in that chain ends in error. `io_uring` considers any - /// unexpected result an error. This means that, eg., a short read will also terminate the - /// remainder of the chain. If a chain of `SQE` links is broken, the remaining unstarted - /// part of the chain will be terminated and completed with `-ECANCELED` as the error code. - /// Available since 5.3. - const IO_LINK = 1 << 2; - - /// Like [`IOSQE::IO_LINK`], but it doesn't sever regardless of the completion result. - /// Note that the link will still sever if we fail submitting the parent request, hard - /// links are only resilient in the presence of completion results for requests that did - /// submit correctly. `IOSQE::IO_HARDLINK` implies `IO_LINK`. Available since 5.5. - const IO_HARDLINK = 1 << 3; - - /// Normal operation for io_uring is to try and issue an sqe as non-blocking first, and if - /// that fails, execute it in an async manner. To support more efficient overlapped - /// operation of requests that the application knows/assumes will always (or most of the - /// time) block, the application can ask for an sqe to be issued async from the start. - /// Available since 5.6. - const ASYNC = 1 << 4; - - /// Used in conjunction with the [`IORING_OP::PROVIDE_BUFFERS`] command, which registers a - /// pool of buffers to be used by commands that read or receive data. When buffers are - /// registered for this use case, and this flag is set in the command, io_uring will grab a - /// buffer from this pool when the request is ready to receive or read data. If successful, - /// the resulting `CQE` will have [`IOCQE::F_BUFFER`] set in the flags part of the struct, - /// and the upper `IORING_CQE_BUFFER_SHIFT` bits will contain the ID of the selected - /// buffers. This allows the application to know exactly which buffer was selected for the - /// op‐ eration. If no buffers are available and this flag is set, then the request will - /// fail with `-ENOBUFS` as the error code. Once a buffer has been used, it is no longer - /// available in the kernel pool. The application must re-register the given buffer again - /// when it is ready to recycle it (eg has completed using it). Available since 5.7. 
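A standalone sketch of the bitflags pattern used for `IOSQE` and the other flag sets above (bitflags 1.x syntax, matching the `bitflags 1.3.2` entry in this lockfile); the type below is illustrative, not the deleted crate's.

// Illustrative flag set showing how linked/async submissions are composed.
bitflags::bitflags! {
    #[derive(Default)]
    #[repr(C)]
    pub struct SqeFlags: u8 {
        const FIXED_FILE = 1 << 0;
        const IO_LINK    = 1 << 2;
        const ASYNC      = 1 << 4;
    }
}

fn main() {
    // A chained, forced-async submission: flags are OR-ed together ...
    let flags = SqeFlags::IO_LINK | SqeFlags::ASYNC;
    // ... and queried with contains()/bits() on the other side.
    assert!(flags.contains(SqeFlags::IO_LINK));
    assert_eq!(flags.bits(), (1 << 2) | (1 << 4));
}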
- const BUFFER_SELECT = 1 << 5; - } - - #[derive(Default)] - #[repr(C)] - pub struct FSYNC_FLAGS: u32 { - const DATASYNC = 1; - } - - #[derive(Default)] - #[repr(C)] - pub struct TIMEOUT_FLAGS: u32 { - const ABS = 0; - const UPDATE = 1; - const BOOTTIME = 1 << 2; - const REALTIME = 1 << 3; - const LINK_UPDATE = 1 << 4; - const CLOCK_MASK = (Self::BOOTTIME.bits | Self::REALTIME.bits); - const UPDATE_MASK = (Self::UPDATE.bits | Self::LINK_UPDATE.bits); - } -} -static_assertions::assert_eq_size!(u32, IORING_FEAT); -static_assertions::assert_eq_size!(u32, IORING_SETUP); -static_assertions::assert_eq_size!(u32, IORING_SQ); -static_assertions::assert_eq_size!(u32, IORING_CQ); -static_assertions::assert_eq_size!(u32, IORING_ENTER); -static_assertions::assert_eq_size!(u8, IOSQE); - -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -#[repr(u8)] -#[non_exhaustive] -pub enum IORING_OP { - NOP = 0, - READV = 1, - WRITEV = 2, - FSYNC = 3, - READ_FIXED = 4, - WRITE_FIXED = 5, - POLL_ADD = 6, - POLL_REMOVE = 7, - SYNC_FILE_RANGE = 8, - SENDMSG = 9, - RECVMSG = 10, - TIMEOUT = 11, - TIMEOUT_REMOVE = 12, - ACCEPT = 13, - ASYNC_CANCEL = 14, - LINK_TIMEOUT = 15, - CONNECT = 16, - FALLOCATE = 17, - OPENAT = 18, - CLOSE = 19, - FILES_UPDATE = 20, - STATX = 21, - READ = 22, - WRITE = 23, - FADVISE = 24, - MADVISE = 25, - SEND = 26, - RECV = 27, - OPENAT2 = 28, - EPOLL_CTL = 29, - SPLICE = 30, - PROVIDE_BUFFERS = 31, - REMOVE_BUFFERS = 32, - TEE = 33, - SHUTDOWN = 34, - RENAMEAT = 35, - UNLINKAT = 36, - MKDIRAT = 37, - SYMLINKAT = 38, - LINKAT = 39, - - LAST = 40, -} -static_assertions::assert_eq_size!(u8, IORING_OP); - -impl Default for IORING_OP { - fn default() -> Self { - IORING_OP::NOP - } -} - -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -#[repr(u32)] -#[non_exhaustive] -pub enum IORING_REGISTER_OP { - REGISTER_BUFFERS = 0, - UNREGISTER_BUFFERS = 1, - REGISTER_FILES = 2, - UNREGISTER_FILES = 3, - REGISTER_EVENTFD = 4, - UNREGISTER_EVENTFD = 5, - REGISTER_FILES_UPDATE = 6, - REGISTER_EVENTFD_ASYNC = 7, - REGISTER_PROBE = 8, - REGISTER_PERSONALITY = 9, - UNREGISTER_PERSONALITY = 10, - REGISTER_RESTRICTIONS = 11, - REGISTER_ENABLE_RINGS = 12, - REGISTER_LAST = 13, -} -static_assertions::assert_eq_size!(u32, IORING_REGISTER_OP); - -pub const IORING_OFF_SQ_RING: u32 = 0; -pub const IORING_OFF_CQ_RING: u32 = 134217728; -pub const IORING_OFF_SQES: u32 = 268435456; - -mod tests { - use super::*; - - #[test] - fn bindgen_test_layout_io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1() { - assert_eq!( - ::std::mem::size_of::(), - 2usize, - concat!( - "Size of: ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1) - ) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!( - "Alignment of ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())) - .buf_index as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1), - "::", - stringify!(buf_index) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())) - .buf_group as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1), - "::", - stringify!(buf_group) - ) - ); - } - - #[test] - fn bindgen_test_layout_io_uring_sqe__bindgen_ty_4__bindgen_ty_1() { - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!( - "Size of: ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1) - ) - ); - 
assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!( - "Alignment of ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).personality - as *const _ as usize - }, - 2usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1), - "::", - stringify!(personality) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).splice_fd_in - as *const _ as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1), - "::", - stringify!(splice_fd_in) - ) - ); - } - - #[test] - fn bindgen_test_layout_io_uring_sqe__bindgen_ty_4() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_4)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_4)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).__pad2 as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_4), - "::", - stringify!(__pad2) - ) - ); - } - - #[test] - fn bindgen_test_layout_io_uring_sqe() { - assert_eq!( - ::std::mem::size_of::(), - 64usize, - concat!("Size of: ", stringify!(io_uring_sqe)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_uring_sqe)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).opcode as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe), - "::", - stringify!(opcode) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 1usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ioprio as *const _ as usize }, - 2usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe), - "::", - stringify!(ioprio) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).fd as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe), - "::", - stringify!(fd) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).len as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe), - "::", - stringify!(len) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).user_data as *const _ as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe), - "::", - stringify!(user_data) - ) - ); - } - - #[test] - fn bindgen_test_layout_io_uring_sqe__bindgen_ty_3() { - assert_eq!( - ::std::mem::size_of::(), - 4usize, - concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_3)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_3)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).rw_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(rw_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).fsync_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(fsync_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).poll_events as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(poll_events) - ) - ); - 
assert_eq!( - unsafe { - &(*(::std::ptr::null::())).poll32_events as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(poll32_events) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).sync_range_flags as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(sync_range_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).msg_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(msg_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).timeout_flags as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(timeout_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).accept_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(accept_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).cancel_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(cancel_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).open_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(open_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).statx_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(statx_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).fadvise_advice as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(fadvise_advice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).splice_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(splice_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).rename_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(rename_flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).unlink_flags as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_sqe__bindgen_ty_3), - "::", - stringify!(unlink_flags) - ) - ); - } - - #[test] - fn bindgen_test_layout_io_sqring_offsets() { - assert_eq!( - ::std::mem::size_of::(), - 40usize, - concat!("Size of: ", stringify!(io_sqring_offsets)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_sqring_offsets)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).head as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(head) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).tail as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(tail) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ring_mask as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - 
stringify!(ring_mask) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ring_entries as *const _ as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(ring_entries) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).dropped as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(dropped) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).array as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(array) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv1 as *const _ as usize }, - 28usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(resv1) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(io_sqring_offsets), - "::", - stringify!(resv2) - ) - ); - } - - #[test] - fn bindgen_test_layout_io_cqring_offsets() { - assert_eq!( - ::std::mem::size_of::(), - 40usize, - concat!("Size of: ", stringify!(io_cqring_offsets)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(io_cqring_offsets)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).head as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(head) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).tail as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(tail) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ring_mask as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(ring_mask) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).ring_entries as *const _ as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(ring_entries) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).overflow as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(overflow) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).cqes as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(cqes) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv1 as *const _ as usize }, - 28usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(resv1) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(io_cqring_offsets), - "::", - stringify!(resv2) - ) - ); - } - #[test] - fn bindgen_test_layout_io_uring_params() { - assert_eq!( - ::std::mem::size_of::(), - 120usize, - concat!("Size of: ", stringify!(io_uring_params)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment 
of ", stringify!(io_uring_params)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).sq_entries as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(sq_entries) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).cq_entries as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(cq_entries) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).sq_thread_cpu as *const _ as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(sq_thread_cpu) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).sq_thread_idle as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(sq_thread_idle) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).features as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(features) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).wq_fd as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(wq_fd) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::()))._resv as *const _ as usize }, - 28usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(resv) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).sq_off as *const _ as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(sq_off) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).cq_off as *const _ as usize }, - 80usize, - concat!( - "Offset of field: ", - stringify!(io_uring_params), - "::", - stringify!(cq_off) - ) - ); - } - -} diff --git a/runtime/asyncio/src/fs.rs b/runtime/asyncio/src/fs.rs deleted file mode 100644 index 758d2e4..0000000 --- a/runtime/asyncio/src/fs.rs +++ /dev/null @@ -1,47 +0,0 @@ -use std::cell::Cell; -use std::io::IoSliceMut; -use std::os::unix::prelude::RawFd; -use std::pin::Pin; -use std::task::{Context, Poll}; -use futures_io::AsyncRead; -use crate::completion::Completion; -use crate::ctypes::IORING_OP; -use crate::io_uring::IoUring; -use crate::sqe::{SQE, SQEs}; -use crate::submission::Submission; - -pub struct File { - fd: RawFd, - submission: Submission, -} - -impl File { - pub fn new(fd: RawFd, io_uring: &'static IoUring) -> Self { - Self { fd, submission: Submission::new(io_uring) } - } - - fn prepare_read<'sq>( - fd: RawFd, - buf: &mut [u8], - sqes: &mut SQEs<'sq>, - ) -> SQE<'sq> - { - let mut sqe = sqes.next().expect("prepare_read requires at least one SQE"); - sqe.set_opcode(IORING_OP::READ); - sqe.set_address(buf.as_ptr() as u64); - sqe.set_fd(fd); - sqe.set_len(buf.len() as i32); - sqe - } -} - -impl AsyncRead for File { - fn poll_read(mut self: Pin<&mut Self>, ctx: &mut Context<'_>, buf: &mut [u8]) - -> Poll> - { - let fd = self.fd; - Pin::new(&mut self.submission).poll(ctx, 1, |sqes| { - Self::prepare_read(fd, buf, sqes) - }).map(|res| res.map(|val| val as usize)) - } -} \ No newline at end of file diff --git a/runtime/asyncio/src/io_uring.rs b/runtime/asyncio/src/io_uring.rs deleted file mode 100644 index 9a37ce0..0000000 --- 
a/runtime/asyncio/src/io_uring.rs +++ /dev/null @@ -1,168 +0,0 @@ -use std::fmt::{Debug, Formatter}; -use std::future::Future; -use std::io; -use std::marker::PhantomData; -use std::mem::{size_of, align_of}; -use std::ops::Deref; -use std::sync::atomic::{AtomicU32, Ordering}; -use std::os::unix::prelude::RawFd; -use std::pin::Pin; -use std::ptr::NonNull; -use std::sync::Arc; -use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; -use nix::sys::{mman, mman::{MapFlags, ProtFlags}}; -use crate::completion::Completion; -use crate::cq::CQ; -use crate::cqe::{CQE, CQEs}; -use crate::ctypes::{CQOffsets, IORING_ENTER, SQOffsets}; -use crate::sq::SQ; -use crate::sqe::{SQE, SQEs}; -use super::ctypes::{Params, io_uring_sqe, IORING_CQ, IORING_FEAT, - IORING_OFF_CQ_RING, IORING_OFF_SQ_RING, IORING_OFF_SQES, IORING_SQ}; -use super::syscall; - -#[derive(Debug)] -pub struct IoUring { - fd: RawFd, - params: Params, - sq: SQ, - cq: CQ, -} - -unsafe fn mmap(map_size: usize, fd: RawFd, offset: i64) -> nix::Result<*mut libc::c_void> { - mman::mmap( - std::ptr::null_mut(), - map_size, - ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, - MapFlags::MAP_SHARED | MapFlags::MAP_POPULATE, - fd, - offset - ) -} - -impl IoUring { - pub fn setup(entries: u32) -> io::Result { - let mut params = Params::default(); - let fd = syscall::setup(entries, &mut params)?; - - let mut sq_map_size = (params.sq_off.array as usize) + - (params.sq_entries as usize) * size_of::(); - let mut cq_map_size = (params.cq_off.cqes as usize) + - (params.cq_entries as usize) * size_of::(); - - // If we can use a single mmap() syscall to map sq, cq and cqe the size of the total map - // is the largest of `sq_map_size` and `cq_map_size`. - if params.features.contains(IORING_FEAT::SINGLE_MMAP) { - sq_map_size = sq_map_size.max(cq_map_size); - cq_map_size = sq_map_size; - } - - let sq_ptr = unsafe { - mmap(sq_map_size as usize, fd, IORING_OFF_SQ_RING as i64)? - }; - - let sqes_map_size = (params.sq_entries as usize) * size_of::(); - let sqes = unsafe { - let ptr = mmap(sqes_map_size, fd, IORING_OFF_SQES as i64)?.cast(); - std::slice::from_raw_parts_mut(ptr, params.sq_entries as usize) - }; - - let sq = unsafe { - SQ::new(sq_ptr, - params.sq_off, - sqes, - sq_map_size, - sqes_map_size - ) - }; - - let cq_ptr = if params.features.contains(IORING_FEAT::SINGLE_MMAP) { - sq_ptr - } else { - unsafe { - mmap(cq_map_size, fd, IORING_OFF_CQ_RING as i64)? 
- } - }; - let cq = unsafe { - CQ::new(cq_ptr, - params.cq_off, - params.cq_entries, - sq_ptr != cq_ptr, - cq_map_size, - ) - }; - - Ok(IoUring { - fd, - params, - sq, - cq, - }) - } - - pub fn try_prepare( - &self, - count: u32, - prepare: impl FnOnce(SQEs<'_>) - ) -> Option<()> { - self.handle_completions(); - if !self.cq.try_reserve(count) { - return None; - } - - if let Some(sqes) = self.sq.try_reserve(count) { - let start = sqes.start(); - prepare(sqes); - self.sq.prepare(start, count); - Some(()) - } else { - None - } - } - - pub fn poll_prepare<'cx>( - mut self: Pin<&mut Self>, - ctx: &mut Context<'cx>, - count: u32, - prepare: impl for<'sq> FnOnce(SQEs<'sq>, &mut Context<'cx>) -> Completion - ) -> Poll { - Pin::new(&mut self.sq).poll_prepare(ctx, count, prepare) - } - - pub fn poll_submit<'cx>( - mut self: Pin<&mut Self>, - ctx: &mut Context<'cx>, - head: u32, - ) -> Poll<()> { - let fd = self.fd; - Pin::new(&mut self.sq).poll_submit(ctx, fd, head) - } - - pub fn submit_wait(&self) -> io::Result { - self.sq.submit_wait(self.fd) - } - - pub fn handle_completions(&self) { - self.cq.handle(|cqe| { - let udata = cqe.user_data; - if udata != 0 { - let completion = unsafe { - Completion::from_raw(udata) - }; - completion.complete(cqe.result()) - } - }); - } -} - -impl Future for &IoUring { - type Output = io::Result<()>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - self.handle_completions(); - match self.sq.submit(self.fd, Some(cx.waker())) { - Ok(_) => Poll::Pending, - Err(e) => Poll::Ready(Err(e)), - } - } -} \ No newline at end of file diff --git a/runtime/asyncio/src/lib.rs b/runtime/asyncio/src/lib.rs deleted file mode 100644 index 2cc161d..0000000 --- a/runtime/asyncio/src/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ - -// Raw typedefs and structs for kernel communication via syscalls -pub mod ctypes; -mod syscall; -pub mod io_uring; - -mod sq; -mod sqe; -mod cq; -mod cqe; - -mod submission; -mod completion; -mod cancellation; - -pub mod fs; - -#[macro_export] -macro_rules! ready { - ($e:expr $(,)?) => { - match $e { - std::task::Poll::Ready(t) => t, - std::task::Poll::Pending => return std::task::Poll::Pending, - } - }; -} \ No newline at end of file diff --git a/runtime/asyncio/src/rtypes.rs b/runtime/asyncio/src/rtypes.rs deleted file mode 100644 index a67ee6b..0000000 --- a/runtime/asyncio/src/rtypes.rs +++ /dev/null @@ -1,5 +0,0 @@ - - -pub struct CQE { - -} \ No newline at end of file diff --git a/runtime/asyncio/src/sq.rs b/runtime/asyncio/src/sq.rs deleted file mode 100644 index 27c2474..0000000 --- a/runtime/asyncio/src/sq.rs +++ /dev/null @@ -1,529 +0,0 @@ -use std::cell::{Cell, UnsafeCell}; -use std::fmt::{Debug, Formatter}; -use std::io; -use std::mem::ManuallyDrop; -use std::os::unix::prelude::RawFd; -use std::pin::Pin; -use std::ptr::NonNull; -use std::sync::atomic::{AtomicU32, compiler_fence, fence, Ordering}; -use std::task::{Context, Poll, Waker}; -use crossbeam_queue::SegQueue; -use nix::sys::mman::munmap; -use crate::completion::Completion; -use crate::ctypes::{IORING_ENTER, IORING_SQ, io_uring_sqe, SQOffsets}; -use crate::sqe::{SQE, SQEs}; -use crate::syscall; - -pub struct SQ { - /// Head of the submission queue. This value is set by the kernel when it consumes SQE. - /// Thus we need to use atomic operations when passing information, making sure both the kernel - /// and program have a consistent view of its contents. - array_head: &'static AtomicU32, - - /// The head of the sqes buffer. 
This value is our local cache of `array_head` that's not - /// shared with or modified by the kernel. We use it to index the start of the prepared SQE. - /// This means that this value lags behind after `array_head`. - sqes_head: UnsafeCell, - - /// Tail of the submission queue. While this will be modified by the userspace program only, - /// the kernel uses atomic operations to read it so we want to use atomic operations to write - /// it. - array_tail: &'static AtomicU32, - // non-atomic cache of array_tail - cached_tail: UnsafeCell, - /// Tail of the sqes buffer. This value serves as our local cache of `array_tail` and, in - /// combination with `sqes_head` allows us to more efficiently submit SQE by skipping already - /// submitted ones. - /// `sqes_tail` marks the end of the prepared SQE. - sqes_tail: UnsafeCell, - - ring_mask: u32, - num_entries: u32, - - flags: &'static AtomicU32, - - dropped: &'static AtomicU32, - - array: &'static [AtomicU32], - sqes: &'static mut [UnsafeCell], - - sq_ptr: NonNull<()>, - sq_map_size: usize, - sqes_map_size: usize, - - /// Queue of tasks waiting for a submission, either because they need free slots or because - waiters: SegQueue, - submitter: Cell>, -} - -static_assertions::assert_not_impl_any!(SQ: Send, Sync); - -impl Drop for SQ { - fn drop(&mut self) { - unsafe { - munmap(self.sq_ptr.as_ptr().cast(), self.sq_map_size); - let sqes_ptr: *mut libc::c_void = self.sqes.as_mut_ptr().cast(); - munmap(sqes_ptr, self.sqes_map_size); - } - } -} - -impl Debug for SQ { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - unsafe { - // TODO: Complete - f.debug_struct("SQ") - .field("head", self.array_head) - .field("tail", self.array_tail) - .field("ring_mask", &self.ring_mask) - .field("num_entries", &self.num_entries) - .field("flags", self.flags) - .field("dropped", self.dropped) - .field("array", &self.array) - .finish() - } - } -} - -impl SQ { - pub unsafe fn new(ptr: *mut libc::c_void, - offs: SQOffsets, - sqes: &'static mut [UnsafeCell], - sq_map_size: usize, - sqes_map_size: usize, - ) -> Self { - // Sanity check the pointer and offsets. If these fail we were probably passed an - // offsets from an uninitialized parameter struct. - assert!(!ptr.is_null()); - assert_ne!(offs.head, offs.tail); - - // Eagerly extract static values. Since they won't ever change again there's no reason to - // not read them now. - let ring_mask = *(ptr.offset(offs.ring_mask as isize).cast()); - let num_entries = *(ptr.offset(offs.ring_entries as isize).cast()); - - // These are valid Rust references; they are valid for the entire lifetime of self, - // properly initialized by the kernel and well aligned. 
- let array_head: &AtomicU32 = &*(ptr.offset(offs.head as isize).cast()); - let sqes_head = UnsafeCell::new(array_head.load(Ordering::Acquire)); - let array_tail: &AtomicU32 = &*ptr.offset(offs.tail as isize).cast(); - let sqes_tail = UnsafeCell::new(array_tail.load(Ordering::Acquire)); - let cached_tail = UnsafeCell::new(array_tail.load(Ordering::Acquire)); - let flags = &*ptr.offset(offs.flags as isize).cast(); - let dropped = &*ptr.offset(offs.dropped as isize).cast(); - - let array = std::slice::from_raw_parts( - ptr.offset(offs.array as isize).cast(), - sqes.len() as usize, - ); - let sq_ptr = NonNull::new_unchecked(ptr).cast(); - - Self { - array_head, - sqes_head, - array_tail, - sqes_tail, - cached_tail, - ring_mask, - num_entries, - flags, - dropped, - array, - sqes, - sq_ptr, - sq_map_size, - sqes_map_size, - waiters: SegQueue::new(), - submitter: Cell::new(None), - } - } - - #[inline(always)] - fn sqes_head(&self) -> &mut u32 { - unsafe { &mut *self.sqes_head.get() } - } - - #[inline(always)] - fn sqes_tail(&self) -> &mut u32 { - unsafe { &mut *self.sqes_tail.get() } - } - - #[inline(always)] - fn cached_tail(&self) -> &mut u32 { - unsafe { &mut *self.cached_tail.get() } - } - - #[inline(always)] - fn increment_tail(&self, count: u32) -> u32 { - let tail = self.sqes_tail(); - let old = *tail; - *tail = (*tail).wrapping_add(count); - old - } - - #[inline(always)] - fn increment_head(&self, count: u32) -> u32{ - let head = self.sqes_head(); - let old = *head; - *head = (*head).wrapping_add(count); - old - } - - #[inline(always)] - fn used(&self) -> u32 { - (*self.sqes_tail()).wrapping_sub(*self.sqes_head()) - } - - #[inline(always)] - fn available(&self) -> u32 { - self.num_entries - self.used() - } - - #[inline(always)] - fn to_submit(&self) -> u32 { - let shared_tail = self.array_tail.load(Ordering::Relaxed); - let cached_tail = *self.cached_tail(); - cached_tail.wrapping_sub(shared_tail) - } - - pub fn submit_wait(&self, fd: RawFd) -> io::Result { - // Ensure that the writes into the array are not moved after the write of the tail. - // Otherwise kernelside may read completely wrong indices from array. - compiler_fence(Ordering::Release); - self.array_tail.store(*self.cached_tail(), Ordering::Release); - - let retval = syscall::enter( - fd, - self.num_entries, - 1, - IORING_ENTER::GETEVENTS, - std::ptr::null(), - 0, - )? as u32; - // Return SQE into circulation that we successfully submitted to the kernel. - self.increment_head(retval); - self.notify(); - Ok(retval) - } - - /// Submit all prepared entries to the kernel. This function will return the number of - /// entries successfully submitted to the kernel. - pub fn submit(&self, fd: RawFd, waker: Option<&Waker>) -> io::Result { - if let Some(waker) = waker { - let new = if let Some(old) = self.submitter.take() { - if old.will_wake(waker) { old } else { waker.clone() } - } else { - waker.clone() - }; - self.submitter.set(Some(new)); - } - - // Ensure that the writes into the array are not moved after the write of the tail. - // Otherwise kernelside may read completely wrong indices from array. - compiler_fence(Ordering::Release); - self.array_tail.store(*self.cached_tail(), Ordering::Release); - - let retval = syscall::enter( - fd, - self.num_entries, - 0, - IORING_ENTER::GETEVENTS, - std::ptr::null(), - 0, - )? as u32; - // Return SQE into circulation that we successfully submitted to the kernel. 
- self.increment_head(retval); - self.notify(); - Ok(retval) - } - - - /// Prepare actions for submission by shuffling them into the correct order. - /// - /// Kernelside `array` is used to index into the sqes, more specifically the code behaves - /// like this: - /// ```C - /// u32 mask = ctx->sq_entries - 1; - /// u32 sq_idx = ctx->cached_sq_head++ & mask; - /// u32 head = READ_ONCE(ctx->sq_array[sq_idx]); - /// if (likely(head < ctx->sq_entries)) - /// return &ctx->sq_sqes[head]; - /// ``` - /// Where `ctx->sq_entries` is the number of slots in the ring (i.e. simply a boundary check). - /// - /// So we need to make sure that for every new entry since we last submitted we have the - /// correct index set. In our case shuffle will map the next `count` entries in `self.array` to - /// point to `count` entries in `self.sqes` starting at `start`. This allows actions to be - /// submitted to the kernel even when there are still reserved SQE in between that weren't yet - /// filled. - pub fn prepare(&self, start: u32, count: u32) { - // Load the tail of the array (i.e. where we will start filling) - let tail = self.cached_tail(); - let mut head = start; - - for _ in 0..count { - let index = (*tail & self.ring_mask) as usize; - - // We can allow this store to be an Relaxed operation since updating the shared tail - // is done after a memory barrier. - self.array[index].store(head & self.ring_mask, Ordering::Relaxed); - - // Same here. We need to take the overflow into account but don't have to explicitly - // handle it. - head = head.wrapping_add(1); - *tail = (*tail).wrapping_add(1); - } - - // FIXME: This should really be done by epoll - if let Some(waker) = self.submitter.take() { - waker.wake_by_ref(); - self.submitter.set(Some(waker)); - } - } - - pub fn poll_prepare<'cx>( - self: Pin<&mut Self>, - ctx: &mut Context<'cx>, - count: u32, - prepare: impl for<'sq> FnOnce(SQEs<'sq>, &mut Context<'cx>) -> Completion - ) -> Poll { - if let Some(sqes) = self.try_reserve(count) { - let start = sqes.start(); - let completion = prepare(sqes, ctx); - self.prepare(start, count); - Poll::Ready(completion) - } else { - self.waiters.push(ctx.waker().clone()); - Poll::Pending - } - } - - /// Suggest to submit pending events to the kernel. Returns `Ready` when the relevant event - /// was submitted to the kernel, i.e. when kernelside `head` >= the given `head`. 
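To make the `prepare()` shuffling shown above concrete: the kernel walks the shared `array` in tail order, and each slot simply names one entry of the `sqes` buffer. The following standalone sketch simulates that mapping with plain integers instead of the atomics used by `SQ`; the function, ring size, and values are illustrative only and are not part of the removed crate.

```rust
// Sketch of the index mapping performed by SQ::prepare(start, count):
// write `count` masked SQE indices, starting at `start`, into the next
// `count` kernel-visible array slots.
fn prepare(array: &mut [u32], cached_tail: &mut u32, ring_mask: u32, start: u32, count: u32) {
    let mut head = start;
    for _ in 0..count {
        let index = (*cached_tail & ring_mask) as usize;
        array[index] = head & ring_mask;          // point this array slot at one SQE
        head = head.wrapping_add(1);              // indices may wrap around the ring
        *cached_tail = (*cached_tail).wrapping_add(1);
    }
}

fn main() {
    let ring_mask = 7;                            // 8-entry ring (power of two)
    let mut array = [0u32; 8];
    let mut cached_tail = 0u32;
    // Two batches were reserved as SQE slots 0..4 and 4..8, but are prepared
    // in reverse order, mirroring the out-of-order submission test below.
    prepare(&mut array, &mut cached_tail, ring_mask, 4, 4);
    prepare(&mut array, &mut cached_tail, ring_mask, 0, 4);
    // The kernel consumes `array` in tail order, so it sees SQEs 4..8 first.
    assert_eq!(array, [4, 5, 6, 7, 0, 1, 2, 3]);
}
```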
- pub fn poll_submit(self: Pin<&mut Self>, ctx: &mut Context<'_>, fd: RawFd, head: u32) - -> Poll<()> - { - let shared_tail = self.array_tail.load(Ordering::Relaxed); - let cached_tail = *self.cached_tail(); - let to_submit = cached_tail.wrapping_sub(shared_tail); - - // TODO: Do some smart cookie thinking here and batch submissions in a sensible way - if to_submit > 4 { - self.submit(fd, None); - } - - if *self.sqes_head() < head { - self.waiters.push(ctx.waker().clone()); - Poll::Pending - } else { - Poll::Ready(()) - } - } - - pub fn notify(&self) { - if self.waiters.len() > 0 && self.available() > 0 { - while let Some(waker) = self.waiters.pop() { - waker.wake() - } - } - } - - pub fn try_reserve(&self, count: u32) -> Option> { - if self.available() >= count { - let start = self.increment_tail(count); - Some(SQEs::new(self.sqes, start, count)) - } else { - None - } - } -} - -mod tests { - use std::mem::ManuallyDrop; - use std::sync::atomic::Ordering::Relaxed; - use crate::ctypes::{IORING_OP, IOSQE}; - use super::*; - - fn gen_sq(num_entries: u32, head: u32, tail: u32) -> ManuallyDrop { - assert!((0 < num_entries && num_entries <= 4096), "entries must be between 1 and 4096"); - assert_eq!(num_entries.count_ones(), 1, "entries must be a power of two"); - - let array_head = Box::leak(Box::new(AtomicU32::new(head))); - let array_tail = Box::leak(Box::new(AtomicU32::new(tail))); - let flags = Box::leak(Box::new(AtomicU32::new(0))); - let dropped = Box::leak(Box::new(AtomicU32::new(0))); - let array = Box::leak((0..num_entries) - .map(|n| AtomicU32::new(n)) - .collect::>()); - let sqes = Box::leak((0..num_entries) - .map(|_| UnsafeCell::new(io_uring_sqe::default())) - .collect::>()); - - unsafe { - ManuallyDrop::new(SQ { - array_head, - sqes_head: UnsafeCell::new(head), - array_tail, - sqes_tail: UnsafeCell::new(tail), - cached_tail: UnsafeCell::new(0), - ring_mask: num_entries - 1, - num_entries, - flags, - dropped, - array, - sqes, - sq_ptr: NonNull::dangling(), - sq_map_size: 0, - sqes_map_size: 0, - waiters: SegQueue::new(), - submitter: Cell::new(None), - }) - } - } - - #[test] - fn test_head_tail() { - let mut sq = gen_sq(64, 30, 30); - assert_eq!(*sq.sqes_head(), 30); - assert_eq!(*sq.sqes_tail(), 30); - assert_eq!(sq.used(), 0); - assert_eq!(sq.available(), 64); - - sq.increment_tail(4); - assert_eq!(*sq.sqes_head(), 30); - assert_eq!(*sq.sqes_tail(), 34); - assert_eq!(sq.used(), 4); - assert_eq!(sq.available(), 60); - - sq.increment_head(2); - assert_eq!(*sq.sqes_head(), 32); - assert_eq!(*sq.sqes_tail(), 34); - assert_eq!(sq.used(), 2); - assert_eq!(sq.available(), 62); - } - - #[test] - fn test_sq_getter_setter() { - let mut sq = gen_sq(64, 30, 30); - assert_eq!(*sq.sqes_head(), 30); - assert_eq!(*sq.sqes_tail(), 30); - assert_eq!(sq.used(), 0); - assert_eq!(sq.available(), 64); - - { - let mut sqes = sq.try_reserve(2).unwrap(); - assert_eq!(sq.used(), 2); - let mut sqe = sqes.next().unwrap(); - sqe.set_opcode(IORING_OP::READV); - sqe.add_flags(IOSQE::IO_HARDLINK); - let mut sqe = sqes.next().unwrap(); - sqe.set_opcode(IORING_OP::WRITEV); - sqe.set_userdata(823); - } - assert_eq!(sq.used(), 2); - - { - let sqes = &mut sq.sqes; - assert_eq!(sqes[30].get_mut().opcode, IORING_OP::READV); - assert_eq!(sqes[30].get_mut().flags, IOSQE::IO_HARDLINK); - assert_eq!(sqes[31].get_mut().opcode, IORING_OP::WRITEV); - assert_eq!(sqes[31].get_mut().user_data, 823); - } - - - } - - #[test] - fn test_sq_full() { - let mut sq = gen_sq(64, 1, 65); - let sqe = sq.try_reserve(1); - 
assert!(sqe.is_none()); - } - - #[test] - fn test_out_of_order_submit() { - let mut sq = gen_sq(64, 0, 0); - - let start; - { - let mut sqes = sq.try_reserve(4).unwrap(); - start = sqes.start(); - let mut sqe = sqes.next().unwrap(); - sqe.set_opcode(IORING_OP::READV); - sqe.add_flags(IOSQE::IO_HARDLINK); - sqe.set_address(1); - let mut sqe = sqes.next().unwrap(); - sqe.set_opcode(IORING_OP::READV); - sqe.add_flags(IOSQE::IO_HARDLINK); - sqe.set_address(2); - let mut sqe = sqes.next().unwrap(); - sqe.set_opcode(IORING_OP::READV); - sqe.add_flags(IOSQE::IO_HARDLINK); - sqe.set_address(3); - let mut sqe = sqes.next().unwrap(); - sqe.set_opcode(IORING_OP::READV); - sqe.set_address(4); - sqe.set_userdata(823); - } - assert_eq!(sq.used(), 4); - - let start2; - { - let mut sqes = sq.try_reserve(4).unwrap(); - start2 = sqes.start(); - let mut sqe = sqes.next().unwrap(); - sqe.set_opcode(IORING_OP::WRITEV); - sqe.add_flags(IOSQE::IO_LINK); - sqe.set_address(1); - let mut sqe = sqes.next().unwrap(); - sqe.set_opcode(IORING_OP::WRITEV); - sqe.add_flags(IOSQE::IO_LINK); - sqe.set_address(2); - let mut sqe = sqes.next().unwrap(); - sqe.set_opcode(IORING_OP::WRITEV); - sqe.add_flags(IOSQE::IO_LINK); - sqe.set_address(3); - let mut sqe = sqes.next().unwrap(); - sqe.set_opcode(IORING_OP::WRITEV); - sqe.set_address(4); - sqe.set_userdata(0xDEADBEEF); - } - assert_eq!(sq.used(), 8); - - sq.prepare(start2, 4); - sq.prepare(start, 4); - - let sqes: Vec<_> = sq.sqes.iter_mut() - .map(|c| c.get_mut().clone()) - .collect(); - let mut out: Vec<_> = sq.array.iter().map(|n| { - let i = n.load(Relaxed) as usize; - sqes[i] - }).collect(); - - for (n, s) in out.iter().take(4).enumerate() { - assert_eq!(s.opcode, IORING_OP::WRITEV); - assert_eq!(s.address, n as u64 + 1); - if n == 3 { - assert_eq!(s.user_data, 0xDEADBEEF); - } else { - assert_eq!(s.flags, IOSQE::IO_LINK); - } - } - - for (n, s) in out.iter().skip(4).take(4).enumerate() { - assert_eq!(s.opcode, IORING_OP::READV); - assert_eq!(s.address, n as u64 + 1); - if n == 3 { - assert_eq!(s.user_data, 823); - } else { - assert_eq!(s.flags, IOSQE::IO_HARDLINK); - } - } - - let mut i = out.iter().skip(8); - while let Some(sqe) = i.next() { - assert_eq!(*sqe, io_uring_sqe::default()); - } - } -} \ No newline at end of file diff --git a/runtime/asyncio/src/sqe.rs b/runtime/asyncio/src/sqe.rs deleted file mode 100644 index af35993..0000000 --- a/runtime/asyncio/src/sqe.rs +++ /dev/null @@ -1,362 +0,0 @@ -use std::cell::UnsafeCell; -use std::ops::{Deref, DerefMut}; -use std::os::unix::prelude::RawFd; -use std::slice::IterMut; -use crate::ctypes::{IORING_OP, IOSQE, io_uring_sqe, SQEOpFlags}; - -#[derive(Debug)] -pub struct SQE<'iou> { - sqe: &'iou mut io_uring_sqe, -} - -impl<'iou> SQE<'iou> { - pub fn new(sqe: &'iou mut io_uring_sqe) -> Self { - Self { sqe } - } - - #[inline(always)] - pub fn add_flags(&mut self, flags: IOSQE) { - self.sqe.flags |= flags; - } - - #[inline(always)] - pub fn set_opcode(&mut self, opcode: IORING_OP) { - self.sqe.opcode = opcode; - } - - #[inline(always)] - pub fn set_userdata(&mut self, user_data: u64) { - self.sqe.user_data = user_data; - } - - #[inline(always)] - pub fn set_address(&mut self, address: u64) { - self.sqe.address = address; - } - - #[inline(always)] - pub fn set_len(&mut self, len: i32) { - self.sqe.len = len; - } - - #[inline(always)] - pub fn set_fd(&mut self, fd: RawFd) { - self.sqe.fd = fd; - } - - #[inline(always)] - pub fn set_offset(&mut self, offset: u64) { - self.sqe.offset = offset; - } - - #[inline(always)] - pub 
fn set_op_flags(&mut self, op_flags: SQEOpFlags) { - self.sqe.op_flags = op_flags; - } - - pub fn prepare_cancel(&mut self, user_data: u64) { - self.set_opcode(IORING_OP::ASYNC_CANCEL); - self.set_address(user_data); - } -} - -pub struct SQEs<'iou> { - slice: &'iou [UnsafeCell], - mask: u32, - start: u32, - count: u32, - capacity: u32, -} - -impl<'iou> SQEs<'iou> { - pub(crate) fn new(slice: &'iou [UnsafeCell], start: u32, capacity: u32) - -> Self - { - let mask = (slice.len() - 1) as u32; - Self { slice, mask, count: 0, start, capacity } - } - - pub fn last(&mut self) -> Option> { - let mut last = None; - while let Some(sqe) = self.consume() { last = Some(sqe) } - last - } - - /// An iterator of [`HardLinkedSQE`]s. These will be [`SQE`]s that are hard linked together. - /// - /// Hard linked SQEs will occur sequentially. All of them will be completed, even if one of the - /// events resolves to an error. - pub fn hard_linked(&mut self) -> HardLinked<'iou, '_> { - HardLinked { sqes: self } - } - - /// An iterator of [`SoftLinkedSQE`]s. These will be [`SQE`]s that are soft linked together. - /// - /// Soft linked SQEs will occur sequentially. If one the events errors, all events after it - /// will be cancelled. - pub fn soft_linked(&mut self) -> SoftLinked<'iou, '_> { - SoftLinked { sqes: self } - } - - /// Remaining [`SQE`]s that can be modified. - pub fn remaining(&self) -> u32 { - self.capacity - self.count - } - - pub fn start(&self) -> u32 { - self.start - } - - pub fn capacity(&self) -> u32 { - self.capacity - } - - pub fn used(&self) -> u32 { - self.count - } - - fn consume(&mut self) -> Option> { - if self.count >= self.capacity { - None - } else { - let index = (self.start + self.count) & self.mask; - self.count += 1; - - let sqe: &mut io_uring_sqe = unsafe { - &mut *self.slice.get_unchecked(index as usize).get() - }; - - // Ensure that all SQE passing through here are wiped into NOPs first. - *sqe = io_uring_sqe::default(); - sqe.opcode = IORING_OP::NOP; - - Some(SQE { sqe }) - } - } - - /// Exhaust this iterator, thus ensuring all entries are set to NOP - fn exhaust(&mut self) { - while let Some(_) = self.consume() {} - } -} - -impl<'iou> Iterator for SQEs<'iou> { - type Item = SQE<'iou>; - - fn next(&mut self) -> Option> { - self.consume() - } -} - -impl<'iou> Drop for SQEs<'iou> { - fn drop(&mut self) { - if self.count != 0 { - // This iterator is responsible for all of its SQE and must NOP every not used one. - self.exhaust() - } - } -} - -/// An Iterator of [`SQE`]s which will be hard linked together. -pub struct HardLinked<'iou, 'a> { - sqes: &'a mut SQEs<'iou>, -} - -impl<'iou> HardLinked<'iou, '_> { - pub fn terminate(self) -> Option> { - self.sqes.consume() - } -} - -impl<'iou> Iterator for HardLinked<'iou, '_> { - type Item = HardLinkedSQE<'iou>; - - fn next(&mut self) -> Option { - let is_final = self.sqes.remaining() == 1; - self.sqes.consume().map(|sqe| HardLinkedSQE { sqe, is_final }) - } -} - -pub struct HardLinkedSQE<'iou> { - sqe: SQE<'iou>, - is_final: bool, -} - -impl<'iou> Deref for HardLinkedSQE<'iou> { - type Target = SQE<'iou>; - - fn deref(&self) -> &SQE<'iou> { - &self.sqe - } -} - -impl<'iou> DerefMut for HardLinkedSQE<'iou> { - fn deref_mut(&mut self) -> &mut SQE<'iou> { - &mut self.sqe - } -} - -impl<'iou> Drop for HardLinkedSQE<'iou> { - fn drop(&mut self) { - if !self.is_final { - self.sqe.add_flags(IOSQE::IO_HARDLINK); - } - } -} - -/// An Iterator of [`SQE`]s which will be soft linked together. 
-pub struct SoftLinked<'iou, 'a> { - sqes: &'a mut SQEs<'iou>, -} - -impl<'iou> SoftLinked<'iou, '_> { - pub fn terminate(self) -> Option> { - self.sqes.consume() - } -} - -impl<'iou> Iterator for SoftLinked<'iou, '_> { - type Item = SoftLinkedSQE<'iou>; - - fn next(&mut self) -> Option { - let is_final = self.sqes.remaining() == 1; - self.sqes.consume().map(|sqe| SoftLinkedSQE { sqe, is_final }) - } -} - -pub struct SoftLinkedSQE<'iou> { - sqe: SQE<'iou>, - is_final: bool, -} - -impl<'iou> Deref for SoftLinkedSQE<'iou> { - type Target = SQE<'iou>; - - fn deref(&self) -> &SQE<'iou> { - &self.sqe - } -} - -impl<'iou> DerefMut for SoftLinkedSQE<'iou> { - fn deref_mut(&mut self) -> &mut SQE<'iou> { - &mut self.sqe - } -} - -impl<'iou> Drop for SoftLinkedSQE<'iou> { - fn drop(&mut self) { - if !self.is_final { - self.sqe.add_flags(IOSQE::IO_LINK); - } - } -} - -mod tests { - use super::*; - - fn gen_buf(num_entries: usize) -> &'static mut [UnsafeCell]{ - Box::leak((0..num_entries) - .map(|_| UnsafeCell::new(io_uring_sqe::default())) - .collect::>()) - } - - #[test] - fn test_wrapping_sqes() { - let mut sqe_buf = gen_buf(64); - - { - let mut sqes = SQEs::new(&mut sqe_buf[..], 62, 5); - assert_eq!(sqes.next().map(|i| i.sqe.user_data = 1), Some(())); - assert_eq!(sqes.next().map(|i| i.sqe.user_data = 2), Some(())); - assert_eq!(sqes.next().map(|i| i.sqe.user_data = 3), Some(())); - assert_eq!(sqes.next().map(|i| i.sqe.user_data = 4), Some(())); - assert_eq!(sqes.next().map(|i| i.sqe.user_data = 5), Some(())); - assert_eq!(sqes.next().map(|i| i.sqe.user_data = 6), None); - } - - assert_eq!(sqe_buf[61].get_mut().user_data, 0); - assert_eq!(sqe_buf[62].get_mut().user_data, 1); - assert_eq!(sqe_buf[63].get_mut().user_data, 2); - assert_eq!(sqe_buf[0].get_mut().user_data, 3); - assert_eq!(sqe_buf[1].get_mut().user_data, 4); - assert_eq!(sqe_buf[2].get_mut().user_data, 5); - assert_eq!(sqe_buf[3].get_mut().user_data, 0); - - } - - #[test] - fn test_hard_linked_sqes() { - let mut sqe_buf = gen_buf(64); - - { - let mut sqes = SQEs::new(&mut sqe_buf, 62, 5); - let mut linked = sqes.hard_linked(); - - assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::READ), Some(())); - assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::TEE), Some(())); - assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::ACCEPT), Some(())); - assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::CLOSE), Some(())); - assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::CONNECT), Some(())); - assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::FADVISE), None); - } - - assert_eq!(sqe_buf[61].get_mut().opcode, IORING_OP::NOP); - assert_eq!(sqe_buf[61].get_mut().flags, IOSQE::empty()); - - assert_eq!(sqe_buf[62].get_mut().opcode, IORING_OP::READ); - assert_eq!(sqe_buf[62].get_mut().flags, IOSQE::IO_HARDLINK); - - assert_eq!(sqe_buf[63].get_mut().opcode, IORING_OP::TEE); - assert_eq!(sqe_buf[63].get_mut().flags, IOSQE::IO_HARDLINK); - - assert_eq!(sqe_buf[0].get_mut().opcode, IORING_OP::ACCEPT); - assert_eq!(sqe_buf[0].get_mut().flags, IOSQE::IO_HARDLINK); - - assert_eq!(sqe_buf[1].get_mut().opcode, IORING_OP::CLOSE); - assert_eq!(sqe_buf[1].get_mut().flags, IOSQE::IO_HARDLINK); - - assert_eq!(sqe_buf[2].get_mut().opcode, IORING_OP::CONNECT); - assert_eq!(sqe_buf[2].get_mut().flags, IOSQE::empty()); - - assert_eq!(sqe_buf[3].get_mut().opcode, IORING_OP::NOP); - assert_eq!(sqe_buf[3].get_mut().flags, IOSQE::empty()); - } - - #[test] - fn test_soft_linked_sqes() { - let mut 
sqe_buf = gen_buf(64); - - { - let mut sqes = SQEs::new(&mut sqe_buf, 62, 5); - let mut linked = sqes.soft_linked(); - - assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::READ), Some(())); - assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::TEE), Some(())); - assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::ACCEPT), Some(())); - assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::CLOSE), Some(())); - assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::CONNECT), Some(())); - assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::FADVISE), None); - } - - assert_eq!(sqe_buf[61].get_mut().opcode, IORING_OP::NOP); - assert_eq!(sqe_buf[61].get_mut().flags, IOSQE::empty()); - - assert_eq!(sqe_buf[62].get_mut().opcode, IORING_OP::READ); - assert_eq!(sqe_buf[62].get_mut().flags, IOSQE::IO_LINK); - - assert_eq!(sqe_buf[63].get_mut().opcode, IORING_OP::TEE); - assert_eq!(sqe_buf[63].get_mut().flags, IOSQE::IO_LINK); - - assert_eq!(sqe_buf[0].get_mut().opcode, IORING_OP::ACCEPT); - assert_eq!(sqe_buf[0].get_mut().flags, IOSQE::IO_LINK); - - assert_eq!(sqe_buf[1].get_mut().opcode, IORING_OP::CLOSE); - assert_eq!(sqe_buf[1].get_mut().flags, IOSQE::IO_LINK); - - assert_eq!(sqe_buf[2].get_mut().opcode, IORING_OP::CONNECT); - assert_eq!(sqe_buf[2].get_mut().flags, IOSQE::empty()); - - assert_eq!(sqe_buf[3].get_mut().opcode, IORING_OP::NOP); - assert_eq!(sqe_buf[3].get_mut().flags, IOSQE::empty()); - } -} \ No newline at end of file diff --git a/runtime/asyncio/src/submission.rs b/runtime/asyncio/src/submission.rs deleted file mode 100644 index 76fad24..0000000 --- a/runtime/asyncio/src/submission.rs +++ /dev/null @@ -1,136 +0,0 @@ -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; -use crate::cancellation::Cancellation; -use crate::completion::Completion; -use crate::io_uring::IoUring; -use crate::sq::SQ; -use crate::sqe::{SQE, SQEs}; - -pub struct Submission { - iouring: &'static IoUring, - state: State, -} - -enum State { - Inert, - Prepared(u32, Completion), - Submitted(Completion), - Cancelled(u64), - Lost, -} - -impl Submission { - pub fn new(iouring: &'static IoUring) -> Self { - Self { iouring, state: State::Inert } - } - - fn split_pinned(self: Pin<&mut Self>) -> (Pin<&mut IoUring>, &mut State) { - unsafe { - let this = Pin::get_unchecked_mut(self); - let iouring = &mut *(this.iouring as *const _ as *mut _); - (Pin::new_unchecked(iouring), &mut this.state) - } - } - - pub fn poll( - mut self: Pin<&mut Self>, - ctx: &mut Context<'_>, - count: u32, - prepare: impl for<'sq> FnOnce(&mut SQEs<'sq>) -> SQE<'sq> - ) -> Poll> { - match self.state { - State::Inert | State::Cancelled(_) => { - let head = crate::ready!(self.as_mut().poll_prepare(ctx, count, prepare)); - crate::ready!(self.as_mut().poll_submit(ctx, head)); - self.poll_complete(ctx) - }, - State::Prepared(head, _) => { - crate::ready!(self.as_mut().poll_submit(ctx, head)); - self.poll_complete(ctx) - }, - State::Submitted(_) => self.poll_complete(ctx), - State::Lost => { - panic!("Ring in invalid state") - }, - } - } - - pub fn poll_prepare( - self: Pin<&mut Self>, - ctx: &mut Context<'_>, - count: u32, - prepare: impl for<'sq> FnOnce(&mut SQEs<'sq>) -> SQE<'sq> - ) -> Poll { - let (sq, state) = self.split_pinned(); - let mut head = 0u32; - let completion = match *state { - State::Inert => { - crate::ready!(sq.poll_prepare(ctx, count, |mut sqes, ctx| { - *state = State::Lost; - - let mut sqe = prepare(&mut sqes); - let completion = 
Completion::new(ctx.waker().clone()); - sqe.set_userdata(completion.addr()); - - head = sqes.used(); - completion - })) - }, - State::Cancelled(prev) => { - crate::ready!(sq.poll_prepare(ctx, count + 1, |mut sqes, ctx| { - *state = State::Lost; - - sqes.soft_linked().next().unwrap().prepare_cancel(prev); - - let mut sqe = prepare(&mut sqes); - let completion = Completion::new(ctx.waker().clone()); - sqe.set_userdata(completion.addr()); - - head = sqes.used(); - completion - })) - }, - _ => unreachable!(), - }; - *state = State::Prepared(head, completion); - Poll::Ready(head) - } - - pub fn poll_submit( - self: Pin<&mut Self>, - ctx: &mut Context<'_>, - head: u32, - ) -> Poll<()> { - let (iouring, state) = self.split_pinned(); - match iouring.poll_submit(ctx, head) { - Poll::Ready(()) => { - match std::mem::replace(state, State::Lost) { - State::Prepared(_, completion) => { - *state = State::Submitted(completion); - }, - _ => unreachable!(), - } - Poll::Ready(()) - }, - Poll::Pending => Poll::Pending, - } - } - - pub fn poll_complete( - self: Pin<&mut Self>, - ctx: &mut Context<'_>, - ) -> Poll> { - let (_, state) = self.split_pinned(); - if let State::Submitted(completion) = std::mem::replace(state, State::Inert) { - match completion.check(ctx.waker()) { - Ok(result) => return Poll::Ready(result), - Err(completion) => { - *state = State::Submitted(completion) - } - } - } - - Poll::Pending - } -} \ No newline at end of file diff --git a/runtime/asyncio/src/sys/linux/epoll/mod.rs b/runtime/asyncio/src/sys/linux/epoll/mod.rs deleted file mode 100644 index e69de29..0000000 diff --git a/runtime/asyncio/src/sys/linux/io_uring/driver.rs b/runtime/asyncio/src/sys/linux/io_uring/driver.rs deleted file mode 100644 index a6458f8..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/driver.rs +++ /dev/null @@ -1,61 +0,0 @@ -use std::marker::PhantomData; -use std::pin::Pin; -use std::task::{Context, Poll}; -use iou::{SQE, SQEs}; -use super::{Event, Submission}; - -pub struct Completion<'cx> { - inner: super::Completion, - marker: PhantomData &'cx ()>, -} - -impl<'cx> Completion<'cx> { - pub(crate) fn new(mut sqe: SQE<'_>, _sqes: SQEs<'_>, cx: &mut Context<'cx>) -> Self { - let inner = super::Completion::new(cx.waker().clone()); - - // Make the userdata for the (final) SQE a pointer to the waker for the task blocking on - // this IO. - unsafe { sqe.set_user_data(inner.addr()) }; - - Self { inner, marker: PhantomData } - } - - #[inline(always)] - pub(crate) fn into_inner(self) -> super::Completion { - self.inner - } -} - -pub trait Driver: Clone { - /// Poll to prepare a number of submissions for the submission queue. - /// - /// If the driver has space for `count` SQE available it calls `prepare` to have said `SQE` - /// inserted. A driver can assume that prepare will use exactly `count` slots. Using this - /// drivers can implement backpressure by returning `Poll::Pending` if less than `count` - /// slots are available and waking the respective task up if enough slots have become available. - fn poll_prepare<'cx>( - self: Pin<&mut Self>, - ctx: &mut Context<'cx>, - count: u32, - prepare: impl FnOnce(SQEs<'_>, &mut Context<'cx>) -> Completion<'cx>, - ) -> Poll>; - - /// Suggestion for the driver to submit their queue to the kernel. - /// - /// This will be called by tasks after they have finished preparing submissions. Drivers must - /// eventually submit these to the kernel but aren't required to do so right away. 
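The `Submission::poll` state machine above chains `poll_prepare`, `poll_submit` and `poll_complete` through the crate's `ready!` macro (defined in the removed `lib.rs`). As a minimal standalone illustration of that early-return pattern — the macro body is copied from the deleted code, while `poll_twice` and its arguments are made up for the example:

```rust
use std::task::Poll;

// Same shape as the crate's `ready!`: unwrap `Poll::Ready(t)`,
// otherwise propagate `Poll::Pending` to the caller immediately.
macro_rules! ready {
    ($e:expr $(,)?) => {
        match $e {
            Poll::Ready(t) => t,
            Poll::Pending => return Poll::Pending,
        }
    };
}

fn poll_twice(first: Poll<u32>, second: Poll<u32>) -> Poll<u32> {
    let a = ready!(first);   // returns Pending early if `first` is Pending
    let b = ready!(second);
    Poll::Ready(a + b)
}

fn main() {
    assert_eq!(poll_twice(Poll::Ready(1), Poll::Ready(2)), Poll::Ready(3));
    assert_eq!(poll_twice(Poll::Pending, Poll::Ready(2)), Poll::Pending);
}
```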
- fn poll_submit(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<()>; - - /// Completion hint - /// - /// This should return `Poll::Ready` if an completion with the given user_data may have been - /// received since the last call to this function. It is safe to always return `Poll::Ready`, - /// even if no actions were completed. - fn poll_complete(self: Pin<&mut Self>, ctx: &mut Context<'_>, user_data: u64) -> Poll<()>; - - fn submit(self, event: E) -> Submission - where Self: Sized - { - Submission::new(self, event) - } -} \ No newline at end of file diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/accept.rs b/runtime/asyncio/src/sys/linux/io_uring/events/accept.rs deleted file mode 100644 index 42b3502..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/accept.rs +++ /dev/null @@ -1,27 +0,0 @@ -use std::mem::ManuallyDrop; -use std::os::unix::io::RawFd; - -use iou::sqe::{SockFlag, SockAddrStorage}; -use iou::registrar::UringFd; - -use super::{Event, SQE, SQEs, Cancellation}; - -pub struct Accept { - pub addr: Option>, - pub fd: FD, - pub flags: SockFlag, -} - -impl Event for Accept { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_accept(self.fd, self.addr.as_deref_mut(), self.flags); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - Cancellation::from(ManuallyDrop::into_inner(this).addr) - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/close.rs b/runtime/asyncio/src/sys/linux/io_uring/events/close.rs deleted file mode 100644 index b2cfdf8..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/close.rs +++ /dev/null @@ -1,19 +0,0 @@ -use std::os::unix::io::RawFd; - -use iou::registrar::UringFd; - -use super::{Event, SQE, SQEs}; - -pub struct Close { - pub fd: FD, -} - -impl Event for Close { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_close(self.fd); - sqe - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/connect.rs b/runtime/asyncio/src/sys/linux/io_uring/events/connect.rs deleted file mode 100644 index ad928c7..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/connect.rs +++ /dev/null @@ -1,26 +0,0 @@ -use std::mem::ManuallyDrop; -use std::os::unix::io::RawFd; - -use iou::sqe::SockAddr; -use iou::registrar::UringFd; - -use super::{Event, SQE, SQEs, Cancellation}; - -pub struct Connect { - pub fd: FD, - pub addr: Box, -} - -impl Event for Connect { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_connect(self.fd, &mut *self.addr); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - Cancellation::from(ManuallyDrop::into_inner(this).addr) - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/epoll_ctl.rs b/runtime/asyncio/src/sys/linux/io_uring/events/epoll_ctl.rs deleted file mode 100644 index a9f7269..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/epoll_ctl.rs +++ /dev/null @@ -1,27 +0,0 @@ -use std::mem::ManuallyDrop; -use std::os::unix::io::RawFd; - -use iou::sqe::{EpollOp, EpollEvent}; - -use super::{Event, SQE, SQEs, Cancellation}; - -pub struct EpollCtl { - pub epoll_fd: RawFd, - pub op: EpollOp, - pub fd: RawFd, - pub event: Option>, -} - -impl Event for EpollCtl { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: 
&mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_epoll_ctl(self.epoll_fd, self.op, self.fd, self.event.as_deref_mut()); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - Cancellation::from(ManuallyDrop::into_inner(this).event) - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/fadvise.rs b/runtime/asyncio/src/sys/linux/io_uring/events/fadvise.rs deleted file mode 100644 index 9560b22..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/fadvise.rs +++ /dev/null @@ -1,23 +0,0 @@ -use std::os::unix::io::RawFd; - -use iou::sqe::PosixFadviseAdvice; -use iou::registrar::UringFd; - -use super::{Event, SQE, SQEs}; - -pub struct Fadvise { - pub fd: FD, - pub offset: u64, - pub size: u64, - pub flags: PosixFadviseAdvice, -} - -impl Event for Fadvise { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_fadvise(self.fd, self.offset, self.size, self.flags); - sqe - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/fallocate.rs b/runtime/asyncio/src/sys/linux/io_uring/events/fallocate.rs deleted file mode 100644 index 01d6cd1..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/fallocate.rs +++ /dev/null @@ -1,23 +0,0 @@ -use std::os::unix::io::RawFd; - -use iou::registrar::UringFd; -use iou::sqe::FallocateFlags; - -use super::{Event, SQE, SQEs}; - -pub struct Fallocate { - pub fd: FD, - pub offset: u64, - pub size: u64, - pub flags: FallocateFlags, -} - -impl Event for Fallocate { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_fallocate(self.fd, self.offset, self.size, self.flags); - sqe - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/files_update.rs b/runtime/asyncio/src/sys/linux/io_uring/events/files_update.rs deleted file mode 100644 index d1f8337..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/files_update.rs +++ /dev/null @@ -1,23 +0,0 @@ -use std::mem::ManuallyDrop; -use std::os::unix::io::RawFd; - -use super::{Event, SQE, SQEs, Cancellation}; - -pub struct FilesUpdate { - pub files: Box<[RawFd]>, - pub offset: u32, -} - -impl Event for FilesUpdate { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_files_update(&self.files[..], self.offset); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - Cancellation::from(ManuallyDrop::into_inner(this).files) - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/fsync.rs b/runtime/asyncio/src/sys/linux/io_uring/events/fsync.rs deleted file mode 100644 index 60c73ca..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/fsync.rs +++ /dev/null @@ -1,21 +0,0 @@ -use std::os::unix::io::RawFd; - -use iou::registrar::UringFd; -use iou::sqe::FsyncFlags; - -use super::{Event, SQE, SQEs}; - -pub struct Fsync { - pub fd: FD, - pub flags: FsyncFlags, -} - -impl Event for Fsync { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_fsync(self.fd, self.flags); - sqe - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/mod.rs b/runtime/asyncio/src/sys/linux/io_uring/events/mod.rs deleted file mode 100644 index 2b7369a..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/mod.rs +++ 
/dev/null @@ -1,56 +0,0 @@ - -mod accept; -mod close; -mod connect; -mod epoll_ctl; -mod fadvise; -mod fallocate; -mod files_update; -mod fsync; -mod openat; -mod provide_buffers; -mod read; -mod readv; -mod recv; -mod send; -mod splice; -mod statx; -mod timeout; -mod write; -mod writev; - -use std::mem::ManuallyDrop; -use iou::{SQE, SQEs}; -use super::Cancellation; - -pub use accept::Accept; -pub use close::Close; -pub use connect::Connect; -pub use epoll_ctl::EpollCtl; -pub use fadvise::Fadvise; -pub use fallocate::Fallocate; -pub use files_update::FilesUpdate; -pub use fsync::Fsync; -pub use openat::OpenAt; -pub use provide_buffers::ProvideBuffers; -pub use read::Read; -pub use readv::ReadVectored; -pub use recv::Recv; -pub use send::Send; -pub use splice::Splice; -pub use statx::Statx; -pub use timeout::Timeout; -pub use write::Write; -pub use writev::WriteVectored; - -pub trait Event { - fn sqes_needed() -> u32; - - unsafe fn prepare<'a>(&mut self, sqs: &mut SQEs<'a>) -> SQE<'a>; - - fn cancel(_: ManuallyDrop) -> Cancellation - where Self: Sized - { - Cancellation::from(()) - } -} \ No newline at end of file diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/openat.rs b/runtime/asyncio/src/sys/linux/io_uring/events/openat.rs deleted file mode 100644 index b814124..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/openat.rs +++ /dev/null @@ -1,39 +0,0 @@ -use std::ffi::CString; -use std::mem::ManuallyDrop; -use std::os::unix::ffi::OsStrExt; -use std::os::unix::prelude::RawFd; -use std::path::Path; -use iou::{SQE, SQEs}; -use iou::sqe::{Mode, OFlag}; -use crate::sys::linux::io_uring::cancellation::Cancellation; -use super::Event; - -pub struct OpenAt { - pub path: CString, - pub dir_fd: RawFd, - pub flags: OFlag, - pub mode: Mode, -} - -impl OpenAt { - pub fn without_dir(path: impl AsRef, flags: OFlag, mode: Mode) -> Self { - let path = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap(); - Self { path, dir_fd: libc::AT_FDCWD, flags, mode } - } -} - -impl Event for OpenAt { - fn sqes_needed() -> u32 { - 1 - } - - unsafe fn prepare<'a>(&mut self, sqs: &mut SQEs<'a>) -> SQE<'a> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_openat(self.dir_fd, &*self.path, self.flags, self.mode); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation where Self: Sized { - ManuallyDrop::into_inner(this).path.into() - } -} \ No newline at end of file diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/provide_buffers.rs b/runtime/asyncio/src/sys/linux/io_uring/events/provide_buffers.rs deleted file mode 100644 index 569d559..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/provide_buffers.rs +++ /dev/null @@ -1,40 +0,0 @@ -use std::mem::ManuallyDrop; -use iou::sqe::BufferGroupId; - -use super::{Event, SQE, SQEs, Cancellation}; - -pub struct ProvideBuffers { - pub bufs: Box<[u8]>, - pub count: u32, - pub group: BufferGroupId, - pub index: u32, -} - -impl Event for ProvideBuffers { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_provide_buffers(&mut self.bufs[..], self.count, self.group, self.index); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - Cancellation::from(ManuallyDrop::into_inner(this).bufs) - } -} - -pub struct RemoveBuffers { - pub count: u32, - pub group: BufferGroupId, -} - -impl Event for RemoveBuffers { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut 
sqe = sqs.single().unwrap(); - sqe.prep_remove_buffers(self.count, self.group); - sqe - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/read.rs b/runtime/asyncio/src/sys/linux/io_uring/events/read.rs deleted file mode 100644 index cd3ac83..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/read.rs +++ /dev/null @@ -1,47 +0,0 @@ -use std::mem::ManuallyDrop; -use std::os::unix::io::RawFd; - -use iou::registrar::{UringFd, RegisteredBuf}; - -use super::{Event, SQE, SQEs, Cancellation}; - -/// A basic read event. -pub struct Read { - pub fd: FD, - pub buf: Box<[u8]>, - pub offset: u64, -} - -impl Event for Read { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_read(self.fd, &mut self.buf[..], self.offset); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - Cancellation::from(ManuallyDrop::into_inner(this).buf) - } -} - -pub struct ReadFixed { - pub fd: FD, - pub buf: RegisteredBuf, - pub offset: u64, -} - -impl Event for ReadFixed { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_read(self.fd, self.buf.as_mut(), self.offset); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - Cancellation::from(ManuallyDrop::into_inner(this).buf) - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/readv.rs b/runtime/asyncio/src/sys/linux/io_uring/events/readv.rs deleted file mode 100644 index dfdf384..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/readv.rs +++ /dev/null @@ -1,48 +0,0 @@ -use std::io::IoSliceMut; -use std::mem::ManuallyDrop; -use std::os::unix::io::RawFd; - -use iou::registrar::UringFd; - -use super::{Event, SQE, SQEs, Cancellation}; - -/// A `readv` event. -pub struct ReadVectored { - pub fd: FD, - pub bufs: Box<[Box<[u8]>]>, - pub offset: u64, -} - -impl ReadVectored { - fn as_iovecs(buffers: &mut [Box<[u8]>]) -> &mut [IoSliceMut] { - // Unsafe contract: - // This pointer cast is defined behaviour because Box<[u8]> (wide pointer) - // is currently ABI compatible with libc::iovec. - // - // Then, libc::iovec is guaranteed ABI compatible with IoSliceMut on Unix: - // https://doc.rust-lang.org/beta/std/io/struct.IoSliceMut.html - // - // We are relying on the internals of Box<[u8]>, but this is such a - // foundational part of Rust it's unlikely the data layout would change - // without warning. 
- // - // Pointer cast expression adapted from the "Turning a &mut T into an &mut U" - // example of: https://doc.rust-lang.org/std/mem/fn.transmute.html#alternatives - unsafe { &mut *(buffers as *mut [Box<[u8]>] as *mut [IoSliceMut]) } - } -} - - -impl Event for ReadVectored { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_read_vectored(self.fd, Self::as_iovecs(&mut self.bufs[..]), self.offset); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - Cancellation::from(ManuallyDrop::into_inner(this).bufs) - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/recv.rs b/runtime/asyncio/src/sys/linux/io_uring/events/recv.rs deleted file mode 100644 index 86dceea..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/recv.rs +++ /dev/null @@ -1,27 +0,0 @@ -use std::mem::ManuallyDrop; -use std::os::unix::io::RawFd; - -use iou::sqe::MsgFlags; -use iou::registrar::UringFd; - -use super::{Event, SQE, SQEs, Cancellation}; - -pub struct Recv { - pub fd: FD, - pub buf: Box<[u8]>, - pub flags: MsgFlags, -} - -impl Event for Recv { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_recv(self.fd, &mut self.buf[..], self.flags); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - Cancellation::from(ManuallyDrop::into_inner(this).buf) - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/send.rs b/runtime/asyncio/src/sys/linux/io_uring/events/send.rs deleted file mode 100644 index 740dade..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/send.rs +++ /dev/null @@ -1,27 +0,0 @@ -use std::mem::ManuallyDrop; -use std::os::unix::io::RawFd; - -use iou::sqe::MsgFlags; -use iou::registrar::UringFd; - -use super::{Event, SQE, SQEs, Cancellation}; - -pub struct Send { - pub fd: FD, - pub buf: Box<[u8]>, - pub flags: MsgFlags, -} - -impl Event for Send { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_send(self.fd, &self.buf[..], self.flags); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - Cancellation::from(ManuallyDrop::into_inner(this).buf) - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/splice.rs b/runtime/asyncio/src/sys/linux/io_uring/events/splice.rs deleted file mode 100644 index c574d34..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/splice.rs +++ /dev/null @@ -1,24 +0,0 @@ -use std::os::unix::io::RawFd; - -use iou::sqe::SpliceFlags; - -use super::{Event, SQE, SQEs}; - -pub struct Splice { - pub fd_in: RawFd, - pub off_in: i64, - pub fd_out: RawFd, - pub off_out: i64, - pub bytes: u32, - pub flags: SpliceFlags, -} - -impl Event for Splice { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_splice(self.fd_in, self.off_in, self.fd_out, self.off_out, self.bytes, self.flags); - sqe - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/statx.rs b/runtime/asyncio/src/sys/linux/io_uring/events/statx.rs deleted file mode 100644 index 1349ab7..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/statx.rs +++ /dev/null @@ -1,53 +0,0 @@ -use std::ffi::CString; -use std::mem::{self, ManuallyDrop}; -use std::os::unix::io::RawFd; -use std::os::unix::ffi::OsStrExt; -use 
std::path::Path; - -use iou::sqe::{StatxFlags, StatxMode}; -use iou::registrar::UringFd; - -use super::{Event, SQE, SQEs, Cancellation}; - -pub struct Statx { - pub dir_fd: FD, - pub path: CString, - pub flags: StatxFlags, - pub mask: StatxMode, - pub statx: Box, -} - -impl Statx { - pub fn without_dir(path: impl AsRef, flags: StatxFlags, mask: StatxMode) -> Statx { - let path = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap(); - let statx = unsafe { Box::new(mem::zeroed()) }; - Statx { path, dir_fd: libc::AT_FDCWD, flags, mask, statx } - } -} - -impl Statx { - pub fn without_path(fd: FD, mut flags: StatxFlags, mask: StatxMode) -> Statx { - unsafe { - // TODO don't allocate? Use Cow? Use NULL? - let path = CString::new("").unwrap(); - let statx = Box::new(mem::zeroed()); - flags.insert(StatxFlags::AT_EMPTY_PATH); - Statx { dir_fd: fd, path, flags, mask, statx } - } - } -} - -impl Event for Statx { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_statx(self.dir_fd, self.path.as_c_str(), self.flags, self.mask, &mut *self.statx); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - let this = ManuallyDrop::into_inner(this); - Cancellation::from((this.statx, this.path)) - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/timeout.rs b/runtime/asyncio/src/sys/linux/io_uring/events/timeout.rs deleted file mode 100644 index b2d9f9a..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/timeout.rs +++ /dev/null @@ -1,67 +0,0 @@ -use std::mem::ManuallyDrop; -use std::time::Duration; - -use super::{Event, SQE, SQEs, Cancellation}; - -use iou::sqe::TimeoutFlags; - -pub struct StaticTimeout { - ts: uring_sys::__kernel_timespec, - events: u32, - flags: TimeoutFlags, -} - -impl StaticTimeout { - pub const fn new(duration: Duration, events: u32, flags: TimeoutFlags) -> StaticTimeout { - StaticTimeout { - ts: timespec(duration), - events, flags, - } - } -} - -impl Event for &'static StaticTimeout { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_timeout(&self.ts, self.events, self.flags); - sqe - } -} - -pub struct Timeout { - ts: Box, - events: u32, - flags: TimeoutFlags, -} - -impl Timeout { - pub fn new(duration: Duration, events: u32, flags: TimeoutFlags) -> Timeout { - Timeout { - ts: Box::new(timespec(duration)), - events, flags, - } - } -} - -impl Event for Timeout { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_timeout(&*self.ts, self.events, self.flags); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - Cancellation::from(ManuallyDrop::into_inner(this).ts) - } -} - -const fn timespec(duration: Duration) -> uring_sys::__kernel_timespec { - uring_sys::__kernel_timespec { - tv_sec: duration.as_secs() as i64, - tv_nsec: duration.subsec_nanos() as _, - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/write.rs b/runtime/asyncio/src/sys/linux/io_uring/events/write.rs deleted file mode 100644 index bf0d308..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/write.rs +++ /dev/null @@ -1,47 +0,0 @@ -use std::mem::ManuallyDrop; -use std::os::unix::io::RawFd; - -use iou::registrar::{UringFd, RegisteredBuf}; - -use super::{Event, SQE, SQEs, Cancellation}; - -/// A basic write event. 
-pub struct Write { - pub fd: FD, - pub buf: Box<[u8]>, - pub offset: u64, -} - -impl Event for Write { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_write(self.fd, &self.buf[..], self.offset); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - Cancellation::from(ManuallyDrop::into_inner(this).buf) - } -} - -pub struct WriteFixed { - pub fd: FD, - pub buf: RegisteredBuf, - pub offset: u64, -} - -impl Event for WriteFixed { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_write(self.fd, self.buf.as_ref(), self.offset); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - Cancellation::from(ManuallyDrop::into_inner(this).buf) - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/writev.rs b/runtime/asyncio/src/sys/linux/io_uring/events/writev.rs deleted file mode 100644 index 447ada2..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/events/writev.rs +++ /dev/null @@ -1,34 +0,0 @@ -use std::io::IoSlice; -use std::mem::ManuallyDrop; -use std::os::unix::io::RawFd; - -use iou::registrar::UringFd; - -use super::{Event, SQE, SQEs, Cancellation}; - -/// A `writev` event. -pub struct WriteVectored { - pub fd: FD, - pub bufs: Box<[Box<[u8]>]>, - pub offset: u64, -} - -impl WriteVectored { - fn iovecs(&self) -> &[IoSlice] { - unsafe { & *(&self.bufs[..] as *const [Box<[u8]>] as *const [IoSlice]) } - } -} - -impl Event for WriteVectored { - fn sqes_needed() -> u32 { 1 } - - unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { - let mut sqe = sqs.single().unwrap(); - sqe.prep_write_vectored(self.fd, self.iovecs(), self.offset); - sqe - } - - fn cancel(this: ManuallyDrop) -> Cancellation { - Cancellation::from(ManuallyDrop::into_inner(this).bufs) - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/fs.rs b/runtime/asyncio/src/sys/linux/io_uring/fs.rs deleted file mode 100644 index 104a63b..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/fs.rs +++ /dev/null @@ -1,187 +0,0 @@ -// Imported here for modules -use std::future::Future; -use std::{fs, io}; -use std::mem::ManuallyDrop; -use std::os::unix::prelude::{FromRawFd, RawFd}; -use std::path::Path; -use std::pin::Pin; -use std::task::{Context, Poll}; - -use super::{Driver, Ring, Submission, events::*}; - -use futures_core::ready; -use futures_io::{AsyncRead, AsyncWrite, AsyncSeek, AsyncBufRead}; - -use iou::sqe::{Mode, OFlag}; - -pub struct File { - ring: Ring, - fd: RawFd, - active: Op, -} - -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -enum Op { - Read, - Write, - Close, - Nothing, - Statx, - Closed, -} - - -impl File { - fn from_fd(fd: RawFd, driver: D) -> File { - File { - ring: Ring::new(driver), - fd, - active: Op::Nothing, - } - } - - pub fn open>(driver: D, path: P) -> impl Future> { - let flags = OFlag::O_CLOEXEC | OFlag::O_RDONLY; - open::Open(driver.submit(OpenAt::without_dir( - path, flags, Mode::from_bits(0o666).unwrap() - ))) - } - - pub fn create>(driver: D, path: P) -> impl Future> { - let flags = OFlag::O_CLOEXEC | OFlag::O_WRONLY | OFlag::O_CREAT | OFlag::O_TRUNC; - create::Create(driver.submit(OpenAt::without_dir( - path, flags, Mode::from_bits(0o666).unwrap() - ))) - } -} - -mod open; -mod create; - -impl AsyncRead for File { - fn poll_read(mut self: Pin<&mut Self>, ctx: &mut Context<'_>, buf: &mut [u8]) - -> Poll> - { - let mut inner = 
ready!(self.as_mut().poll_fill_buf(ctx))?; - let len = io::Read::read(&mut inner, buf)?; - self.consume(len); - Poll::Ready(Ok(len)) - } -} - -impl AsyncBufRead for File { - fn poll_fill_buf(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll> { - let fd = self.fd; - let (ring, buf, pos, ..) = self.split_with_buf(); - buf.fill_buf(|buf| { - let n = ready!(ring.poll(ctx, 1, |sqs| { - let mut sqe = sqs.single().unwrap(); - unsafe { - sqe.prep_read(fd, buf, *pos); - } - sqe - }))?; - *pos += n as u64; - Poll::Ready(Ok(n as u32)) - }) - } - - fn consume(self: Pin<&mut Self>, amt: usize) { - self.buf().consume(amt); - } -} - -impl AsyncWrite for File { - fn poll_write(mut self: Pin<&mut Self>, ctx: &mut Context<'_>, slice: &[u8]) -> Poll> { - let fd = self.fd; - let (ring, buf, pos, ..) = self.split_with_buf(); - let data = ready!(buf.fill_buf(|mut buf| { - Poll::Ready(Ok(io::Write::write(&mut buf, slice)? as u32)) - }))?; - let n = ready!(ring.poll(ctx, 1, |sqs| { - let mut sqe = sqs.single().unwrap(); - unsafe { - sqe.prep_write(fd, data, *pos); - } - sqe - }))?; - *pos += n as u64; - buf.clear(); - Poll::Ready(Ok(n as usize)) - } - - fn poll_flush(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll> { - ready!(self.poll_write(ctx, &[]))?; - Poll::Ready(Ok(())) - } - - fn poll_close(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll> { - self.as_mut().guard_op(Op::Close); - let fd = self.fd; - ready!(self.as_mut().ring().poll(ctx, 1, |sqs| { - let mut sqe = sqs.single().unwrap(); - unsafe { - sqe.prep_close(fd); - } - sqe - }))?; - self.confirm_close(); - Poll::Ready(Ok(())) - } -} - -impl AsyncSeek for File { - fn poll_seek(mut self: Pin<&mut Self>, ctx: &mut Context, pos: io::SeekFrom) - -> Poll> - { - let (start, offset) = match pos { - io::SeekFrom::Start(n) => { - *self.as_mut().pos() = n; - return Poll::Ready(Ok(self.pos)); - } - io::SeekFrom::Current(n) => (self.pos, n), - io::SeekFrom::End(n) => { - (ready!(self.as_mut().poll_file_size(ctx))?, n) - } - }; - let valid_seek = if offset.is_negative() { - match start.checked_sub(offset.abs() as u64) { - Some(valid_seek) => valid_seek, - None => { - let invalid = io::Error::from(io::ErrorKind::InvalidInput); - return Poll::Ready(Err(invalid)); - } - } - } else { - match start.checked_add(offset as u64) { - Some(valid_seek) => valid_seek, - None => { - let overflow = io::Error::from_raw_os_error(libc::EOVERFLOW); - return Poll::Ready(Err(overflow)); - } - } - }; - *self.as_mut().pos() = valid_seek; - Poll::Ready(Ok(self.pos)) - } -} - -impl From> for fs::File { - fn from(mut file: File) -> fs::File { - file.cancel(); - let file = ManuallyDrop::new(file); - unsafe { - fs::File::from_raw_fd(file.fd) - } - } -} - -impl Drop for File { - fn drop(&mut self) { - match self.active { - Op::Closed => { } - Op::Nothing => unsafe { libc::close(self.fd); }, - _ => self.cancel(), - } - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/fs/create.rs b/runtime/asyncio/src/sys/linux/io_uring/fs/create.rs deleted file mode 100644 index fbcc760..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/fs/create.rs +++ /dev/null @@ -1,18 +0,0 @@ -use std::future::Future; -use futures_core::ready; -use super::*; - -pub(super) struct Create(pub(super) Submission); - -impl Future for Create { - type Output = io::Result>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut inner = unsafe { - self.map_unchecked_mut(|this| &mut this.0) - }; - let (_, ready) = ready!(inner.as_mut().poll(cx)); - let fd = ready? 
as i32; - Poll::Ready(Ok(File::from_fd(fd, inner.driver().clone()))) - } -} diff --git a/runtime/asyncio/src/sys/linux/io_uring/fs/open.rs b/runtime/asyncio/src/sys/linux/io_uring/fs/open.rs deleted file mode 100644 index 498aaf1..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/fs/open.rs +++ /dev/null @@ -1,18 +0,0 @@ -use std::future::Future; -use futures_core::ready; -use super::*; - -pub(super) struct Open(pub(super) Submission); - -impl Future for Open { - type Output = io::Result>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut inner = unsafe { - self.map_unchecked_mut(|this| &mut this.0) - }; - let (_, ready) = ready!(inner.as_mut().poll(cx)); - let fd = ready? as i32; - Poll::Ready(Ok(File::from_fd(fd, inner.driver().clone()))) - } -} \ No newline at end of file diff --git a/runtime/asyncio/src/sys/linux/io_uring/mod.rs b/runtime/asyncio/src/sys/linux/io_uring/mod.rs deleted file mode 100644 index 1c45b67..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ - -mod completion; -use completion::Completion; - -mod cancellation; -use cancellation::Cancellation; - -mod ring; -use ring::Ring; - -mod events; -use events::Event; - -mod submission; -use submission::Submission; - -mod driver; -use driver::Driver; - -mod fs; \ No newline at end of file diff --git a/runtime/asyncio/src/sys/linux/io_uring/ring.rs b/runtime/asyncio/src/sys/linux/io_uring/ring.rs deleted file mode 100644 index 5f0425b..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/ring.rs +++ /dev/null @@ -1,176 +0,0 @@ -use std::{io, mem}; -use std::pin::Pin; -use std::task::{Context, Poll}; -use iou::{SQE, SQEs}; -use super::{driver, Driver}; -use super::Completion; - -use futures_core::ready; -use crate::sys::linux::io_uring::cancellation::Cancellation; - -/// -pub struct Ring { - state: State, - driver: D, -} - -enum State { - Empty, - Prepared(Completion), - Submitted(Completion), - Cancelled(u64), - Lost, -} - -impl Ring { - pub fn new(driver: D) -> Self { - Self { - state: State::Empty, - driver, - } - } - - pub fn driver(&self) -> &D { - &self.driver - } - - fn split_pinned(self: Pin<&mut Self>) -> (&mut State, Pin<&mut D>) { - unsafe { - let this = Pin::get_unchecked_mut(self); - (&mut this.state, Pin::new_unchecked(&mut this.driver)) - } - } - - pub fn poll( - mut self: Pin<&mut Self>, - ctx: &mut Context<'_>, - count: u32, - prepare: impl for<'sq> FnOnce(&mut SQEs<'sq>) -> SQE<'sq>, - ) -> Poll> { - match self.state { - State::Empty => { - ready!(self.as_mut().poll_prepare_empty(ctx, count, prepare)); - ready!(self.as_mut().poll_submit(ctx)); - self.poll_complete(ctx) - }, - State::Cancelled(previous) => { - ready!(self.as_mut().poll_prepare_canceled(ctx, previous, count, prepare)); - ready!(self.as_mut().poll_submit(ctx)); - self.poll_complete(ctx) - }, - State::Prepared(_) => match self.as_mut().poll_complete(ctx) { - Poll::Pending => { - ready!(self.as_mut().poll_submit(ctx)); - self.poll_complete(ctx) - }, - ready @ Poll::Ready(_) => ready, - }, - State::Submitted(_) => self.poll_complete(ctx), - State::Lost => panic!("Lost events, ring is now in an invalid state"), - } - } - - fn poll_prepare_empty( - self: Pin<&mut Self>, - ctx: &mut Context<'_>, - count: u32, - prepare: impl for<'sq> FnOnce(&mut SQEs<'sq>) -> SQE<'sq>, - ) -> Poll<()> { - let (state, driver) = self.split_pinned(); - let completion = ready!(driver.poll_prepare(ctx, count, |mut sqes, ctx| { - *state = State::Lost; - let sqe = prepare(&mut sqes); - let completion = 
driver::Completion::new(sqe, sqes, ctx); - completion - })); - *state = State::Prepared(completion.into_inner()); - Poll::Ready(()) - } - - fn poll_prepare_canceled( - self: Pin<&mut Self>, - ctx: &mut Context<'_>, - previous: u64, - count: u32, - prepare: impl for<'sq> FnOnce(&mut SQEs<'sq>) -> SQE<'sq>, - ) -> Poll<()> { - let (mut state, driver) = self.split_pinned(); - let completion = ready!(driver.poll_prepare(ctx, count + 1, |mut sqes, ctx| { - *state = State::Lost; - unsafe { sqes.hard_linked().next().unwrap().prep_cancel(previous, 0); } - let sqe = prepare(&mut sqes); - let completion = driver::Completion::new(sqe, sqes, ctx); - completion - })); - *state = State::Prepared(completion.into_inner()); - Poll::Ready(()) - } - - fn poll_submit(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<()> { - let (state, driver) = self.split_pinned(); - let _ = ready!(driver.poll_submit(ctx)); - if let State::Prepared(completion) | State::Submitted(completion) - = mem::replace(state, State::Lost) - { - *state = State::Submitted(completion); - Poll::Ready(()) - } else { - unreachable!(); - } - } - - fn poll_complete(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll> { - let (state, driver) = self.split_pinned(); - match mem::replace(state, State::Lost) { - State::Prepared(completion) => { - ready!(driver.poll_complete(ctx, completion.addr())); - match completion.check(ctx.waker()) { - Ok(result) => { - *state = State::Empty; - Poll::Ready(result) - }, - Err(completion) => { - *state = State::Prepared(completion); - Poll::Pending - } - } - }, - State::Submitted(completion) => { - ready!(driver.poll_complete(ctx, completion.addr())); - match completion.check(ctx.waker()) { - Ok(result) => { - *state = State::Empty; - Poll::Ready(result) - }, - Err(completion) => { - *state = State::Submitted(completion); - Poll::Pending - } - } - }, - _ => unreachable!(), - } - } - - pub fn cancel_pinned(self: Pin<&mut Self>, cancellation: Cancellation) { - self.split_pinned().0.cancel(cancellation); - } - - pub fn cancel(&mut self, cancellation: Cancellation) { - self.state.cancel(cancellation) - } -} - -impl State { - fn cancel(&mut self, cancellation: Cancellation) { - match mem::replace(self, State::Lost) { - State::Submitted(completion) | State::Prepared(completion) => { - *self = State::Cancelled(completion.addr()); - completion.cancel(cancellation); - }, - state=> { - *self = state; - } - } - } -} \ No newline at end of file diff --git a/runtime/asyncio/src/sys/linux/io_uring/submission.rs b/runtime/asyncio/src/sys/linux/io_uring/submission.rs deleted file mode 100644 index 10a7c02..0000000 --- a/runtime/asyncio/src/sys/linux/io_uring/submission.rs +++ /dev/null @@ -1,48 +0,0 @@ -use std::future::Future; -use futures_core::ready; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; -use super::{Ring, Driver, Event}; - -pub struct Submission { - ring: Ring, - event: Option, -} - -impl Submission { - pub fn new(driver: D, event: E) -> Self { - Self { - ring: Ring::new(driver), - event: Some(event), - } - } - - pub fn driver(&self) -> &D { - self.ring.driver() - } - - fn split_pinned(self: Pin<&mut Self>) -> (Pin<&mut Ring>, &mut Option) { - unsafe { - let this = Pin::get_unchecked_mut(self); - (Pin::new_unchecked(&mut this.ring), &mut this.event) - } - } -} - -impl Future for Submission { - type Output = (E, io::Result); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let (ring, event) = self.split_pinned(); - - let result = if let Some(event) = event { - let count = 
E::sqes_needed(); - ready!(ring.poll(cx, count, |sqes| unsafe { event.prepare(sqes) })) - } else { - panic!("polled Submission after completion") - }; - - Poll::Ready((event.take().unwrap(), result)) - } -} diff --git a/runtime/asyncio/src/sys/linux/mod.rs b/runtime/asyncio/src/sys/linux/mod.rs deleted file mode 100644 index e2716cc..0000000 --- a/runtime/asyncio/src/sys/linux/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ - -#[cfg(feature = "io_uring")] -mod io_uring; -#[cfg(feature = "epoll")] -mod epoll; \ No newline at end of file diff --git a/runtime/asyncio/src/sys/mod.rs b/runtime/asyncio/src/sys/mod.rs deleted file mode 100644 index c2d2479..0000000 --- a/runtime/asyncio/src/sys/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -#[cfg(target_os = "linux")] -mod linux; \ No newline at end of file diff --git a/runtime/asyncio/src/syscall.rs b/runtime/asyncio/src/syscall.rs deleted file mode 100644 index 50bd4b1..0000000 --- a/runtime/asyncio/src/syscall.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::io; -use std::os::unix::prelude::RawFd; -use libc::{c_ulong, c_long}; -use crate::ctypes::{IORING_ENTER, IORING_REGISTER_OP}; -use super::ctypes::Params; - -const ENOMEM: i32 = 12; - -const SYS_SETUP: c_long = libc::SYS_io_uring_setup; -const SYS_ENTER: c_long = libc::SYS_io_uring_enter; -const SYS_REGISTER: c_long = libc::SYS_io_uring_register; - -/// Syscall io_uring_setup, creating the io_uring ringbuffers -pub fn setup(entries: u32, params: *mut Params) -> io::Result { - assert!((0 < entries && entries <= 4096), "entries must be between 1 and 4096"); - assert_eq!(entries.count_ones(), 1, "entries must be a power of two"); - - let retval = unsafe { - libc::syscall(SYS_SETUP, entries, params) - }; - if retval < 0 { - let err = io::Error::last_os_error(); - if let Some(ENOMEM) = err.raw_os_error() { - return Err(io::Error::new( - io::ErrorKind::Other, - "Failed to lock enough memory. You may need to increase the memlock limit using \ - rlimits" - )); - } - return Err(err); - } else { - Ok(retval as RawFd) - } -} - -static_assertions::assert_eq_size!(i64, c_long); - -/// enter io_uring, returning when at least `min_complete` events have been completed -pub fn enter(fd: RawFd, - to_submit: u32, - min_complete: u32, - flags: IORING_ENTER, - args: *const libc::c_void, - argsz: libc::size_t - -) -> io::Result { - let retval = unsafe { - libc::syscall(SYS_ENTER, fd, to_submit, min_complete, flags.bits(), args, argsz) - }; - if retval < 0 { - let err = io::Error::last_os_error(); - Err(err) - } else { - Ok(retval) - } -} - -/// Register buffers or file descriptors with the kernel for faster usage and not having to use -/// atomics. 
-pub fn register(fd: RawFd, opcode: IORING_REGISTER_OP, args: *const (), nargs: u32) - -> io::Result -{ - let retval = unsafe { - libc::syscall(SYS_REGISTER, fd, opcode, args, nargs) - }; - if retval < 0 { - let err = io::Error::last_os_error(); - Err(err) - } else { - Ok(retval) - } -} \ No newline at end of file diff --git a/runtime/executor/Cargo.toml b/runtime/executor/Cargo.toml index 5c27a14..a6ca3ec 100644 --- a/runtime/executor/Cargo.toml +++ b/runtime/executor/Cargo.toml @@ -13,6 +13,21 @@ exclude = [ "scripts/*", ] +[[bench]] +name = "perf" +harness = false +path = "benches/perf.rs" + +[[bench]] +name = "spawn" +harness = false +path = "benches/spawn.rs" + +[[bench]] +name = "stats" +harness = false +path = "benches/stats.rs" + [dependencies] lightproc = { path = "../lightproc" } @@ -24,6 +39,7 @@ lazy_static = "1.4" libc = "0.2" num_cpus = "1.13" pin-utils = "0.1.0" +slab = "0.4" # Allocator arrayvec = { version = "0.7.0" } @@ -32,3 +48,11 @@ once_cell = "1.4.0" lever = "0.1" tracing = "0.1.19" crossbeam-queue = "0.3.0" + +[dev-dependencies] +async-std = "1.10.0" +tracing = { version = "0.1.19", features = ["max_level_trace"]} +tracing-subscriber = "0.3.1" +futures-util = "0.3" +rand = "0.8" +criterion = "0.3" \ No newline at end of file diff --git a/runtime/executor/benches/perf.rs b/runtime/executor/benches/perf.rs index e8a588b..0bbeea2 100644 --- a/runtime/executor/benches/perf.rs +++ b/runtime/executor/benches/perf.rs @@ -1,25 +1,22 @@ -#![feature(test)] +use executor::prelude::*; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; -extern crate test; - -use bastion_executor::prelude::*; -use lightproc::proc_stack::ProcStack; -use test::{black_box, Bencher}; - -#[bench] -fn increment(b: &mut Bencher) { +fn increment(b: &mut Criterion) { let mut sum = 0; + let executor = Executor::new(); - b.iter(|| { - run( + b.bench_function("Executor::run", |b| b.iter(|| { + executor.run( async { (0..10_000_000).for_each(|_| { sum += 1; }); }, - ProcStack::default(), ); - }); + })); black_box(sum); } + +criterion_group!(perf, increment); +criterion_main!(perf); \ No newline at end of file diff --git a/runtime/executor/benches/spawn.rs b/runtime/executor/benches/spawn.rs index 02b896b..bade7f0 100644 --- a/runtime/executor/benches/spawn.rs +++ b/runtime/executor/benches/spawn.rs @@ -1,23 +1,16 @@ -#![feature(test)] - -extern crate test; - -use bastion_executor::load_balancer; -use bastion_executor::prelude::spawn; +use executor::load_balancer; +use executor::prelude::*; use futures_timer::Delay; -use lightproc::proc_stack::ProcStack; use std::time::Duration; -use test::Bencher; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; #[cfg(feature = "tokio-runtime")] -mod tokio_benchs { +mod benches { use super::*; - #[bench] - fn spawn_lot(b: &mut Bencher) { + pub fn spawn_lot(b: &mut Bencher) { tokio_test::block_on(async { _spawn_lot(b) }); } - #[bench] - fn spawn_single(b: &mut Bencher) { + pub fn spawn_single(b: &mut Bencher) { tokio_test::block_on(async { _spawn_single(b); }); @@ -25,46 +18,47 @@ mod tokio_benchs { } #[cfg(not(feature = "tokio-runtime"))] -mod no_tokio_benchs { +mod benches { use super::*; - #[bench] - fn spawn_lot(b: &mut Bencher) { + + pub fn spawn_lot(b: &mut Criterion) { _spawn_lot(b); } - #[bench] - fn spawn_single(b: &mut Bencher) { + pub fn spawn_single(b: &mut Criterion) { _spawn_single(b); } + } +criterion_group!(spawn, benches::spawn_lot, benches::spawn_single); +criterion_main!(spawn); + // Benchmark for a 10K burst task 
spawn -fn _spawn_lot(b: &mut Bencher) { - let proc_stack = ProcStack::default(); - b.iter(|| { +fn _spawn_lot(b: &mut Criterion) { + let executor = Executor::new(); + b.bench_function("spawn_lot", |b| b.iter(|| { let _ = (0..10_000) .map(|_| { - spawn( + executor.spawn( async { let duration = Duration::from_millis(1); Delay::new(duration).await; }, - proc_stack.clone(), ) }) .collect::>(); - }); + })); } // Benchmark for a single task spawn -fn _spawn_single(b: &mut Bencher) { - let proc_stack = ProcStack::default(); - b.iter(|| { - spawn( +fn _spawn_single(b: &mut Criterion) { + let executor = Executor::new(); + b.bench_function("spawn single", |b| b.iter(|| { + executor.spawn( async { let duration = Duration::from_millis(1); Delay::new(duration).await; }, - proc_stack.clone(), ); - }); + })); } diff --git a/runtime/executor/benches/stats.rs b/runtime/executor/benches/stats.rs index 684e7cb..84679f1 100644 --- a/runtime/executor/benches/stats.rs +++ b/runtime/executor/benches/stats.rs @@ -1,10 +1,7 @@ -#![feature(test)] - -extern crate test; -use bastion_executor::load_balancer::{core_count, get_cores, stats, SmpStats}; -use bastion_executor::placement; +use executor::load_balancer::{core_count, get_cores, stats, SmpStats}; +use executor::placement; use std::thread; -use test::Bencher; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; fn stress_stats(stats: &'static S) { let mut handles = Vec::with_capacity(*core_count()); @@ -29,15 +26,13 @@ fn stress_stats(stats: &'static S) { // previous lock based stats benchmark 1,352,791 ns/iter (+/- 2,682,013) // 158,278 ns/iter (+/- 117,103) -#[bench] -fn lockless_stats_bench(b: &mut Bencher) { - b.iter(|| { +fn lockless_stats_bench(b: &mut Criterion) { + b.bench_function("stress_stats", |b| b.iter(|| { stress_stats(stats()); - }); + })); } -#[bench] -fn lockless_stats_bad_load(b: &mut Bencher) { +fn lockless_stats_bad_load(b: &mut Criterion) { let stats = stats(); const MAX_CORE: usize = 256; for i in 0..MAX_CORE { @@ -50,13 +45,12 @@ fn lockless_stats_bad_load(b: &mut Bencher) { } } - b.iter(|| { + b.bench_function("get_sorted_load", |b| b.iter(|| { let _sorted_load = stats.get_sorted_load(); - }); + })); } -#[bench] -fn lockless_stats_good_load(b: &mut Bencher) { +fn lockless_stats_good_load(b: &mut Criterion) { let stats = stats(); const MAX_CORE: usize = 256; for i in 0..MAX_CORE { @@ -65,7 +59,11 @@ fn lockless_stats_good_load(b: &mut Bencher) { stats.store_load(i, i); } - b.iter(|| { + b.bench_function("get_sorted_load", |b| b.iter(|| { let _sorted_load = stats.get_sorted_load(); - }); + })); } + +criterion_group!(stats_bench, lockless_stats_bench, lockless_stats_bad_load, + lockless_stats_good_load); +criterion_main!(stats_bench); diff --git a/runtime/executor/examples/spawn_async.rs b/runtime/executor/examples/spawn_async.rs index 250f433..0a6e497 100644 --- a/runtime/executor/examples/spawn_async.rs +++ b/runtime/executor/examples/spawn_async.rs @@ -1,42 +1,119 @@ use std::io::Write; use std::panic::resume_unwind; +use std::rc::Rc; use std::time::Duration; +use futures_util::{stream::FuturesUnordered, Stream}; +use futures_util::{FutureExt, StreamExt}; use executor::pool; use executor::prelude::*; +use lightproc::prelude::RecoverableHandle; fn main() { - std::panic::set_hook(Box::new(|info| { + tracing_subscriber::fmt() + .with_max_level(tracing::Level::DEBUG) + .init(); + + let hook = std::panic::take_hook(); + std::panic::set_hook(Box::new(move |info| { + let span = tracing::span!(tracing::Level::ERROR, "panic 
hook").entered(); let tid = std::thread::current().id(); - println!("Panicking ThreadId: {:?}", tid); - std::io::stdout().flush(); - println!("panic hook: {:?}", info); + tracing::error!("Panicking ThreadId: {:?}", tid); + tracing::error!("{}", info); + span.exit(); })); - let tid = std::thread::current().id(); - println!("Main ThreadId: {:?}", tid); - let handle = spawn( + let executor = Executor::new(); + + let mut handles: FuturesUnordered> = (0..2000).map(|n| { + executor.spawn( + async move { + let m: u64 = rand::random::() % 200; + tracing::debug!("Will sleep {} * 1 ms", m); + // simulate some really heavy load. + for i in 0..m { + async_std::task::sleep(Duration::from_millis(1)).await; + } + return n; + }, + ) + }).collect(); + //let handle = handles.fuse().all(|opt| async move { opt.is_some() }); + + /* Futures passed to `spawn` need to be `Send` so this won't work: + * let n = 1; + * let unsend = spawn(async move { + * let rc = Rc::new(n); + * let tid = std::thread::current().id(); + * tracing::info!("!Send fut {} running on thread {:?}", *rc, tid); + * async_std::task::sleep(Duration::from_millis(20)).await; + * tracing::info!("!Send fut {} still running on thread {:?}!", *rc, tid); + * async_std::task::sleep(Duration::from_millis(20)).await; + * tracing::info!("!Send fut {} still running on thread {:?}!", *rc, tid); + * async_std::task::sleep(Duration::from_millis(20)).await; + * *rc + * }); + */ + + // But you can use `spawn_local` which will make sure to never Send your task to other threads. + // However, you can't pass it a future outright but have to hand it a generator creating the + // future on the correct thread. + let fut = async { + let local_futs: FuturesUnordered<_> = (0..200).map(|ref n| { + let n = *n; + let exe = executor.clone(); + async move { + exe.spawn( + async { + let tid = std::thread::current().id(); + tracing::info!("spawn_local({}) is on thread {:?}", n, tid); + exe.spawn_local(async move { + let rc = Rc::new(n); + + let tid = std::thread::current().id(); + tracing::info!("!Send fut {} running on thread {:?}", *rc, tid); + + async_std::task::sleep(Duration::from_millis(20)).await; + + let tid2 = std::thread::current().id(); + tracing::info!("!Send fut {} still running on thread {:?}!", *rc, tid2); + assert_eq!(tid, tid2); + + async_std::task::sleep(Duration::from_millis(20)).await; + + let tid3 = std::thread::current().id(); + tracing::info!("!Send fut {} still running on thread {:?}!", *rc, tid3); + assert_eq!(tid2, tid3); + + *rc + }) + } + ).await + } + }).collect(); + local_futs + }; + + let a = async move { + let mut local_futs = fut.await; + while let Some(fut) = local_futs.next().await { + assert!(fut.is_some()); + tracing::info!("local fut returned {:?}", fut.unwrap().await) + } + while let Some(a) = handles.next().await { + assert!(a.is_some()); + tracing::info!("shared fut returned {}", a.unwrap()) + } + }; + let b = async move { + async_std::task::sleep(Duration::from_secs(20)).await; + tracing::info!("This is taking too long."); + }; + executor.run( async { - panic!("test"); + let res = futures_util::select! 
{ + _ = a.fuse() => {}, + _ = b.fuse() => {}, + }; }, ); - - run( - async { - handle.await; - }, - ProcStack {}, - ); - - let pool = pool::get(); - let manager = pool::get_manager().unwrap(); - println!("After panic: {:?}", pool); - println!("{:#?}", manager); - - let h = std::thread::spawn(|| { - panic!("This is a test"); - }); - - std::thread::sleep(Duration::from_secs(30)); - - println!("After panic"); } diff --git a/runtime/executor/src/lib.rs b/runtime/executor/src/lib.rs index 4e0a14e..dd851e4 100644 --- a/runtime/executor/src/lib.rs +++ b/runtime/executor/src/lib.rs @@ -20,9 +20,6 @@ //! [lightproc]: https://docs.rs/lightproc //! -#![doc( - html_logo_url = "https://raw.githubusercontent.com/bastion-rs/bastion/master/img/bastion-logo.png" -)] // Force missing implementations #![warn(missing_docs)] #![warn(missing_debug_implementations)] @@ -30,21 +27,15 @@ #![forbid(unused_must_use)] #![forbid(unused_import_braces)] -pub mod blocking; pub mod load_balancer; pub mod placement; pub mod pool; pub mod run; -pub mod sleepers; mod thread_manager; -pub mod worker; -mod proc_stack; +mod worker; /// /// Prelude of Bastion Executor pub mod prelude { - pub use crate::blocking::*; pub use crate::pool::*; - pub use crate::run::*; - pub use crate::proc_stack::*; } diff --git a/runtime/executor/src/pool.rs b/runtime/executor/src/pool.rs index 98ab569..e65a7ac 100644 --- a/runtime/executor/src/pool.rs +++ b/runtime/executor/src/pool.rs @@ -7,217 +7,217 @@ //! [`spawn`]: crate::pool::spawn //! [`Worker`]: crate::run_queue::Worker -use crate::thread_manager::{DynamicPoolManager, DynamicRunner}; -use crate::worker; -use crossbeam_channel::{unbounded, Receiver, Sender}; -use lazy_static::lazy_static; +use std::cell::Cell; +use crate::thread_manager::{ThreadManager, DynamicRunner}; use lightproc::lightproc::LightProc; use lightproc::recoverable_handle::RecoverableHandle; -use once_cell::sync::{Lazy, OnceCell}; use std::future::Future; use std::iter::Iterator; +use std::marker::PhantomData; +use std::mem::MaybeUninit; +use std::sync::Arc; use std::time::Duration; -use std::{env, thread}; -use tracing::trace; +use crossbeam_deque::{Injector, Stealer}; +use crate::run::block; +use crate::worker::{Sleeper, WorkerThread}; -/// -/// Spawn a process (which contains future + process stack) onto the executor from the global level. -/// -/// # Example -/// ```rust -/// use executor::prelude::*; -/// -/// # #[cfg(feature = "tokio-runtime")] -/// # #[tokio::main] -/// # async fn main() { -/// # start(); -/// # } -/// # -/// # #[cfg(not(feature = "tokio-runtime"))] -/// # fn main() { -/// # start(); -/// # } -/// # -/// # fn start() { -/// -/// let handle = spawn( -/// async { -/// panic!("test"); -/// }, -/// ); -/// -/// run( -/// async { -/// handle.await; -/// }, -/// ProcStack { }, -/// ); -/// # } -/// ``` -pub fn spawn(future: F) -> RecoverableHandle -where - F: Future + Send + 'static, - R: Send + 'static, -{ - let (task, handle) = LightProc::recoverable(future, worker::schedule); - task.schedule(); - handle +#[derive(Debug)] +struct Spooler<'a> { + pub spool: Arc>, + threads: &'a ThreadManager, + _marker: PhantomData<&'a ()>, } -/// Spawns a blocking task. -/// -/// The task will be spawned onto a thread pool specifically dedicated to blocking tasks. 
-pub fn spawn_blocking(future: F) -> RecoverableHandle -where - F: Future + Send + 'static, - R: Send + 'static, -{ - let (task, handle) = LightProc::recoverable(future, schedule); - task.schedule(); - handle +impl Spooler<'_> { + pub fn new() -> Self { + let spool = Arc::new(Injector::new()); + let threads = Box::leak(Box::new( + ThreadManager::new(2, AsyncRunner, spool.clone()))); + threads.initialize(); + Self { spool, threads, _marker: PhantomData } + } } -/// -/// Acquire the static Pool reference -#[inline] -pub fn get() -> &'static Pool { - &*POOL +#[derive(Clone, Debug)] +pub struct Executor<'a> { + spooler: Arc>, } -pub fn get_manager() -> Option<&'static DynamicPoolManager> { - DYNAMIC_POOL_MANAGER.get() -} +impl<'a, 'executor: 'a> Executor<'executor> { + pub fn new() -> Self { + Executor { + spooler: Arc::new(Spooler::new()), + } + } + + fn schedule(&self) -> impl Fn(LightProc) + 'a { + let task_queue = self.spooler.spool.clone(); + move |lightproc: LightProc| { + task_queue.push(lightproc) + } + } -impl Pool { /// - /// Spawn a process (which contains future + process stack) onto the executor via [Pool] interface. + /// Spawn a process (which contains future + process stack) onto the executor from the global level. + /// + /// # Example + /// ```rust + /// use executor::prelude::*; + /// + /// # #[cfg(feature = "tokio-runtime")] + /// # #[tokio::main] + /// # async fn main() { + /// # start(); + /// # } + /// # + /// # #[cfg(not(feature = "tokio-runtime"))] + /// # fn main() { + /// # start(); + /// # } + /// # + /// # fn start() { + /// + /// let executor = Spooler::new(); + /// + /// let handle = executor.spawn( + /// async { + /// panic!("test"); + /// }, + /// ); + /// + /// executor.run( + /// async { + /// handle.await; + /// } + /// ); + /// # } + /// ``` pub fn spawn(&self, future: F) -> RecoverableHandle - where - F: Future + Send + 'static, - R: Send + 'static, + where + F: Future + Send + 'a, + R: Send + 'a, { - let (task, handle) = LightProc::recoverable(future, worker::schedule); + let (task, handle) = + LightProc::recoverable(future, self.schedule()); task.schedule(); handle } -} -/// Enqueues work, attempting to send to the thread pool in a -/// nonblocking way and spinning up needed amount of threads -/// based on the previous statistics without relying on -/// if there is not a thread ready to accept the work or not. -pub(crate) fn schedule(t: LightProc) { - if let Err(err) = POOL.sender.try_send(t) { - // We were not able to send to the channel without - // blocking. - POOL.sender.send(err.into_inner()).unwrap(); - } - // Add up for every incoming scheduled task - DYNAMIC_POOL_MANAGER.get().unwrap().increment_frequency(); -} - -/// -/// Low watermark value, defines the bare minimum of the pool. -/// Spawns initial thread set. -/// Can be configurable with env var `BASTION_BLOCKING_THREADS` at runtime. -#[inline] -fn low_watermark() -> &'static u64 { - lazy_static! { - static ref LOW_WATERMARK: u64 = { - env::var_os("BASTION_BLOCKING_THREADS") - .map(|x| x.to_str().unwrap().parse::().unwrap()) - .unwrap_or(DEFAULT_LOW_WATERMARK) - }; + pub fn spawn_local(&self, future: F) -> RecoverableHandle + where + F: Future + 'a, + R: Send + 'a, + { + let (task, handle) = + LightProc::recoverable(future, schedule_local()); + task.schedule(); + handle } - &*LOW_WATERMARK -} + /// Block the calling thread until the given future completes. 
+ /// + /// # Example + /// ```rust + /// use executor::prelude::*; + /// use lightproc::prelude::*; + /// + /// let executor = Spooler::new(); + /// + /// let mut sum = 0; + /// + /// executor.run( + /// async { + /// (0..10_000_000).for_each(|_| { + /// sum += 1; + /// }); + /// } + /// ); + /// ``` + pub fn run(&self, future: F) -> R + where + F: Future, + { + unsafe { + // An explicitly uninitialized `R`. Until `assume_init` is called this will not call any + // drop code for R + let mut out = MaybeUninit::uninit(); -/// If low watermark isn't configured this is the default scaler value. -/// This value is used for the heuristics of the scaler -const DEFAULT_LOW_WATERMARK: u64 = 2; + // Wrap the future into one that stores the result into `out`. + let future = { + let out = out.as_mut_ptr(); -/// Pool interface between the scheduler and thread pool -#[derive(Debug)] -pub struct Pool { - sender: Sender, - receiver: Receiver, + async move { + *out = future.await; + } + }; + + // Pin the future onto the stack. + pin_utils::pin_mut!(future); + + // Block on the future and and wait for it to complete. + block(future); + + // Assume that if the future completed and didn't panic it fully initialized its output + out.assume_init() + } + } } #[derive(Debug)] -pub struct AsyncRunner { - -} +struct AsyncRunner; impl DynamicRunner for AsyncRunner { - fn run_static(&self, park_timeout: Duration) -> ! { - loop { - for task in &POOL.receiver { - trace!("static: running task"); - self.run(task); - } + fn setup(task_queue: Arc>) -> Sleeper { + let (worker, sleeper) = WorkerThread::new(task_queue); + install_worker(worker); - trace!("static: empty queue, parking with timeout"); - thread::park_timeout(park_timeout); - } + sleeper } - fn run_dynamic(&self, parker: impl Fn()) -> ! { - loop { - while let Ok(task) = POOL.receiver.try_recv() { - trace!("dynamic thread: running task"); - self.run(task); - } - trace!( - "dynamic thread: parking - {:?}", - std::thread::current().id() - ); - parker(); - } + + fn run_static<'b>(fences: impl Iterator>, park_timeout: Duration) -> ! { + let worker = get_worker(); + worker.run_timeout(fences, park_timeout) } - fn run_standalone(&self) { - while let Ok(task) = POOL.receiver.try_recv() { - self.run(task); - } - trace!("standalone thread: quitting."); + + fn run_dynamic<'b>(fences: impl Iterator>) -> ! { + let worker = get_worker(); + worker.run(fences) + } + + fn run_standalone<'b>(fences: impl Iterator>) { + let worker = get_worker(); + worker.run_once(fences) } } -impl AsyncRunner { - fn run(&self, task: LightProc) { - task.run(); - } +thread_local! { + static WORKER: Cell>> = Cell::new(None); } -static DYNAMIC_POOL_MANAGER: OnceCell> = OnceCell::new(); - -static POOL: Lazy = Lazy::new(|| { - #[cfg(feature = "tokio-runtime")] - { - let runner = AsyncRunner { - // We use current() here instead of try_current() - // because we want bastion to crash as soon as possible - // if there is no available runtime. 
- runtime_handle: tokio::runtime::Handle::current(), +fn get_worker() -> &'static WorkerThread<'static, LightProc> { + WORKER.with(|cell| { + let worker = unsafe { + &*cell.as_ptr() as &'static Option> }; + worker.as_ref() + .expect("AsyncRunner running outside Executor context") + }) +} - DYNAMIC_POOL_MANAGER - .set(DynamicPoolManager::new(*low_watermark() as usize, runner)) - .expect("couldn't create dynamic pool manager"); +fn install_worker(worker_thread: WorkerThread<'static, LightProc>) { + WORKER.with(|cell| { + cell.replace(Some(worker_thread)); + }); +} + +fn schedule_local() -> impl Fn(LightProc) { + let worker = get_worker(); + let unparker = worker.unparker().clone(); + move |lightproc| { + // This is safe because we never replace the value in that Cell and thus never drop the + // SharedWorker pointed to. + worker.schedule_local(lightproc); + // We have to unpark the worker thread for our task to be run. + unparker.unpark(); } - #[cfg(not(feature = "tokio-runtime"))] - { - let runner = AsyncRunner {}; - - DYNAMIC_POOL_MANAGER - .set(DynamicPoolManager::new(*low_watermark() as usize, runner)) - .expect("couldn't create dynamic pool manager"); - } - - DYNAMIC_POOL_MANAGER - .get() - .expect("couldn't get static pool manager") - .initialize(); - - let (sender, receiver) = unbounded(); - Pool { sender, receiver } -}); +} \ No newline at end of file diff --git a/runtime/executor/src/run.rs b/runtime/executor/src/run.rs index 0f23bcd..4e29887 100644 --- a/runtime/executor/src/run.rs +++ b/runtime/executor/src/run.rs @@ -1,73 +1,15 @@ //! //! Blocking run of the async processes //! -//! -use crate::worker; + use crossbeam_utils::sync::{Parker, Unparker}; use std::cell::Cell; use std::future::Future; -use std::mem; -use std::mem::{ManuallyDrop, MaybeUninit}; -use std::pin::Pin; +use std::mem::ManuallyDrop; +use std::ops::Deref; use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; -use crate::proc_stack::ProcStack; -/// -/// This method blocks the current thread until passed future is resolved with an output. -/// -/// It is called `block_on` or `blocking` in some executors. -/// -/// # Example -/// ```rust -/// use executor::prelude::*; -/// use lightproc::prelude::*; -/// let mut sum = 0; -/// -/// run( -/// async { -/// (0..10_000_000).for_each(|_| { -/// sum += 1; -/// }); -/// }, -/// ProcStack::default(), -/// ); -/// ``` -pub fn run(future: F, stack: ProcStack) -> T -where - F: Future, -{ - unsafe { - // An explicitly uninitialized `T`. Until `assume_init` is called this will not call any - // drop code for T - let mut out = MaybeUninit::uninit(); - - // Wrap the future into one that stores the result into `out`. - let future = { - let out = out.as_mut_ptr(); - - async move { - *out = future.await; - } - }; - - // Pin the future onto the stack. - pin_utils::pin_mut!(future); - - // Extend the lifetime of the future to 'static. - let future = mem::transmute::< - Pin<&'_ mut dyn Future>, - Pin<&'static mut dyn Future>, - >(future); - - // Block on the future and and wait for it to complete. - worker::set_stack(&stack, || block(future)); - - // Assume that if the future completed and didn't panic it fully initialized its output - out.assume_init() - } -} - -fn block(f: F) -> T +pub(crate) fn block(f: F) -> T where F: Future, { @@ -116,9 +58,10 @@ fn vtable() -> &'static RawWakerVTable { /// original RawWaker. unsafe fn clone_raw(ptr: *const ()) -> RawWaker { // [`Unparker`] implements `Clone` and upholds the contract stated above. 
The current - // Implementation is simply an Arc over the actual inner values. - let unparker = Unparker::from_raw(ptr).clone(); - RawWaker::new(Unparker::into_raw(unparker), vtable()) + // Implementation is simply an Arc over the actual inner values. However clone takes the + // original value by reference so we need to make sure to not drop it. + let unparker = ManuallyDrop::new(Unparker::from_raw(ptr)); + RawWaker::new(Unparker::into_raw(unparker.deref().clone()), vtable()) } /// This function will be called when wake is called on the Waker. It must wake up the task diff --git a/runtime/executor/src/thread_manager.rs b/runtime/executor/src/thread_manager.rs index ba76d2a..6870f0c 100644 --- a/runtime/executor/src/thread_manager.rs +++ b/runtime/executor/src/thread_manager.rs @@ -52,22 +52,21 @@ use fmt::{Debug, Formatter}; use lazy_static::lazy_static; use lever::prelude::TTas; use placement::CoreId; -use std::collections::{HashMap, VecDeque}; +use std::collections::VecDeque; use std::time::Duration; use std::{ sync::{ atomic::{AtomicU64, Ordering}, Mutex, }, - thread::{self, Thread}, + thread, }; -use std::any::Any; -use std::panic::resume_unwind; -use std::thread::{JoinHandle, ThreadId}; -use crossbeam_deque::Worker; -use crossbeam_utils::sync::{Parker, Unparker}; +use std::sync::{Arc, RwLock}; +use crossbeam_channel::bounded; +use crossbeam_deque::{Injector, Stealer}; use tracing::{debug, trace}; use lightproc::lightproc::LightProc; +use crate::worker::Sleeper; /// The default thread park timeout before checking for new tasks. const THREAD_PARK_TIMEOUT: Duration = Duration::from_millis(1); @@ -113,16 +112,16 @@ lazy_static! { /// run_standalone should return once it has no more tasks to process. /// The `DynamicPoolManager` will spawn other standalone threads if needs be. pub trait DynamicRunner { - fn run_static(&self, park_timeout: Duration) -> ! { - let parker = Parker::new(); - self.run_dynamic(|| parker.park_timeout(park_timeout)); - } - fn run_dynamic(&self, parker: impl Fn()) -> !; - fn run_standalone(&self); + fn setup(task_queue: Arc>) -> Sleeper; + + fn run_static<'b>(fences: impl Iterator>, + park_timeout: Duration) -> !; + fn run_dynamic<'b>(fences: impl Iterator>) -> !; + fn run_standalone<'b>(fences: impl Iterator>); } -/// The `DynamicPoolManager` is responsible for -/// growing and shrinking a pool according to EMA rules. +/// The `ThreadManager` is creates and destroys worker threads depending on demand according to +/// EMA rules. /// /// It needs to be passed a structure that implements `DynamicRunner`, /// That will be responsible for actually spawning threads. @@ -159,21 +158,38 @@ pub trait DynamicRunner { /// /// If you use tracing, you can have a look at the trace! logs generated by the structure. 
/// -pub struct DynamicPoolManager { +pub struct ThreadManager { static_threads: usize, dynamic_threads: usize, - parked_threads: ArrayQueue, + parked_threads: ArrayQueue>, + + task_queue: Arc>, + fences: Arc>>>, + runner: Runner, last_frequency: AtomicU64, frequencies: TTas>, } -impl Debug for DynamicPoolManager { +impl Debug for ThreadManager { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + struct ThreadCount<'a>(&'a usize, &'a usize, &'a usize); + impl<'a> Debug for ThreadCount<'a> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("") + .field("static", self.0) + .field("dynamic", self.1) + .field("parked", self.2) + .finish() + } + } + fmt.debug_struct("DynamicPoolManager") - .field("static_threads", &self.static_threads) - .field("dynamic_threads", &self.dynamic_threads) - .field("parked_threads", &self.parked_threads.len()) + .field("thread pool", &ThreadCount( + &self.static_threads, + &self.dynamic_threads, + &self.parked_threads.len(), + )) .field("runner", &self.runner) .field("last_frequency", &self.last_frequency) .field("frequencies", &self.frequencies.try_lock()) @@ -181,14 +197,20 @@ impl Debug for DynamicPoolManager { } } -impl DynamicPoolManager { - pub fn new(static_threads: usize, runner: Runner) -> Self { +impl ThreadManager { + pub fn new(static_threads: usize, runner: Runner, task_queue: Arc>) -> Self { let dynamic_threads = 1.max(num_cpus::get().checked_sub(static_threads).unwrap_or(0)); + let parked_threads = ArrayQueue::new(1.max(static_threads + dynamic_threads)); + let fences = Arc::new(RwLock::new(Vec::new())); Self { static_threads, dynamic_threads, - parked_threads: ArrayQueue::new(dynamic_threads), + parked_threads, + + task_queue, + fences, + runner, last_frequency: AtomicU64::new(0), frequencies: TTas::new(VecDeque::with_capacity( @@ -203,42 +225,79 @@ impl DynamicPoolManager { /// Initialize the dynamic pool /// That will be scaled - pub fn initialize(&'static self) { + pub(crate) fn initialize(&'static self) { + let (tx, rx) = bounded(self.static_threads + self.dynamic_threads); + + let fencelock = &self.fences; + let _guard = fencelock.write().unwrap(); + + let mut i = 0; + // Static thread manager that will always be available - trace!("spooling up {} static worker threads", self.static_threads); - (0..self.static_threads).for_each(|n| { - let runner = &self.runner; + debug!("spooling up {} static worker threads", self.static_threads); + (0..self.static_threads).for_each(|_| { + let tx = tx.clone(); + let fencelock = fencelock.clone(); + let task_queue = self.task_queue.clone(); thread::Builder::new() - .name(format!("static #{}", n)) + .name(format!("rt({}) [static]", i)) .spawn(move || { Self::affinity_pinner(); - runner.run_static(THREAD_PARK_TIMEOUT); + + let sleeper = Runner::setup(task_queue); + tx.send(sleeper).expect("Failed to push to parked_threads"); + drop(tx); + + let fencelock = fencelock.clone(); + let fences = fencelock.read().unwrap(); + + Runner::run_static(fences.iter(), THREAD_PARK_TIMEOUT); }) .expect("failed to spawn static worker thread"); + i += 1; }); // Dynamic thread manager that will allow us to unpark threads when needed - trace!("spooling up {} dynamic worker threads", self.dynamic_threads); - (0..self.dynamic_threads).for_each(|n| { - let runner = &self.runner; + debug!("spooling up {} dynamic worker threads", self.dynamic_threads); + (0..self.dynamic_threads).for_each(|_| { + let tx = tx.clone(); + let fencelock = fencelock.clone(); + let task_queue = self.task_queue.clone(); 
thread::Builder::new() - .name(format!("dynamic #{}", n)) + .name(format!("rt({}) [dyn]", i)) .spawn(move || { Self::affinity_pinner(); - let parker = Parker::new(); - let unparker = parker.unparker(); - runner.run_dynamic(|| self.park_thread(&parker, unparker)); + + let sleeper = Runner::setup(task_queue); + tx.send(sleeper).expect("Failed to push to parked_threads"); + drop(tx); + + let fences = fencelock.read().unwrap(); + + Runner::run_dynamic(fences.iter()); }) .expect("failed to spawn dynamic worker thread"); + i += 1; }); + drop(tx); + + let span = tracing::span!(tracing::Level::INFO, "sleepers").entered(); + tracing::info!("Spawned {} threads", i); + for _ in 0..i { + let sleeper = rx.recv().unwrap(); + tracing::info!("{:?}", &sleeper); + self.parked_threads.push(sleeper).unwrap(); + } + span.exit(); + // Pool manager to check frequency of task rates // and take action by scaling the pool accordingly. thread::Builder::new() .name("pool manager".to_string()) .spawn(move || { let poll_interval = Duration::from_millis(SCALER_POLL_INTERVAL); - trace!("setting up the pool manager"); + debug!("setting up the pool manager"); loop { self.scale_pool(); thread::park_timeout(poll_interval); @@ -249,56 +308,46 @@ impl DynamicPoolManager { /// Provision threads takes a number of threads that need to be made available. /// It will try to unpark threads from the dynamic pool, and spawn more threads if needs be. - pub fn provision_threads(&'static self, n: usize) { - for i in 0..n { - if !self.unpark_thread() { - let new_threads = n - i; - trace!( - "no more threads to unpark, spawning {} new threads", - new_threads - ); - return self.spawn_threads(new_threads); - } + pub fn provision_threads(&'static self, + n: usize, + fencelock: &Arc>>>) + { + let rem = self.unpark_thread(n); + if rem != 0 { + debug!("no more threads to unpark, spawning {} new threads", rem); + //self.spawn_threads(rem, fencelock); } } - fn spawn_threads(&'static self, n: usize) { + fn spawn_threads(&'static self, n: usize, fencelock: &Arc>>>) { (0..n).for_each(|_| { - let runner = &self.runner; + let fencelock = fencelock.clone(); + let task_queue = self.task_queue.clone(); thread::Builder::new() .name("standalone worker".to_string()) .spawn(move || { Self::affinity_pinner(); - runner.run_standalone(); + let _ = Runner::setup(task_queue); + let fences = fencelock.read().unwrap(); + Runner::run_standalone(fences.iter()); }) .unwrap(); }) } - /// Parks a thread until [`unpark_thread`] unparks it - pub fn park_thread(&self, parker: &Parker, unparker: &Unparker) { - if let Err(unparker) = self.parked_threads - // Unparker is an Arc internally so this is (comparatively) cheap to do. - .push(unparker.clone()) { - panic!("Failed to park with {:?}", unparker); + /// Tries to unpark the given number of threads. + /// Returns `num - (number of threads unparked)` + fn unpark_thread(&self, num: usize) -> usize { + let len = self.parked_threads.len(); + debug!("parked_threads: len is {}", len); + // Only check threads once + for _ in 0..len { + if let Some(thread) = self.parked_threads.pop() { + thread.wakeup(); + } } - trace!("parking thread {:?}", std::thread::current().id()); - parker.park(); - } - - /// Pops a thread from the parked_threads queue and unparks it. 
- /// - /// Returns true if there were threads to unpark - fn unpark_thread(&self) -> bool { - trace!("parked_threads: len is {}", self.parked_threads.len()); - if let Some(unparker) = self.parked_threads.pop() { - debug!("Unparking thread with {:?}", &unparker); - unparker.unpark(); - true - } else { - false - } + num } /// Affinity pinner for blocking pool @@ -372,6 +421,7 @@ impl DynamicPoolManager { // Calculates current time window's EMA value (including last sample) let curr_ema_frequency = Self::calculate_ema(&freq_queue); + trace!("Current EMA freq: {}", curr_ema_frequency); // Adapts the thread count of pool // @@ -389,7 +439,7 @@ impl DynamicPoolManager { trace!("unparking {} threads", scale); // It is time to scale the pool! - self.provision_threads(scale); + self.provision_threads(scale, &self.fences); } else if (curr_ema_frequency - prev_ema_frequency).abs() < f64::EPSILON && current_frequency != 0 { @@ -398,7 +448,7 @@ impl DynamicPoolManager { // For unblock the flow we should add up some threads to the pool, but not that many to // stagger the program's operation. trace!("unparking {} threads", DEFAULT_LOW_WATERMARK); - self.provision_threads(DEFAULT_LOW_WATERMARK as usize); + self.provision_threads(DEFAULT_LOW_WATERMARK as usize, &self.fences); } } } diff --git a/runtime/executor/src/worker.rs b/runtime/executor/src/worker.rs index e69de29..8d10531 100644 --- a/runtime/executor/src/worker.rs +++ b/runtime/executor/src/worker.rs @@ -0,0 +1,174 @@ +use std::marker::PhantomData; +use std::sync::Arc; +use std::time::Duration; +use crossbeam_deque::{Injector, Steal, Stealer, Worker}; +use crossbeam_queue::SegQueue; +use crossbeam_utils::sync::{Parker, Unparker}; +use lightproc::prelude::LightProc; + +pub trait Runnable { + fn run(self); +} +impl Runnable for LightProc { + fn run(self) { + LightProc::run(self) + } +} + +#[derive(Debug)] +/// A thread worker pulling tasks from a shared injector queue and executing them +pub(crate) struct WorkerThread<'a, Task> { + /// Shared task queue + task_queue: Arc>, + + /// This threads task queue. For efficiency reasons worker threads pull a batch of tasks + /// from the injector queue and work on them instead of pulling them one by one. Should the + /// global queue become empty worker threads can steal tasks from each other. + tasks: Worker, + + /// Queue of `!Send` tasks that have to be entirely ran on this thread and must not be moved + /// or stolen to other threads. + local_tasks: SegQueue, + + /// Thread parker. + /// + /// A worker thread will park when there is no more work it can do. Work threads can be + /// unparked by either a local task being woken up or by the Executor owning the Injector queue. + parker: Parker, + + _marker: PhantomData<&'a ()>, +} + +#[derive(Debug)] +pub struct Sleeper { + stealer: Stealer, + unparker: Unparker, +} + +impl Sleeper { + pub fn wakeup(&self) { + self.unparker.unpark(); + } +} + +impl<'a, T: Runnable + 'a> WorkerThread<'a, T> { + pub fn new(task_queue: Arc>) -> (WorkerThread<'a, T>, Sleeper) { + let tasks: Worker = Worker::new_fifo(); + let stealer = tasks.stealer(); + let local_tasks: SegQueue = SegQueue::new(); + let parker = Parker::new(); + let _marker = PhantomData; + let unparker = parker.unparker().clone(); + + ( + Self { task_queue, tasks, local_tasks, parker, _marker }, + Sleeper { stealer, unparker } + ) + } + + pub fn unparker(&self) -> &Unparker { + self.parker.unparker() + } + + /// Run this worker thread "forever" (i.e. 
until the thread panics or is otherwise killed) + pub fn run(&self, fences: impl Iterator>) -> ! { + let fences: Vec> = fences + .map(|stealer| stealer.clone()) + .collect(); + + loop { + self.run_inner(&fences); + self.parker.park(); + } + } + + pub fn run_timeout(&self, fences: impl Iterator>, timeout: Duration) -> ! { + let fences: Vec> = fences + .map(|stealer| stealer.clone()) + .collect(); + + loop { + self.run_inner(&fences); + self.parker.park_timeout(timeout); + } + } + + pub fn run_once(&self, fences: impl Iterator>) { + let fences: Vec> = fences + .map(|stealer| stealer.clone()) + .collect(); + + self.run_inner(fences); + } + + fn run_inner]>>(&self, fences: F) { + // Continue working until there is no work to do. + 'work: while { + // Always run local tasks first since they can't be done by anybody else. + if let Some(task) = self.local_tasks.pop() { + task.run(); + continue 'work; + } else if let Some(task) = self.tasks.pop() { + task.run(); + continue 'work; + } else { + // If we were woken up by the global scheduler `should_steal` is set to true, + // so we now try to clean out. + + // First try to take work from the global queue. + let mut i = 0; + loop { + match self.task_queue.steal_batch_and_pop(&self.tasks) { + // If we could steal from the global queue do more work. + Steal::Success(task) => { + task.run(); + continue 'work; + }, + + // If there is no more work to steal from the global queue, try other + // workers next + Steal::Empty => break, + + // If a race condition occurred try again with backoff + Steal::Retry => for _ in 0..(1 << i) { + core::hint::spin_loop(); + i += 1; + }, + } + } + + // If the global queue is empty too, steal from the thread with the most work. + // This is only None when there are no stealers installed which, given that we + // exist, *should* never be the case. + while let Some(fence) = select_fence(fences.as_ref().iter()) { + match fence.steal_batch_and_pop(&self.tasks) { + Steal::Success(task) => { + task.run(); + continue 'work; + }, + + // If no other worker has work to do we're done once again. + Steal::Empty => break, + + // If another worker is currently stealing chances are that the + // current `stealer` will not have the most task afterwards so we do + // want to do the maths regarding that again. + Steal::Retry => core::hint::spin_loop(), + } + } + } + + // If we get here we're done and need to park. 
+ false + } {} + } + + pub fn schedule_local(&self, task: T) { + self.local_tasks.push(task); + } +} + +#[inline(always)] +fn select_fence<'a, T>(fences: impl Iterator>) -> Option<&'a Stealer> { + fences.max_by_key(|fence| fence.len()) +} \ No newline at end of file diff --git a/runtime/executor/tests/run_blocking.rs b/runtime/executor/tests/run_blocking.rs index 6f79295..d276d72 100644 --- a/runtime/executor/tests/run_blocking.rs +++ b/runtime/executor/tests/run_blocking.rs @@ -1,8 +1,8 @@ -use bastion_executor::blocking; -use bastion_executor::run::run; -use lightproc::proc_stack::ProcStack; +use std::io::Write; +use executor::run::run; use std::thread; use std::time::Duration; +use executor::prelude::{ProcStack, spawn}; #[cfg(feature = "tokio-runtime")] mod tokio_tests { @@ -21,18 +21,18 @@ mod no_tokio_tests { } fn run_test() { - let output = run( - blocking::spawn_blocking( - async { - let duration = Duration::from_millis(1); - thread::sleep(duration); - 42 - }, - ProcStack::default(), - ), - ProcStack::default(), - ) - .unwrap(); + let handle = spawn( + async { + let duration = Duration::from_millis(1); + thread::sleep(duration); + //42 + }, + ); - assert_eq!(42, output); + let output = run(handle, ProcStack {}); + + println!("{:?}", output); + std::io::stdout().flush(); + assert!(output.is_some()); + std::thread::sleep(Duration::from_millis(200)); } diff --git a/runtime/executor/tests/thread_pool.rs b/runtime/executor/tests/thread_pool.rs index 8673cda..2a37bd9 100644 --- a/runtime/executor/tests/thread_pool.rs +++ b/runtime/executor/tests/thread_pool.rs @@ -1,11 +1,11 @@ -use bastion_executor::blocking; -use bastion_executor::run::run; -use futures::future::join_all; -use lightproc::proc_stack::ProcStack; +use executor::blocking; +use executor::run::run; +use futures_util::future::join_all; use lightproc::recoverable_handle::RecoverableHandle; use std::thread; use std::time::Duration; use std::time::Instant; +use executor::prelude::ProcStack; // Test for slow joins without task bursts during joins. #[test] @@ -22,12 +22,11 @@ fn slow_join() { let duration = Duration::from_millis(1); thread::sleep(duration); }, - ProcStack::default(), ) }) .collect::>>(); - run(join_all(handles), ProcStack::default()); + run(join_all(handles), ProcStack {}); // Let them join to see how it behaves under different workloads. let duration = Duration::from_millis(thread_join_time_max); @@ -41,12 +40,11 @@ fn slow_join() { let duration = Duration::from_millis(100); thread::sleep(duration); }, - ProcStack::default(), ) }) .collect::>>(); - run(join_all(handles), ProcStack::default()); + run(join_all(handles), ProcStack {}); // Slow joins shouldn't cause internal slow down let elapsed = start.elapsed().as_millis() - thread_join_time_max as u128; @@ -70,12 +68,11 @@ fn slow_join_interrupted() { let duration = Duration::from_millis(1); thread::sleep(duration); }, - ProcStack::default(), ) }) .collect::>>(); - run(join_all(handles), ProcStack::default()); + run(join_all(handles), ProcStack {}); // Let them join to see how it behaves under different workloads. // This time join under the time window. 
@@ -90,12 +87,11 @@ fn slow_join_interrupted() {
                 let duration = Duration::from_millis(100);
                 thread::sleep(duration);
             },
-            ProcStack::default(),
         )
     })
     .collect::<Vec<RecoverableHandle<()>>>();
 
-    run(join_all(handles), ProcStack::default());
+    run(join_all(handles), ProcStack {});
 
     // Slow joins shouldn't cause internal slow down
     let elapsed = start.elapsed().as_millis() - thread_join_time_max as u128;
@@ -120,7 +116,6 @@ fn longhauling_task_join() {
                 let duration = Duration::from_millis(1000);
                 thread::sleep(duration);
             },
-            ProcStack::default(),
         )
     })
     .collect::<Vec<RecoverableHandle<()>>>();
@@ -137,12 +132,11 @@ fn longhauling_task_join() {
                 let duration = Duration::from_millis(100);
                 thread::sleep(duration);
             },
-            ProcStack::default(),
         )
     })
     .collect::<Vec<RecoverableHandle<()>>>();
 
-    run(join_all(handles), ProcStack::default());
+    run(join_all(handles), ProcStack {});
 
     // Slow joins shouldn't cause internal slow down
     let elapsed = start.elapsed().as_millis() - thread_join_time_max as u128;
diff --git a/runtime/lightproc/examples/proc_run.rs b/runtime/lightproc/examples/proc_run.rs
index e8fb87f..572632f 100644
--- a/runtime/lightproc/examples/proc_run.rs
+++ b/runtime/lightproc/examples/proc_run.rs
@@ -4,23 +4,21 @@ use lightproc::prelude::*;
 use std::future::Future;
 use std::sync::Arc;
 use std::thread;
+use std::thread::JoinHandle;
 use std::time::Duration;
 
-fn spawn_on_thread<F, R>(fut: F) -> ProcHandle<R>
+fn spawn_on_thread<F, R>(fut: F) -> (JoinHandle<()>, ProcHandle<R>)
 where
     F: Future<Output = R> + Send + 'static,
     R: Send + 'static,
 {
     let (sender, receiver) = channel::unbounded();
-    let sender = Arc::new(sender);
-    let s = Arc::downgrade(&sender);
 
     let future = async move {
-        let _ = sender;
         fut.await
     };
 
-    let schedule = move |t| s.upgrade().unwrap().send(t).unwrap();
+    let schedule = move |t| sender.send(t).unwrap();
     let (proc, handle) = LightProc::build(
         future,
         schedule,
@@ -28,19 +26,30 @@ where
     proc.schedule();
 
-    thread::spawn(move || {
+    let join = thread::spawn(move || {
         for proc in receiver {
+            println!("Got a task: {:?}", proc);
             proc.run();
         }
     });
 
-    handle
+    (join, handle)
 }
 
 fn main() {
-    executor::block_on(spawn_on_thread(async {
+    let (join, handle) = spawn_on_thread(async {
         println!("Sleeping!");
-        async_std::task::sleep(Duration::from_secs(1)).await;
-        println!("Done sleeping");
-    }));
+        async_std::task::sleep(Duration::from_millis(100)).await;
+        println!("Done sleeping 1");
+        async_std::task::sleep(Duration::from_millis(100)).await;
+        println!("Done sleeping 2");
+        async_std::task::sleep(Duration::from_millis(100)).await;
+        println!("Done sleeping 3");
+        async_std::task::sleep(Duration::from_millis(100)).await;
+        println!("Done sleeping 4");
+        return 32;
+    });
+    let output = executor::block_on(handle);
+    assert_eq!(output, Some(32));
+    assert!(join.join().is_ok());
 }
diff --git a/runtime/lightproc/src/lightproc.rs b/runtime/lightproc/src/lightproc.rs
index e271a7a..20f9bc5 100644
--- a/runtime/lightproc/src/lightproc.rs
+++ b/runtime/lightproc/src/lightproc.rs
@@ -76,10 +76,10 @@ impl LightProc {
     ///     println!("future panicked!: {}", &reason);
     /// });
     /// ```
-    pub fn recoverable<F, R, S>(future: F, schedule: S) -> (Self, RecoverableHandle<R>)
-        where F: Future<Output = R> + 'static,
-              R: 'static,
-              S: Fn(LightProc) + 'static,
+    pub fn recoverable<'a, F, R, S>(future: F, schedule: S) -> (Self, RecoverableHandle<R>)
+        where F: Future<Output = R> + 'a,
+              R: 'a,
+              S: Fn(LightProc) + 'a,
     {
         let recovery_future = AssertUnwindSafe(future).catch_unwind();
         let (proc, handle) = Self::build(recovery_future, schedule);
@@ -114,10 +114,10 @@ impl LightProc {
     ///     schedule_function,
     /// );
     /// ```
-    pub fn build<F, R, S>(future: F, schedule: S) ->
(Self, ProcHandle<R>)
-        where F: Future<Output = R> + 'static,
-              R: 'static,
-              S: Fn(LightProc) + 'static,
+    pub fn build<'a, F, R, S>(future: F, schedule: S) -> (Self, ProcHandle<R>)
+        where F: Future<Output = R> + 'a,
+              R: 'a,
+              S: Fn(LightProc) + 'a,
     {
         let raw_proc = RawProc::allocate(future, schedule);
         let proc = LightProc { raw_proc };
diff --git a/runtime/lightproc/src/proc_data.rs b/runtime/lightproc/src/proc_data.rs
index 9c9b7c6..651fa64 100644
--- a/runtime/lightproc/src/proc_data.rs
+++ b/runtime/lightproc/src/proc_data.rs
@@ -37,14 +37,16 @@ impl ProcData {
 
         loop {
             // If the proc has been completed or closed, it can't be cancelled.
-            if state.intersects(COMPLETED | CLOSED) {
+            if state.get_flags().intersects(COMPLETED | CLOSED) {
                 break;
             }
 
+            let (flags, references) = state.parts();
+            let new = State::new(flags | CLOSED, references);
             // Mark the proc as closed.
             match self.state.compare_exchange_weak(
-                state.into(),
-                (state | CLOSED).into(),
+                state,
+                new,
                 Ordering::AcqRel,
                 Ordering::Acquire,
             ) {
@@ -96,9 +98,9 @@ impl ProcData {
         loop {
             // Acquire the lock. If we're storing an awaiter, then also set the awaiter flag.
             let state = if new_is_none {
-                self.state.fetch_or(LOCKED.into(), Ordering::Acquire)
+                self.state.fetch_or(LOCKED, Ordering::Acquire)
             } else {
-                self.state.fetch_or((LOCKED | AWAITER).into(), Ordering::Acquire)
+                self.state.fetch_or(LOCKED | AWAITER, Ordering::Acquire)
             };
 
             // If the lock was acquired, break from the loop.
diff --git a/runtime/lightproc/src/proc_handle.rs b/runtime/lightproc/src/proc_handle.rs
index 46c006f..f2144e6 100644
--- a/runtime/lightproc/src/proc_handle.rs
+++ b/runtime/lightproc/src/proc_handle.rs
@@ -52,15 +52,16 @@ impl<R> ProcHandle<R> {
 
         loop {
             // If the proc has been completed or closed, it can't be cancelled.
-            if state.intersects(COMPLETED | CLOSED) {
+            if state.get_flags().intersects(COMPLETED | CLOSED) {
                 break;
             }
 
             // If the proc is not scheduled nor running, we'll need to schedule it.
-            let new = if state.intersects(SCHEDULED | RUNNING) {
-                (state | SCHEDULED | CLOSED) + 1
+            let (flags, references) = state.parts();
+            let new = if flags.intersects(SCHEDULED | RUNNING) {
+                State::new(flags | SCHEDULED | CLOSED, references + 1)
             } else {
-                state | CLOSED
+                State::new(flags | CLOSED, references)
             };
 
             // Mark the proc as closed.
@@ -73,7 +74,7 @@ impl<R> ProcHandle<R> {
                 Ok(_) => {
                     // If the proc is not scheduled nor running, schedule it so that its future
                     // gets dropped by the executor.
-                    if !state.intersects(SCHEDULED | RUNNING) {
+                    if !state.get_flags().intersects(SCHEDULED | RUNNING) {
                         ((*pdata).vtable.schedule)(ptr);
                     }
@@ -142,9 +143,11 @@ impl<R> Future for ProcHandle<R> {
                 }
 
                 // Since the proc is now completed, mark it as closed in order to grab its output.
+                let (flags, references) = state.parts();
+                let new = State::new(flags | CLOSED, references);
                 match (*pdata).state.compare_exchange(
                     state,
-                    state | CLOSED,
+                    new,
                     Ordering::AcqRel,
                     Ordering::Acquire,
                 ) {
@@ -190,8 +193,8 @@ impl<R> Drop for ProcHandle<R> {
            // proc. This is a common case so if the handle is not used, the overhead of it is only
            // one compare-exchange operation.
            if let Err(mut state) = (*pdata).state.compare_exchange_weak(
-                SCHEDULED | HANDLE | REFERENCE,
-                SCHEDULED | REFERENCE,
+                State::new(SCHEDULED | HANDLE, 1),
+                State::new(SCHEDULED, 1),
                 Ordering::AcqRel,
                 Ordering::Acquire,
            ) {
@@ -200,9 +203,10 @@ impl<R> Drop for ProcHandle<R> {
                    // must be dropped.
                    if state.is_completed() && !state.is_closed() {
                        // Mark the proc as closed in order to grab its output.
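+                        // The flags and the reference count now share one packed word, so decode
+                        // them once here and rebuild the CLOSED variant below instead of OR-ing a
+                        // bit into a raw integer.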
+ let (flags, references) = state.parts(); match (*pdata).state.compare_exchange_weak( state, - state | CLOSED, + State::new(flags | CLOSED, references), Ordering::AcqRel, Ordering::Acquire, ) { @@ -211,7 +215,7 @@ impl Drop for ProcHandle { output = Some((((*pdata).vtable.get_output)(ptr) as *mut R).read()); // Update the state variable because we're continuing the loop. - state |= CLOSED; + state = State::new(flags | CLOSED, references); } Err(s) => state = s, } @@ -220,9 +224,10 @@ impl Drop for ProcHandle { // close it and schedule one more time so that its future gets dropped by // the executor. let new = if state.get_refcount() == 0 && !state.is_closed() { - SCHEDULED | CLOSED | REFERENCE + State::new(SCHEDULED | CLOSED, 1) } else { - state & !HANDLE + let (flags, references) = state.parts(); + State::new(flags & !HANDLE, references) }; // Unset the handle flag. diff --git a/runtime/lightproc/src/raw_proc.rs b/runtime/lightproc/src/raw_proc.rs index 8334bca..034dd95 100644 --- a/runtime/lightproc/src/raw_proc.rs +++ b/runtime/lightproc/src/raw_proc.rs @@ -8,6 +8,7 @@ use crate::state::*; use std::alloc::{self, Layout}; use std::cell::Cell; use std::future::Future; +use std::marker::PhantomData; use std::mem::{self, ManuallyDrop}; use std::panic::AssertUnwindSafe; use std::pin::Pin; @@ -17,18 +18,21 @@ use std::sync::atomic::Ordering; use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; /// Raw pointers to the fields of a proc. -pub(crate) struct RawProc { +pub(crate) struct RawProc<'a, F, R, S> { pub(crate) pdata: *const ProcData, pub(crate) schedule: *const S, pub(crate) future: *mut F, pub(crate) output: *mut R, + + // Make the lifetime 'a of the future invariant + _marker: PhantomData<&'a ()>, } -impl RawProc +impl<'a, F, R, S> RawProc<'a, F, R, S> where - F: Future + 'static, - R: 'static, - S: Fn(LightProc) + 'static, + F: Future + 'a, + R: 'a, + S: Fn(LightProc) + 'a, { /// Allocates a proc with the given `future` and `schedule` function. /// @@ -46,10 +50,11 @@ where let raw = Self::from_ptr(raw_proc.as_ptr()); + let state = AtomicState::new(State::new(SCHEDULED | HANDLE, 1)); // Write the pdata as the first field of the proc. (raw.pdata as *mut ProcData).write(ProcData { - state: AtomicState::new(SCHEDULED | HANDLE | REFERENCE), + state, awaiter: Cell::new(None), vtable: &ProcVTable { raw_waker: RawWakerVTable::new( @@ -115,6 +120,7 @@ where schedule: p.add(proc_layout.offset_schedule) as *const S, future: p.add(proc_layout.offset_future) as *mut F, output: p.add(proc_layout.offset_output) as *mut R, + _marker: PhantomData, } } } @@ -127,7 +133,7 @@ where loop { // If the proc is completed or closed, it can't be woken. - if state.intersects(COMPLETED | CLOSED) { + if state.get_flags().intersects(COMPLETED | CLOSED) { // Drop the waker. Self::decrement(ptr); break; @@ -138,8 +144,8 @@ where if state.is_scheduled() { // Update the state without actually modifying it. match (*raw.pdata).state.compare_exchange_weak( - state.into(), - state.into(), + state, + state, Ordering::AcqRel, Ordering::Acquire, ) { @@ -151,10 +157,12 @@ where Err(s) => state = s, } } else { + let (flags, references) = state.parts(); + let new = State::new(flags | SCHEDULED, references); // Mark the proc as scheduled. match (*raw.pdata).state.compare_exchange_weak( state, - state | SCHEDULED, + new, Ordering::AcqRel, Ordering::Acquire, ) { @@ -188,7 +196,7 @@ where loop { // If the proc is completed or closed, it can't be woken. 
- if state.intersects(COMPLETED | CLOSED) { + if state.get_flags().intersects(COMPLETED | CLOSED) { break; } @@ -206,11 +214,12 @@ where Err(s) => state = s, } } else { + let (flags, references) = state.parts(); // If the proc is not scheduled nor running, we'll need to schedule after waking. - let new = if !state.intersects(SCHEDULED | RUNNING) { - (state | SCHEDULED) + 1 + let new = if !state.get_flags().intersects(SCHEDULED | RUNNING) { + State::new(flags | SCHEDULED, references + 1) } else { - state | SCHEDULED + State::new(flags | SCHEDULED, references) }; // Mark the proc as scheduled. @@ -222,7 +231,7 @@ where ) { Ok(_) => { // If the proc is not scheduled nor running, now is the time to schedule. - if !state.intersects(SCHEDULED | RUNNING) { + if !state.get_flags().intersects(SCHEDULED | RUNNING) { // Schedule the proc. let proc = LightProc { raw_proc: NonNull::new_unchecked(ptr as *mut ()), @@ -248,7 +257,7 @@ where let state = (*raw.pdata).state.fetch_add(1, Ordering::Relaxed); // If the reference count overflowed, abort. - if state.bits() > i64::MAX as u64 { + if state.get_refcount() > i32::MAX as u32 { std::process::abort(); } @@ -264,10 +273,10 @@ where let raw = Self::from_ptr(ptr); // Decrement the reference count. - let mut new = (*raw.pdata) + let new = (*raw.pdata) .state .fetch_sub(1, Ordering::AcqRel); - new.set_refcount(new.get_refcount().saturating_sub(1)); + let new = new.set_refcount(new.get_refcount().saturating_sub(1)); // If this was the last reference to the proc and the `ProcHandle` has been dropped as // well, then destroy the proc. @@ -353,16 +362,17 @@ where return; } + let (flags, references) = state.parts(); // Mark the proc as unscheduled and running. match (*raw.pdata).state.compare_exchange_weak( state, - (state & !SCHEDULED) | RUNNING, + State::new((flags & !SCHEDULED) | RUNNING, references), Ordering::AcqRel, Ordering::Acquire, ) { Ok(_) => { - // Update the state because we're continuing with polling the future. - state = (state & !SCHEDULED) | RUNNING; + // Update our local state because we're continuing with polling the future. + state = State::new((flags & !SCHEDULED) | RUNNING, references); break; } Err(s) => state = s, @@ -387,12 +397,14 @@ where // The proc is now completed. loop { + let (flags, references) = state.parts(); // If the handle is dropped, we'll need to close it and drop the output. - let new = if !state.is_handle() { - (state & !RUNNING & !SCHEDULED) | COMPLETED | CLOSED + let new_flags = if !state.is_handle() { + (flags & !(RUNNING & SCHEDULED)) | COMPLETED | CLOSED } else { - (state & !RUNNING & !SCHEDULED) | COMPLETED + (flags & !(RUNNING & SCHEDULED)) | COMPLETED }; + let new = State::new(new_flags, references); // Mark the proc as not running and completed. match (*raw.pdata).state.compare_exchange_weak( @@ -430,11 +442,14 @@ where loop { // If the proc was closed while running, we'll need to unschedule in case it // was woken and then clean up its resources. - let new = if state.is_closed() { - state & !( RUNNING | SCHEDULED ) + let (flags, references) = state.parts(); + let flags = if state.is_closed() { + flags & !( RUNNING | SCHEDULED ) } else { - state & !RUNNING + flags & !RUNNING }; + let new = State::new(flags, references); + // Mark the proc as not running. 
match (*raw.pdata).state.compare_exchange_weak( @@ -472,30 +487,31 @@ where } } -impl Clone for RawProc { +impl<'a, F, R, S> Clone for RawProc<'a, F, R, S> { fn clone(&self) -> Self { Self { pdata: self.pdata, schedule: self.schedule, future: self.future, output: self.output, + _marker: PhantomData, } } } -impl Copy for RawProc {} +impl<'a, F, R, S> Copy for RawProc<'a, F, R, S> {} /// A guard that closes the proc if polling its future panics. -struct Guard(RawProc) +struct Guard<'a, F, R, S>(RawProc<'a, F, R, S>) where - F: Future + 'static, - R: 'static, - S: Fn(LightProc) + 'static; + F: Future + 'a, + R: 'a, + S: Fn(LightProc) + 'a; -impl Drop for Guard +impl<'a, F, R, S> Drop for Guard<'a, F, R, S> where - F: Future + 'static, - R: 'static, - S: Fn(LightProc) + 'static, + F: Future + 'a, + R: 'a, + S: Fn(LightProc) + 'a, { fn drop(&mut self) { let raw = self.0; @@ -522,9 +538,11 @@ where } // Mark the proc as not running, not scheduled, and closed. + let (flags, references) = state.parts(); + let new = State::new((flags & !(RUNNING & SCHEDULED)) | CLOSED, references); match (*raw.pdata).state.compare_exchange_weak( state, - (state & !RUNNING & !SCHEDULED) | CLOSED, + new, Ordering::AcqRel, Ordering::Acquire, ) { diff --git a/runtime/lightproc/src/state.rs b/runtime/lightproc/src/state.rs index d729900..e2f2dcc 100644 --- a/runtime/lightproc/src/state.rs +++ b/runtime/lightproc/src/state.rs @@ -1,3 +1,4 @@ +use std::fmt::{Debug, Formatter}; use std::sync::atomic::{AtomicU64, Ordering}; /// Set if the proc is scheduled for running. @@ -8,7 +9,7 @@ use std::sync::atomic::{AtomicU64, Ordering}; /// /// This flag can't be set when the proc is completed. However, it can be set while the proc is /// running, in which case it will be rescheduled as soon as polling finishes. -pub(crate) const SCHEDULED: State = State::SCHEDULED; +pub(crate) const SCHEDULED: StateFlags = StateFlags::SCHEDULED; /// Set if the proc is running. /// @@ -16,7 +17,7 @@ pub(crate) const SCHEDULED: State = State::SCHEDULED; /// /// This flag can't be set when the proc is completed. However, it can be in scheduled state while /// it is running, in which case it will be rescheduled when it stops being polled. -pub(crate) const RUNNING: State = State::RUNNING; +pub(crate) const RUNNING: StateFlags = StateFlags::RUNNING; /// Set if the proc has been completed. /// @@ -25,7 +26,7 @@ pub(crate) const RUNNING: State = State::RUNNING; /// the proc as stopped. /// /// This flag can't be set when the proc is scheduled or completed. -pub(crate) const COMPLETED: State = State::COMPLETED; +pub(crate) const COMPLETED: StateFlags = StateFlags::COMPLETED; /// Set if the proc is closed. /// @@ -36,39 +37,29 @@ pub(crate) const COMPLETED: State = State::COMPLETED; /// 2. Its output is awaited by the `ProcHandle`. /// 3. It panics while polling the future. /// 4. It is completed and the `ProcHandle` is dropped. -pub(crate) const CLOSED: State = State::CLOSED; +pub(crate) const CLOSED: StateFlags = StateFlags::CLOSED; /// Set if the `ProcHandle` still exists. /// /// The `ProcHandle` is a special case in that it is only tracked by this flag, while all other /// proc references (`LightProc` and `Waker`s) are tracked by the reference count. -pub(crate) const HANDLE: State = State::HANDLE; +pub(crate) const HANDLE: StateFlags = StateFlags::HANDLE; /// Set if the `ProcHandle` is awaiting the output. /// /// This flag is set while there is a registered awaiter of type `Waker` inside the proc. 
When the /// proc gets closed or completed, we need to wake the awaiter. This flag can be used as a fast /// check that tells us if we need to wake anyone without acquiring the lock inside the proc. -pub(crate) const AWAITER: State = State::AWAITER; +pub(crate) const AWAITER: StateFlags = StateFlags::AWAITER; /// Set if the awaiter is locked. /// /// This lock is acquired before a new awaiter is registered or the existing one is woken. -pub(crate) const LOCKED: State = State::LOCKED; - -/// A single reference. -/// -/// The lower bits in the state contain various flags representing the proc state, while the upper -/// bits contain the reference count. The value of `REFERENCE` represents a single reference in the -/// total reference count. -/// -/// Note that the reference counter only tracks the `LightProc` and `Waker`s. The `ProcHandle` is -/// tracked separately by the `HANDLE` flag. -pub(crate) const REFERENCE: State = State::REFERENCE; +pub(crate) const LOCKED: StateFlags = StateFlags::LOCKED; bitflags::bitflags! { #[derive(Default)] - pub struct State: u64 { + pub struct StateFlags: u32 { const SCHEDULED = 1 << 0; const RUNNING = 1 << 1; const COMPLETED = 1 << 2; @@ -76,125 +67,115 @@ bitflags::bitflags! { const HANDLE = 1 << 4; const AWAITER = 1 << 5; const LOCKED = 1 << 6; - const REFERENCE = 1 << 7; } } +#[repr(packed)] +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] +pub struct State { + bytes: [u8; 8] +} + impl State { #[inline(always)] - const fn new(bits: u64) -> Self { - unsafe { Self::from_bits_unchecked(bits) } + pub const fn new(flags: StateFlags, references: u32) -> Self { + let [a,b,c,d] = references.to_ne_bytes(); + let [e,f,g,h] = flags.bits.to_ne_bytes(); + Self::from_bytes([a,b,c,d,e,f,g,h]) } - /// Returns `true` if the future is in the pending. + #[inline(always)] - pub fn is_pending(&self) -> bool { - !self.is_completed() + pub const fn parts(self: Self) -> (StateFlags, u32) { + let [a,b,c,d,e,f,g,h] = self.bytes; + let refcount = u32::from_ne_bytes([a,b,c,d]); + let state = unsafe { + StateFlags::from_bits_unchecked(u32::from_ne_bytes([e,f,g,h])) + }; + (state, refcount) } - bitfield::bitfield_fields! { - u64; - #[inline(always)] - /// A proc is considered to be scheduled whenever its `LightProc` reference exists. It is in scheduled - /// state at the moment of creation and when it gets unpaused either by its `ProcHandle` or woken - /// by a `Waker`. - /// - /// This flag can't be set when the proc is completed. However, it can be set while the proc is - /// running, in which case it will be rescheduled as soon as polling finishes. - pub is_scheduled, set_scheduled: 0; + #[inline(always)] + /// The lower bits in the state contain various flags representing the proc state, while the upper + /// bits contain the reference count. + /// Note that the reference counter only tracks the `LightProc` and `Waker`s. The `ProcHandle` is + /// tracked separately by the `HANDLE` flag. + pub const fn get_refcount(self) -> u32 { + let [a,b,c,d,_,_,_,_] = self.bytes; + u32::from_ne_bytes([a,b,c,d]) + } - #[inline(always)] - /// A proc is running state while its future is being polled. - /// - /// This flag can't be set when the proc is completed. However, it can be in scheduled state while - /// it is running, in which case it will be rescheduled when it stops being polled. 
- pub is_running, set_running: 1; + #[inline(always)] + #[must_use] + pub const fn set_refcount(self, refcount: u32) -> Self { + let [a, b, c, d] = refcount.to_ne_bytes(); + let [_, _, _, _, e, f, g, h] = self.bytes; + Self::from_bytes([a, b, c, d, e, f, g, h]) + } - #[inline(always)] - /// Set if the proc has been completed. - /// - /// This flag is set when polling returns `Poll::Ready`. The output of the future is then stored - /// inside the proc until it becomes stopped. In fact, `ProcHandle` picks the output up by marking - /// the proc as stopped. - /// - /// This flag can't be set when the proc is scheduled or completed. - pub is_completed, set_completed: 2; + #[inline(always)] + pub const fn get_flags(self) -> StateFlags { + let [_, _, _, _, e, f, g, h] = self.bytes; + unsafe { StateFlags::from_bits_unchecked(u32::from_ne_bytes([e,f,g,h])) } + } - #[inline(always)] - /// Set if the proc is closed. - /// - /// If a proc is closed, that means its either cancelled or its output has been consumed by the - /// `ProcHandle`. A proc becomes closed when: - /// - /// 1. It gets cancelled by `LightProc::cancel()` or `ProcHandle::cancel()`. - /// 2. Its output is awaited by the `ProcHandle`. - /// 3. It panics while polling the future. - /// 4. It is completed and the `ProcHandle` is dropped. - pub is_closed, set_closed: 3; + #[inline(always)] + const fn from_bytes(bytes: [u8; 8]) -> Self { + Self { bytes } + } - #[inline(always)] - /// Set if the `ProcHandle` still exists. - /// - /// The `ProcHandle` is a special case in that it is only tracked by this flag, while all other - /// proc references (`LightProc` and `Waker`s) are tracked by the reference count. - pub is_handle, set_handle: 4; + #[inline(always)] + const fn into_u64(self) -> u64 { + u64::from_ne_bytes(self.bytes) + } - #[inline(always)] - /// Set if the `ProcHandle` is awaiting the output. - /// - /// This flag is set while there is a registered awaiter of type `Waker` inside the proc. When the - /// proc gets closed or completed, we need to wake the awaiter. This flag can be used as a fast - /// check that tells us if we need to wake anyone without acquiring the lock inside the proc. - pub is_awaiter, set_awaiter: 5; + #[inline(always)] + const fn from_u64(value: u64) -> Self { + Self::from_bytes(value.to_ne_bytes()) + } - #[inline(always)] - /// Set if the awaiter is locked. - /// - /// This lock is acquired before a new awaiter is registered or the existing one is woken. - pub is_locked, set_locked: 6; + #[inline(always)] + pub const fn is_awaiter(&self) -> bool { + self.get_flags().contains(AWAITER) + } - #[inline(always)] - /// The lower bits in the state contain various flags representing the proc state, while the upper - /// bits contain the reference count. - /// Note that the reference counter only tracks the `LightProc` and `Waker`s. The `ProcHandle` is - /// tracked separately by the `HANDLE` flag. 
- pub get_refcount, set_refcount: 63, 7; + #[inline(always)] + pub const fn is_closed(&self) -> bool { + self.get_flags().contains(CLOSED) + } + + #[inline(always)] + pub const fn is_locked(&self) -> bool { + self.get_flags().contains(LOCKED) + } + + #[inline(always)] + pub const fn is_scheduled(&self) -> bool { + self.get_flags().contains(SCHEDULED) + } + + #[inline(always)] + pub const fn is_completed(&self) -> bool { + self.get_flags().contains(COMPLETED) + } + + #[inline(always)] + pub const fn is_handle(&self) -> bool { + self.get_flags().contains(HANDLE) + } + + #[inline(always)] + pub const fn is_running(&self) -> bool { + self.get_flags().contains(RUNNING) } } -impl std::ops::Add for State { - type Output = State; - - fn add(mut self, rhs: u64) -> Self::Output { - self.set_refcount(self.get_refcount() + rhs); - self - } -} - -impl std::ops::Sub for State { - type Output = State; - - fn sub(mut self, rhs: u64) -> Self::Output { - self.set_refcount(self.get_refcount() - rhs); - self - } -} - -impl bitfield::BitRange for State - where u64: bitfield::BitRange -{ - fn bit_range(&self, msb: usize, lsb: usize) -> T { - self.bits.bit_range(msb, lsb) - } - - fn set_bit_range(&mut self, msb: usize, lsb: usize, value: T) { - self.bits.set_bit_range(msb, lsb, value) - } -} - -impl Into for State { - fn into(self) -> usize { - self.bits as usize +impl Debug for State { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("State") + .field("flags", &self.get_flags()) + .field("references", &self.get_refcount()) + .finish() } } @@ -206,19 +187,19 @@ pub struct AtomicState { impl AtomicState { #[inline(always)] pub const fn new(v: State) -> Self { - let inner = AtomicU64::new(v.bits); + let inner = AtomicU64::new(v.into_u64()); Self { inner } } #[inline(always)] pub fn load(&self, order: Ordering) -> State { - State::new(self.inner.load(order)) + State::from_u64(self.inner.load(order)) } #[inline(always)] #[allow(dead_code)] - pub fn store(&self, val: State, order: Ordering) { - self.inner.store(val.bits, order) + pub fn store(&self, state: State, order: Ordering) { + self.inner.store(state.into_u64(), order) } pub fn compare_exchange( @@ -229,9 +210,9 @@ impl AtomicState { failure: Ordering ) -> Result { - self.inner.compare_exchange(current.bits, new.bits, success, failure) - .map(|u| State::new(u)) - .map_err(|u| State::new(u)) + self.inner.compare_exchange(current.into_u64(), new.into_u64(), success, failure) + .map(|u| State::from_u64(u)) + .map_err(|u| State::from_u64(u)) } pub fn compare_exchange_weak( @@ -242,27 +223,35 @@ impl AtomicState { failure: Ordering ) -> Result { - self.inner.compare_exchange_weak(current.bits, new.bits, success, failure) - .map(|u| State::new(u)) - .map_err(|u| State::new(u)) + self.inner.compare_exchange_weak(current.into_u64(), new.into_u64(), success, failure) + .map(|u| State::from_u64(u)) + .map_err(|u| State::from_u64(u)) } - pub fn fetch_or(&self, val: State, order: Ordering) -> State { - State::new(self.inner.fetch_or(val.bits, order)) + pub fn fetch_or(&self, val: StateFlags, order: Ordering) -> State { + let [a,b,c,d] = val.bits.to_ne_bytes(); + let store = u64::from_ne_bytes([0,0,0,0,a,b,c,d]); + State::from_u64(self.inner.fetch_or(store, order)) } - pub fn fetch_and(&self, val: State, order: Ordering) -> State { - State::new(self.inner.fetch_and(val.bits, order)) + pub fn fetch_and(&self, val: StateFlags, order: Ordering) -> State { + let [a,b,c,d] = val.bits.to_ne_bytes(); + let store = 
u64::from_ne_bytes([!0,!0,!0,!0,a,b,c,d]); + State::from_u64(self.inner.fetch_and(store, order)) } // FIXME: Do this properly - pub fn fetch_add(&self, val: u64, order: Ordering) -> State { - State::new(self.inner.fetch_add(val << 7, order)) + pub fn fetch_add(&self, val: u32, order: Ordering) -> State { + let [a,b,c,d] = val.to_ne_bytes(); + let store = u64::from_ne_bytes([a,b,c,d,0,0,0,0]); + State::from_u64(self.inner.fetch_add(store, order)) } // FIXME: Do this properly - pub fn fetch_sub(&self, val: u64, order: Ordering) -> State { - State::new(self.inner.fetch_sub(val << 7, order)) + pub fn fetch_sub(&self, val: u32, order: Ordering) -> State { + let [a,b,c,d] = val.to_ne_bytes(); + let store = u64::from_ne_bytes([a,b,c,d,0,0,0,0]); + State::from_u64(self.inner.fetch_sub(store, order)) } } @@ -279,112 +268,66 @@ mod tests { #[test] fn test_is_scheduled_returns_true() { let state = SCHEDULED; - assert_eq!(state.is_scheduled(), true); + assert!(state.contains(SCHEDULED)); - let mut state2 = State::default(); - state2.set_scheduled(true); + let mut state2 = StateFlags::default(); + state2 |= SCHEDULED; assert_eq!(state, state2) } #[test] - fn test_is_scheduled_returns_false() { - let state = State::default(); - assert_eq!(state.is_scheduled(), false); - } + fn flags_work() { + let flags = SCHEDULED; + assert_eq!(flags, SCHEDULED); - #[test] - fn test_is_running_returns_true() { - let state = RUNNING; - assert_eq!(state.is_running(), true); - } + let flags = SCHEDULED | RUNNING; + assert_eq!(flags, SCHEDULED | RUNNING); - #[test] - fn test_is_running_returns_false() { - let state = State::default(); - assert_eq!(state.is_running(), false); - } - - #[test] - fn test_is_completed_returns_true() { - let state = COMPLETED; - assert_eq!(state.is_completed(), true); - } - - #[test] - fn test_is_completed_returns_false() { - let state = State::default(); - assert_eq!(state.is_completed(), false); - } - - #[test] - fn test_is_closed_returns_true() { - let state = CLOSED; - assert_eq!(state.is_closed(), true); - } - - #[test] - fn test_is_closed_returns_false() { - let state = State::default(); - assert_eq!(state.is_closed(), false); - } - - #[test] - fn test_is_handle_returns_true() { - let state = HANDLE; - assert_eq!(state.is_handle(), true); - } - - #[test] - fn test_is_handle_returns_false() { - let state = State::default(); - assert_eq!(state.is_handle(), false); - } - - #[test] - fn test_is_awaiter_returns_true() { - let state = AWAITER; - assert_eq!(state.is_awaiter(), true); - } - - #[test] - fn test_is_awaiter_returns_false() { - let state = State::default(); - assert_eq!(state.is_awaiter(), false); - } - - #[test] - fn test_is_locked_returns_true() { - let state = LOCKED; - assert_eq!(state.is_locked(), true); - } - - #[test] - fn test_is_locked_returns_false() { - let state = State::default(); - assert_eq!(state.is_locked(), false); - } - - #[test] - fn test_is_pending_returns_true() { - let state = State::default(); - assert_eq!(state.is_pending(), true); - } - - #[test] - fn test_is_pending_returns_false() { - let state = COMPLETED; - assert_eq!(state.is_pending(), false); + let flags = RUNNING | AWAITER | COMPLETED; + assert_eq!(flags, RUNNING | AWAITER | COMPLETED); } #[test] fn test_add_sub_refcount() { - let state = State::default(); + let state = State::new(StateFlags::default(), 0); assert_eq!(state.get_refcount(), 0); - let state = state + 5; + let state = state.set_refcount(5); assert_eq!(state.get_refcount(), 5); - let mut state = state - 2; + let state = 
state.set_refcount(3); assert_eq!(state.get_refcount(), 3); - state.set_refcount(1); + let state = state.set_refcount(1); assert_eq!(state.get_refcount(), 1); } + + #[test] + fn test_mixed_refcount() { + let flags = SCHEDULED | RUNNING | AWAITER; + let state = State::new(flags, 0); + println!("{:?}", state); + + assert_eq!(state.get_refcount(), 0); + + let state = state.set_refcount(5); + println!("{:?}", state); + assert_eq!(state.get_refcount(), 5); + + let (mut flags, references) = state.parts(); + assert_eq!(references, 5); + + flags &= !AWAITER; + let state = State::new(flags, references); + println!("{:?}", state); + + assert_eq!(state.get_refcount(), 5); + + let state = state.set_refcount(3); + println!("{:?}", state); + assert_eq!(state.get_refcount(), 3); + + let state = state.set_refcount(1); + println!("{:?}", state); + assert_eq!(state.get_refcount(), 1); + + assert_eq!(state.get_flags(), SCHEDULED | RUNNING); + } }
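Every state transition in the lightproc changes above follows the same shape: decode the packed word into (flags, reference count), build a complete replacement State, and publish it with a single compare-exchange, retrying on contention. The sketch below is a minimal, self-contained illustration of that pattern and is not the crate's API: it packs the two halves into a plain u64 (reference count in the low 32 bits, flags in the high 32 bits) rather than lightproc's byte-array State, and the two flag constants are stand-ins for the real SCHEDULED/CLOSED bits.

use std::sync::atomic::{AtomicU64, Ordering};

// Illustrative flag values; the real crate defines these via bitflags.
const SCHEDULED: u32 = 1 << 0;
const CLOSED: u32 = 1 << 3;

// Pack (flags, refcount) into one word so both can be updated by a single CAS.
const fn pack(flags: u32, refs: u32) -> u64 {
    ((flags as u64) << 32) | refs as u64
}

const fn unpack(v: u64) -> (u32, u32) {
    ((v >> 32) as u32, v as u32)
}

fn close(state: &AtomicU64) {
    let mut current = state.load(Ordering::Acquire);
    loop {
        let (flags, refs) = unpack(current);
        if flags & CLOSED != 0 {
            return; // Already closed; nothing to do.
        }
        // Rebuild the whole word from its decoded parts, mirroring
        // `State::new(flags | CLOSED, references)` in the diff.
        let new = pack(flags | CLOSED, refs);
        match state.compare_exchange_weak(current, new, Ordering::AcqRel, Ordering::Acquire) {
            Ok(_) => return,
            // Another thread (or a spurious failure) changed the word; retry with the fresh value.
            Err(actual) => current = actual,
        }
    }
}

fn main() {
    let state = AtomicU64::new(pack(SCHEDULED, 1));
    close(&state);
    let (flags, refs) = unpack(state.load(Ordering::Acquire));
    assert_eq!(flags, SCHEDULED | CLOSED);
    assert_eq!(refs, 1);
}

Keeping the flags and the count in one atomic word is what lets the cancel and drop paths set a flag and adjust the reference count in the same compare-exchange, without a separate lock.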