diff --git a/runtime/asyncio/Cargo.toml b/runtime/asyncio/Cargo.toml new file mode 100644 index 0000000..524e614 --- /dev/null +++ b/runtime/asyncio/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "asyncio" +version = "0.1.0" +edition = "2021" +description = "io_uring-first async I/O implementation" +readme = "README.md" +publish = false + +[dependencies] +static_assertions = "1.1" +libc = "0.2" +nix = "0.23" +bitflags = "1.3" +ptr_meta = "0.1" + +crossbeam-queue = "0.3" \ No newline at end of file diff --git a/runtime/asyncio/examples/raw.rs b/runtime/asyncio/examples/raw.rs new file mode 100644 index 0000000..c2a8993 --- /dev/null +++ b/runtime/asyncio/examples/raw.rs @@ -0,0 +1,13 @@ +use std::fs::File; +use asyncio::ctypes::IORING_OP; +use asyncio::io_uring::IoUring; + +fn main() { + let file = File::open("") + let ring = IoUring::setup(64).unwrap(); + let cqes = ring.cqes(); + ring.try_prepare(1, |sqes| { + let sqe = sqes.next().unwrap(); + sqe.set_opcode(IORING_OP::READ) + }).unwrap(); +} \ No newline at end of file diff --git a/runtime/asyncio/gen.rs b/runtime/asyncio/gen.rs new file mode 100644 index 0000000..9d4ff67 --- /dev/null +++ b/runtime/asyncio/gen.rs @@ -0,0 +1,1768 @@ +/* automatically generated by rust-bindgen 0.59.1 */ + +#[repr(C)] +#[derive(Default)] +pub struct __IncompleteArrayField(::std::marker::PhantomData, [T; 0]); +impl __IncompleteArrayField { + #[inline] + pub const fn new() -> Self { + __IncompleteArrayField(::std::marker::PhantomData, []) + } + #[inline] + pub fn as_ptr(&self) -> *const T { + self as *const _ as *const T + } + #[inline] + pub fn as_mut_ptr(&mut self) -> *mut T { + self as *mut _ as *mut T + } + #[inline] + pub unsafe fn as_slice(&self, len: usize) -> &[T] { + ::std::slice::from_raw_parts(self.as_ptr(), len) + } + #[inline] + pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { + ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len) + } +} +impl ::std::fmt::Debug for __IncompleteArrayField { + fn 
fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + fmt.write_str("__IncompleteArrayField") + } +} +pub type __kernel_sighandler_t = + ::std::option::Option; +pub type __kernel_key_t = ::std::os::raw::c_int; +pub type __kernel_mqd_t = ::std::os::raw::c_int; +pub type __kernel_old_uid_t = ::std::os::raw::c_ushort; +pub type __kernel_old_gid_t = ::std::os::raw::c_ushort; +pub type __kernel_old_dev_t = ::std::os::raw::c_ulong; +pub type __kernel_long_t = ::std::os::raw::c_long; +pub type __kernel_ulong_t = ::std::os::raw::c_ulong; +pub type __kernel_ino_t = __kernel_ulong_t; +pub type __kernel_mode_t = ::std::os::raw::c_uint; +pub type __kernel_pid_t = ::std::os::raw::c_int; +pub type __kernel_ipc_pid_t = ::std::os::raw::c_int; +pub type __kernel_uid_t = ::std::os::raw::c_uint; +pub type __kernel_gid_t = ::std::os::raw::c_uint; +pub type __kernel_suseconds_t = __kernel_long_t; +pub type __kernel_daddr_t = ::std::os::raw::c_int; +pub type __kernel_uid32_t = ::std::os::raw::c_uint; +pub type __kernel_gid32_t = ::std::os::raw::c_uint; +pub type __kernel_size_t = __kernel_ulong_t; +pub type __kernel_ssize_t = __kernel_long_t; +pub type __kernel_ptrdiff_t = __kernel_long_t; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct __kernel_fsid_t { + pub val: [::std::os::raw::c_int; 2usize], +} +#[test] +fn bindgen_test_layout___kernel_fsid_t() { + assert_eq!( + ::std::mem::size_of::<__kernel_fsid_t>(), + 8usize, + concat!("Size of: ", stringify!(__kernel_fsid_t)) + ); + assert_eq!( + ::std::mem::align_of::<__kernel_fsid_t>(), + 4usize, + concat!("Alignment of ", stringify!(__kernel_fsid_t)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::<__kernel_fsid_t>())).val as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__kernel_fsid_t), + "::", + stringify!(val) + ) + ); +} +pub type __kernel_off_t = __kernel_long_t; +pub type __kernel_loff_t = ::std::os::raw::c_longlong; +pub type __kernel_old_time_t = __kernel_long_t; +pub 
type __kernel_time_t = __kernel_long_t; +pub type __kernel_time64_t = ::std::os::raw::c_longlong; +pub type __kernel_clock_t = __kernel_long_t; +pub type __kernel_timer_t = ::std::os::raw::c_int; +pub type __kernel_clockid_t = ::std::os::raw::c_int; +pub type __kernel_caddr_t = *mut ::std::os::raw::c_char; +pub type __kernel_uid16_t = ::std::os::raw::c_ushort; +pub type __kernel_gid16_t = ::std::os::raw::c_ushort; +pub type __le16 = __u16; +pub type __be16 = __u16; +pub type __le32 = __u32; +pub type __be32 = __u32; +pub type __le64 = __u64; +pub type __be64 = __u64; +pub type __sum16 = __u16; +pub type __wsum = __u32; +pub type __poll_t = ::std::os::raw::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct fscrypt_policy_v1 { + pub version: __u8, + pub contents_encryption_mode: __u8, + pub filenames_encryption_mode: __u8, + pub flags: __u8, + pub master_key_descriptor: [__u8; 8usize], +} +#[test] +fn bindgen_test_layout_fscrypt_policy_v1() { + assert_eq!( + ::std::mem::size_of::(), + 12usize, + concat!("Size of: ", stringify!(fscrypt_policy_v1)) + ); + assert_eq!( + ::std::mem::align_of::(), + 1usize, + concat!("Alignment of ", stringify!(fscrypt_policy_v1)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).version as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v1), + "::", + stringify!(version) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).contents_encryption_mode as *const _ + as usize + }, + 1usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v1), + "::", + stringify!(contents_encryption_mode) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).filenames_encryption_mode as *const _ + as usize + }, + 2usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v1), + "::", + stringify!(filenames_encryption_mode) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 3usize, + concat!( + "Offset of field: 
", + stringify!(fscrypt_policy_v1), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).master_key_descriptor as *const _ as usize + }, + 4usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v1), + "::", + stringify!(master_key_descriptor) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct fscrypt_key { + pub mode: __u32, + pub raw: [__u8; 64usize], + pub size: __u32, +} +#[test] +fn bindgen_test_layout_fscrypt_key() { + assert_eq!( + ::std::mem::size_of::(), + 72usize, + concat!("Size of: ", stringify!(fscrypt_key)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(fscrypt_key)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).mode as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key), + "::", + stringify!(mode) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).raw as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key), + "::", + stringify!(raw) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).size as *const _ as usize }, + 68usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key), + "::", + stringify!(size) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct fscrypt_policy_v2 { + pub version: __u8, + pub contents_encryption_mode: __u8, + pub filenames_encryption_mode: __u8, + pub flags: __u8, + pub __reserved: [__u8; 4usize], + pub master_key_identifier: [__u8; 16usize], +} +#[test] +fn bindgen_test_layout_fscrypt_policy_v2() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(fscrypt_policy_v2)) + ); + assert_eq!( + ::std::mem::align_of::(), + 1usize, + concat!("Alignment of ", stringify!(fscrypt_policy_v2)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).version as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v2), + "::", 
+ stringify!(version) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).contents_encryption_mode as *const _ + as usize + }, + 1usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v2), + "::", + stringify!(contents_encryption_mode) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).filenames_encryption_mode as *const _ + as usize + }, + 2usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v2), + "::", + stringify!(filenames_encryption_mode) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 3usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v2), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).__reserved as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v2), + "::", + stringify!(__reserved) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).master_key_identifier as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v2), + "::", + stringify!(master_key_identifier) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct fscrypt_get_policy_ex_arg { + pub policy_size: __u64, + pub policy: fscrypt_get_policy_ex_arg__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union fscrypt_get_policy_ex_arg__bindgen_ty_1 { + pub version: __u8, + pub v1: fscrypt_policy_v1, + pub v2: fscrypt_policy_v2, +} +#[test] +fn bindgen_test_layout_fscrypt_get_policy_ex_arg__bindgen_ty_1() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!( + "Size of: ", + stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1) + ) + ); + assert_eq!( + ::std::mem::align_of::(), + 1usize, + concat!( + "Alignment of ", + stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).version as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + 
stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1), + "::", + stringify!(version) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).v1 as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1), + "::", + stringify!(v1) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).v2 as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1), + "::", + stringify!(v2) + ) + ); +} +#[test] +fn bindgen_test_layout_fscrypt_get_policy_ex_arg() { + assert_eq!( + ::std::mem::size_of::(), + 32usize, + concat!("Size of: ", stringify!(fscrypt_get_policy_ex_arg)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(fscrypt_get_policy_ex_arg)) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).policy_size as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_policy_ex_arg), + "::", + stringify!(policy_size) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).policy as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_policy_ex_arg), + "::", + stringify!(policy) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct fscrypt_key_specifier { + pub type_: __u32, + pub __reserved: __u32, + pub u: fscrypt_key_specifier__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union fscrypt_key_specifier__bindgen_ty_1 { + pub __reserved: [__u8; 32usize], + pub descriptor: [__u8; 8usize], + pub identifier: [__u8; 16usize], +} +#[test] +fn bindgen_test_layout_fscrypt_key_specifier__bindgen_ty_1() { + assert_eq!( + ::std::mem::size_of::(), + 32usize, + concat!("Size of: ", stringify!(fscrypt_key_specifier__bindgen_ty_1)) + ); + assert_eq!( + ::std::mem::align_of::(), + 1usize, + concat!( + "Alignment of ", + stringify!(fscrypt_key_specifier__bindgen_ty_1) + ) + ); + assert_eq!( + unsafe { + 
&(*(::std::ptr::null::())).__reserved as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key_specifier__bindgen_ty_1), + "::", + stringify!(__reserved) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).descriptor as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key_specifier__bindgen_ty_1), + "::", + stringify!(descriptor) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).identifier as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key_specifier__bindgen_ty_1), + "::", + stringify!(identifier) + ) + ); +} +#[test] +fn bindgen_test_layout_fscrypt_key_specifier() { + assert_eq!( + ::std::mem::size_of::(), + 40usize, + concat!("Size of: ", stringify!(fscrypt_key_specifier)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(fscrypt_key_specifier)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).type_ as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key_specifier), + "::", + stringify!(type_) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).__reserved as *const _ as usize + }, + 4usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key_specifier), + "::", + stringify!(__reserved) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).u as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key_specifier), + "::", + stringify!(u) + ) + ); +} +#[repr(C)] +#[derive(Debug)] +pub struct fscrypt_provisioning_key_payload { + pub type_: __u32, + pub __reserved: __u32, + pub raw: __IncompleteArrayField<__u8>, +} +#[test] +fn bindgen_test_layout_fscrypt_provisioning_key_payload() { + assert_eq!( + ::std::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(fscrypt_provisioning_key_payload)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!( + 
"Alignment of ", + stringify!(fscrypt_provisioning_key_payload) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).type_ as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_provisioning_key_payload), + "::", + stringify!(type_) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).__reserved as *const _ + as usize + }, + 4usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_provisioning_key_payload), + "::", + stringify!(__reserved) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).raw as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_provisioning_key_payload), + "::", + stringify!(raw) + ) + ); +} +#[repr(C)] +pub struct fscrypt_add_key_arg { + pub key_spec: fscrypt_key_specifier, + pub raw_size: __u32, + pub key_id: __u32, + pub __reserved: [__u32; 8usize], + pub raw: __IncompleteArrayField<__u8>, +} +#[test] +fn bindgen_test_layout_fscrypt_add_key_arg() { + assert_eq!( + ::std::mem::size_of::(), + 80usize, + concat!("Size of: ", stringify!(fscrypt_add_key_arg)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(fscrypt_add_key_arg)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).key_spec as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_add_key_arg), + "::", + stringify!(key_spec) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).raw_size as *const _ as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_add_key_arg), + "::", + stringify!(raw_size) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).key_id as *const _ as usize }, + 44usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_add_key_arg), + "::", + stringify!(key_id) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).__reserved as *const _ as usize }, + 48usize, + concat!( + "Offset of field: ", + 
stringify!(fscrypt_add_key_arg), + "::", + stringify!(__reserved) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).raw as *const _ as usize }, + 80usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_add_key_arg), + "::", + stringify!(raw) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct fscrypt_remove_key_arg { + pub key_spec: fscrypt_key_specifier, + pub removal_status_flags: __u32, + pub __reserved: [__u32; 5usize], +} +#[test] +fn bindgen_test_layout_fscrypt_remove_key_arg() { + assert_eq!( + ::std::mem::size_of::(), + 64usize, + concat!("Size of: ", stringify!(fscrypt_remove_key_arg)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(fscrypt_remove_key_arg)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).key_spec as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_remove_key_arg), + "::", + stringify!(key_spec) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).removal_status_flags as *const _ + as usize + }, + 40usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_remove_key_arg), + "::", + stringify!(removal_status_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).__reserved as *const _ as usize + }, + 44usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_remove_key_arg), + "::", + stringify!(__reserved) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct fscrypt_get_key_status_arg { + pub key_spec: fscrypt_key_specifier, + pub __reserved: [__u32; 6usize], + pub status: __u32, + pub status_flags: __u32, + pub user_count: __u32, + pub __out_reserved: [__u32; 13usize], +} +#[test] +fn bindgen_test_layout_fscrypt_get_key_status_arg() { + assert_eq!( + ::std::mem::size_of::(), + 128usize, + concat!("Size of: ", stringify!(fscrypt_get_key_status_arg)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(fscrypt_get_key_status_arg)) + ); + 
assert_eq!( + unsafe { + &(*(::std::ptr::null::())).key_spec as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_key_status_arg), + "::", + stringify!(key_spec) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).__reserved as *const _ as usize + }, + 40usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_key_status_arg), + "::", + stringify!(__reserved) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).status as *const _ as usize + }, + 64usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_key_status_arg), + "::", + stringify!(status) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).status_flags as *const _ as usize + }, + 68usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_key_status_arg), + "::", + stringify!(status_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).user_count as *const _ as usize + }, + 72usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_key_status_arg), + "::", + stringify!(user_count) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).__out_reserved as *const _ + as usize + }, + 76usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_key_status_arg), + "::", + stringify!(__out_reserved) + ) + ); +} +pub const fsconfig_command_FSCONFIG_SET_FLAG: fsconfig_command = 0; +pub const fsconfig_command_FSCONFIG_SET_STRING: fsconfig_command = 1; +pub const fsconfig_command_FSCONFIG_SET_BINARY: fsconfig_command = 2; +pub const fsconfig_command_FSCONFIG_SET_PATH: fsconfig_command = 3; +pub const fsconfig_command_FSCONFIG_SET_PATH_EMPTY: fsconfig_command = 4; +pub const fsconfig_command_FSCONFIG_SET_FD: fsconfig_command = 5; +pub const fsconfig_command_FSCONFIG_CMD_CREATE: fsconfig_command = 6; +pub const fsconfig_command_FSCONFIG_CMD_RECONFIGURE: fsconfig_command = 7; +pub type fsconfig_command = ::std::os::raw::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct 
mount_attr { + pub attr_set: __u64, + pub attr_clr: __u64, + pub propagation: __u64, + pub userns_fd: __u64, +} +#[test] +fn bindgen_test_layout_mount_attr() { + assert_eq!( + ::std::mem::size_of::(), + 32usize, + concat!("Size of: ", stringify!(mount_attr)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(mount_attr)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).attr_set as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(mount_attr), + "::", + stringify!(attr_set) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).attr_clr as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(mount_attr), + "::", + stringify!(attr_clr) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).propagation as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(mount_attr), + "::", + stringify!(propagation) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).userns_fd as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(mount_attr), + "::", + stringify!(userns_fd) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct file_clone_range { + pub src_fd: __s64, + pub src_offset: __u64, + pub src_length: __u64, + pub dest_offset: __u64, +} +#[test] +fn bindgen_test_layout_file_clone_range() { + assert_eq!( + ::std::mem::size_of::(), + 32usize, + concat!("Size of: ", stringify!(file_clone_range)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(file_clone_range)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).src_fd as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(file_clone_range), + "::", + stringify!(src_fd) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).src_offset as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(file_clone_range), + "::", + 
stringify!(src_offset) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).src_length as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(file_clone_range), + "::", + stringify!(src_length) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).dest_offset as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(file_clone_range), + "::", + stringify!(dest_offset) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct fstrim_range { + pub start: __u64, + pub len: __u64, + pub minlen: __u64, +} +#[test] +fn bindgen_test_layout_fstrim_range() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(fstrim_range)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(fstrim_range)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).start as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fstrim_range), + "::", + stringify!(start) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).len as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(fstrim_range), + "::", + stringify!(len) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).minlen as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(fstrim_range), + "::", + stringify!(minlen) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct file_dedupe_range_info { + pub dest_fd: __s64, + pub dest_offset: __u64, + pub bytes_deduped: __u64, + pub status: __s32, + pub reserved: __u32, +} +#[test] +fn bindgen_test_layout_file_dedupe_range_info() { + assert_eq!( + ::std::mem::size_of::(), + 32usize, + concat!("Size of: ", stringify!(file_dedupe_range_info)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(file_dedupe_range_info)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).dest_fd as *const _ as 
usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range_info), + "::", + stringify!(dest_fd) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).dest_offset as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range_info), + "::", + stringify!(dest_offset) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).bytes_deduped as *const _ as usize + }, + 16usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range_info), + "::", + stringify!(bytes_deduped) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).status as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range_info), + "::", + stringify!(status) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).reserved as *const _ as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range_info), + "::", + stringify!(reserved) + ) + ); +} +#[repr(C)] +#[derive(Debug)] +pub struct file_dedupe_range { + pub src_offset: __u64, + pub src_length: __u64, + pub dest_count: __u16, + pub reserved1: __u16, + pub reserved2: __u32, + pub info: __IncompleteArrayField, +} +#[test] +fn bindgen_test_layout_file_dedupe_range() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(file_dedupe_range)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(file_dedupe_range)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).src_offset as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range), + "::", + stringify!(src_offset) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).src_length as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range), + "::", + stringify!(src_length) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).dest_count as *const _ as usize }, + 16usize, 
+ concat!( + "Offset of field: ", + stringify!(file_dedupe_range), + "::", + stringify!(dest_count) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).reserved1 as *const _ as usize }, + 18usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range), + "::", + stringify!(reserved1) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).reserved2 as *const _ as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range), + "::", + stringify!(reserved2) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).info as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range), + "::", + stringify!(info) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct files_stat_struct { + pub nr_files: ::std::os::raw::c_ulong, + pub nr_free_files: ::std::os::raw::c_ulong, + pub max_files: ::std::os::raw::c_ulong, +} +#[test] +fn bindgen_test_layout_files_stat_struct() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(files_stat_struct)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(files_stat_struct)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).nr_files as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(files_stat_struct), + "::", + stringify!(nr_files) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).nr_free_files as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(files_stat_struct), + "::", + stringify!(nr_free_files) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).max_files as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(files_stat_struct), + "::", + stringify!(max_files) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct inodes_stat_t { + pub nr_inodes: ::std::os::raw::c_long, + pub nr_unused: ::std::os::raw::c_long, + pub dummy: 
[::std::os::raw::c_long; 5usize], +} +#[test] +fn bindgen_test_layout_inodes_stat_t() { + assert_eq!( + ::std::mem::size_of::(), + 56usize, + concat!("Size of: ", stringify!(inodes_stat_t)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(inodes_stat_t)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).nr_inodes as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(inodes_stat_t), + "::", + stringify!(nr_inodes) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).nr_unused as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(inodes_stat_t), + "::", + stringify!(nr_unused) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).dummy as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(inodes_stat_t), + "::", + stringify!(dummy) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct fsxattr { + pub fsx_xflags: __u32, + pub fsx_extsize: __u32, + pub fsx_nextents: __u32, + pub fsx_projid: __u32, + pub fsx_cowextsize: __u32, + pub fsx_pad: [::std::os::raw::c_uchar; 8usize], +} +#[test] +fn bindgen_test_layout_fsxattr() { + assert_eq!( + ::std::mem::size_of::(), + 28usize, + concat!("Size of: ", stringify!(fsxattr)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(fsxattr)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fsx_xflags as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fsxattr), + "::", + stringify!(fsx_xflags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fsx_extsize as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(fsxattr), + "::", + stringify!(fsx_extsize) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fsx_nextents as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(fsxattr), + "::", + stringify!(fsx_nextents) + ) + ); + 
assert_eq!( + unsafe { &(*(::std::ptr::null::())).fsx_projid as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(fsxattr), + "::", + stringify!(fsx_projid) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fsx_cowextsize as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(fsxattr), + "::", + stringify!(fsx_cowextsize) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fsx_pad as *const _ as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(fsxattr), + "::", + stringify!(fsx_pad) + ) + ); +} +pub const IORING_CQE_BUFFER_SHIFT: ::std::os::raw::c_uint = 16; +pub type _bindgen_ty_3 = ::std::os::raw::c_uint; +pub type _bindgen_ty_4 = ::std::os::raw::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct io_uring_files_update { + pub offset: __u32, + pub resv: __u32, + pub fds: __u64, +} +#[test] +fn bindgen_test_layout_io_uring_files_update() { + assert_eq!( + ::std::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(io_uring_files_update)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_files_update)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).offset as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_files_update), + "::", + stringify!(offset) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_files_update), + "::", + stringify!(resv) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fds as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(io_uring_files_update), + "::", + stringify!(fds) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct io_uring_rsrc_update { + pub offset: __u32, + pub resv: __u32, + pub data: __u64, +} +#[test] +fn bindgen_test_layout_io_uring_rsrc_update() { + 
assert_eq!( + ::std::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(io_uring_rsrc_update)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_rsrc_update)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).offset as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_rsrc_update), + "::", + stringify!(offset) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_rsrc_update), + "::", + stringify!(resv) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).data as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(io_uring_rsrc_update), + "::", + stringify!(data) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct io_uring_probe_op { + pub op: __u8, + pub resv: __u8, + pub flags: __u16, + pub resv2: __u32, +} +#[test] +fn bindgen_test_layout_io_uring_probe_op() { + assert_eq!( + ::std::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(io_uring_probe_op)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(io_uring_probe_op)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).op as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe_op), + "::", + stringify!(op) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, + 1usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe_op), + "::", + stringify!(resv) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 2usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe_op), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe_op), 
+ "::", + stringify!(resv2) + ) + ); +} +#[repr(C)] +#[derive(Debug)] +pub struct io_uring_probe { + pub last_op: __u8, + pub ops_len: __u8, + pub resv: __u16, + pub resv2: [__u32; 3usize], + pub ops: __IncompleteArrayField, +} +#[test] +fn bindgen_test_layout_io_uring_probe() { + assert_eq!( + ::std::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(io_uring_probe)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(io_uring_probe)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).last_op as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe), + "::", + stringify!(last_op) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ops_len as *const _ as usize }, + 1usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe), + "::", + stringify!(ops_len) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, + 2usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe), + "::", + stringify!(resv) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe), + "::", + stringify!(resv2) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ops as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe), + "::", + stringify!(ops) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct io_uring_restriction { + pub opcode: __u16, + pub __bindgen_anon_1: io_uring_restriction__bindgen_ty_1, + pub resv: __u8, + pub resv2: [__u32; 3usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union io_uring_restriction__bindgen_ty_1 { + pub register_op: __u8, + pub sqe_op: __u8, + pub sqe_flags: __u8, +} +#[test] +fn bindgen_test_layout_io_uring_restriction__bindgen_ty_1() { + assert_eq!( + ::std::mem::size_of::(), + 1usize, + concat!("Size of: ", 
stringify!(io_uring_restriction__bindgen_ty_1)) + ); + assert_eq!( + ::std::mem::align_of::(), + 1usize, + concat!( + "Alignment of ", + stringify!(io_uring_restriction__bindgen_ty_1) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).register_op as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_restriction__bindgen_ty_1), + "::", + stringify!(register_op) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).sqe_op as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_restriction__bindgen_ty_1), + "::", + stringify!(sqe_op) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).sqe_flags as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_restriction__bindgen_ty_1), + "::", + stringify!(sqe_flags) + ) + ); +} +#[test] +fn bindgen_test_layout_io_uring_restriction() { + assert_eq!( + ::std::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(io_uring_restriction)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(io_uring_restriction)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).opcode as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_restriction), + "::", + stringify!(opcode) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, + 3usize, + concat!( + "Offset of field: ", + stringify!(io_uring_restriction), + "::", + stringify!(resv) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_restriction), + "::", + stringify!(resv2) + ) + ); +} +pub const IORING_RESTRICTION_REGISTER_OP: ::std::os::raw::c_uint = 0; +pub const IORING_RESTRICTION_SQE_OP: ::std::os::raw::c_uint = 1; +pub const IORING_RESTRICTION_SQE_FLAGS_ALLOWED: ::std::os::raw::c_uint = 2; +pub const 
IORING_RESTRICTION_SQE_FLAGS_REQUIRED: ::std::os::raw::c_uint = 3; +pub const IORING_RESTRICTION_LAST: ::std::os::raw::c_uint = 4; +pub type _bindgen_ty_5 = ::std::os::raw::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct io_uring_getevents_arg { + pub sigmask: __u64, + pub sigmask_sz: __u32, + pub pad: __u32, + pub ts: __u64, +} +#[test] +fn bindgen_test_layout_io_uring_getevents_arg() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(io_uring_getevents_arg)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_getevents_arg)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).sigmask as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_getevents_arg), + "::", + stringify!(sigmask) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).sigmask_sz as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(io_uring_getevents_arg), + "::", + stringify!(sigmask_sz) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).pad as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(io_uring_getevents_arg), + "::", + stringify!(pad) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ts as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(io_uring_getevents_arg), + "::", + stringify!(ts) + ) + ); +} diff --git a/runtime/asyncio/gen2.rs b/runtime/asyncio/gen2.rs new file mode 100644 index 0000000..1d37efd --- /dev/null +++ b/runtime/asyncio/gen2.rs @@ -0,0 +1,3040 @@ +/* automatically generated by rust-bindgen 0.59.1 */ + +#[repr(C)] +#[derive(Default)] +pub struct __IncompleteArrayField(::std::marker::PhantomData, [T; 0]); +impl __IncompleteArrayField { + #[inline] + pub const fn new() -> Self { + __IncompleteArrayField(::std::marker::PhantomData, []) + } + #[inline] + pub fn as_ptr(&self) -> *const T { + self as *const _ as 
*const T + } + #[inline] + pub fn as_mut_ptr(&mut self) -> *mut T { + self as *mut _ as *mut T + } + #[inline] + pub unsafe fn as_slice(&self, len: usize) -> &[T] { + ::std::slice::from_raw_parts(self.as_ptr(), len) + } + #[inline] + pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { + ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len) + } +} +impl ::std::fmt::Debug for __IncompleteArrayField { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + fmt.write_str("__IncompleteArrayField") + } +} +pub const NR_OPEN: u32 = 1024; +pub const NGROUPS_MAX: u32 = 65536; +pub const ARG_MAX: u32 = 131072; +pub const LINK_MAX: u32 = 127; +pub const MAX_CANON: u32 = 255; +pub const MAX_INPUT: u32 = 255; +pub const NAME_MAX: u32 = 255; +pub const PATH_MAX: u32 = 4096; +pub const PIPE_BUF: u32 = 4096; +pub const XATTR_NAME_MAX: u32 = 255; +pub const XATTR_SIZE_MAX: u32 = 65536; +pub const XATTR_LIST_MAX: u32 = 65536; +pub const RTSIG_MAX: u32 = 32; +pub const _IOC_NRBITS: u32 = 8; +pub const _IOC_TYPEBITS: u32 = 8; +pub const _IOC_SIZEBITS: u32 = 14; +pub const _IOC_DIRBITS: u32 = 2; +pub const _IOC_NRMASK: u32 = 255; +pub const _IOC_TYPEMASK: u32 = 255; +pub const _IOC_SIZEMASK: u32 = 16383; +pub const _IOC_DIRMASK: u32 = 3; +pub const _IOC_NRSHIFT: u32 = 0; +pub const _IOC_TYPESHIFT: u32 = 8; +pub const _IOC_SIZESHIFT: u32 = 16; +pub const _IOC_DIRSHIFT: u32 = 30; +pub const _IOC_NONE: u32 = 0; +pub const _IOC_WRITE: u32 = 1; +pub const _IOC_READ: u32 = 2; +pub const IOC_IN: u32 = 1073741824; +pub const IOC_OUT: u32 = 2147483648; +pub const IOC_INOUT: u32 = 3221225472; +pub const IOCSIZE_MASK: u32 = 1073676288; +pub const IOCSIZE_SHIFT: u32 = 16; +pub const __BITS_PER_LONG: u32 = 64; +pub const __FD_SETSIZE: u32 = 1024; +pub const FSCRYPT_POLICY_FLAGS_PAD_4: u32 = 0; +pub const FSCRYPT_POLICY_FLAGS_PAD_8: u32 = 1; +pub const FSCRYPT_POLICY_FLAGS_PAD_16: u32 = 2; +pub const FSCRYPT_POLICY_FLAGS_PAD_32: u32 = 3; +pub const 
FSCRYPT_POLICY_FLAGS_PAD_MASK: u32 = 3; +pub const FSCRYPT_POLICY_FLAG_DIRECT_KEY: u32 = 4; +pub const FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64: u32 = 8; +pub const FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32: u32 = 16; +pub const FSCRYPT_MODE_AES_256_XTS: u32 = 1; +pub const FSCRYPT_MODE_AES_256_CTS: u32 = 4; +pub const FSCRYPT_MODE_AES_128_CBC: u32 = 5; +pub const FSCRYPT_MODE_AES_128_CTS: u32 = 6; +pub const FSCRYPT_MODE_ADIANTUM: u32 = 9; +pub const FSCRYPT_POLICY_V1: u32 = 0; +pub const FSCRYPT_KEY_DESCRIPTOR_SIZE: u32 = 8; +pub const FSCRYPT_KEY_DESC_PREFIX: &'static [u8; 9usize] = b"fscrypt:\0"; +pub const FSCRYPT_KEY_DESC_PREFIX_SIZE: u32 = 8; +pub const FSCRYPT_MAX_KEY_SIZE: u32 = 64; +pub const FSCRYPT_POLICY_V2: u32 = 2; +pub const FSCRYPT_KEY_IDENTIFIER_SIZE: u32 = 16; +pub const FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: u32 = 1; +pub const FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: u32 = 2; +pub const FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY: u32 = 1; +pub const FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS: u32 = 2; +pub const FSCRYPT_KEY_STATUS_ABSENT: u32 = 1; +pub const FSCRYPT_KEY_STATUS_PRESENT: u32 = 2; +pub const FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED: u32 = 3; +pub const FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF: u32 = 1; +pub const FS_KEY_DESCRIPTOR_SIZE: u32 = 8; +pub const FS_POLICY_FLAGS_PAD_4: u32 = 0; +pub const FS_POLICY_FLAGS_PAD_8: u32 = 1; +pub const FS_POLICY_FLAGS_PAD_16: u32 = 2; +pub const FS_POLICY_FLAGS_PAD_32: u32 = 3; +pub const FS_POLICY_FLAGS_PAD_MASK: u32 = 3; +pub const FS_POLICY_FLAG_DIRECT_KEY: u32 = 4; +pub const FS_POLICY_FLAGS_VALID: u32 = 7; +pub const FS_ENCRYPTION_MODE_INVALID: u32 = 0; +pub const FS_ENCRYPTION_MODE_AES_256_XTS: u32 = 1; +pub const FS_ENCRYPTION_MODE_AES_256_GCM: u32 = 2; +pub const FS_ENCRYPTION_MODE_AES_256_CBC: u32 = 3; +pub const FS_ENCRYPTION_MODE_AES_256_CTS: u32 = 4; +pub const FS_ENCRYPTION_MODE_AES_128_CBC: u32 = 5; +pub const FS_ENCRYPTION_MODE_AES_128_CTS: u32 = 6; +pub const FS_ENCRYPTION_MODE_SPECK128_256_XTS: u32 = 7; 
+pub const FS_ENCRYPTION_MODE_SPECK128_256_CTS: u32 = 8; +pub const FS_ENCRYPTION_MODE_ADIANTUM: u32 = 9; +pub const FS_KEY_DESC_PREFIX: &'static [u8; 9usize] = b"fscrypt:\0"; +pub const FS_KEY_DESC_PREFIX_SIZE: u32 = 8; +pub const FS_MAX_KEY_SIZE: u32 = 64; +pub const MS_RDONLY: u32 = 1; +pub const MS_NOSUID: u32 = 2; +pub const MS_NODEV: u32 = 4; +pub const MS_NOEXEC: u32 = 8; +pub const MS_SYNCHRONOUS: u32 = 16; +pub const MS_REMOUNT: u32 = 32; +pub const MS_MANDLOCK: u32 = 64; +pub const MS_DIRSYNC: u32 = 128; +pub const MS_NOSYMFOLLOW: u32 = 256; +pub const MS_NOATIME: u32 = 1024; +pub const MS_NODIRATIME: u32 = 2048; +pub const MS_BIND: u32 = 4096; +pub const MS_MOVE: u32 = 8192; +pub const MS_REC: u32 = 16384; +pub const MS_VERBOSE: u32 = 32768; +pub const MS_SILENT: u32 = 32768; +pub const MS_POSIXACL: u32 = 65536; +pub const MS_UNBINDABLE: u32 = 131072; +pub const MS_PRIVATE: u32 = 262144; +pub const MS_SLAVE: u32 = 524288; +pub const MS_SHARED: u32 = 1048576; +pub const MS_RELATIME: u32 = 2097152; +pub const MS_KERNMOUNT: u32 = 4194304; +pub const MS_I_VERSION: u32 = 8388608; +pub const MS_STRICTATIME: u32 = 16777216; +pub const MS_LAZYTIME: u32 = 33554432; +pub const MS_SUBMOUNT: u32 = 67108864; +pub const MS_NOREMOTELOCK: u32 = 134217728; +pub const MS_NOSEC: u32 = 268435456; +pub const MS_BORN: u32 = 536870912; +pub const MS_ACTIVE: u32 = 1073741824; +pub const MS_NOUSER: u32 = 2147483648; +pub const MS_RMT_MASK: u32 = 41943121; +pub const MS_MGC_VAL: u32 = 3236757504; +pub const MS_MGC_MSK: u32 = 4294901760; +pub const OPEN_TREE_CLONE: u32 = 1; +pub const MOVE_MOUNT_F_SYMLINKS: u32 = 1; +pub const MOVE_MOUNT_F_AUTOMOUNTS: u32 = 2; +pub const MOVE_MOUNT_F_EMPTY_PATH: u32 = 4; +pub const MOVE_MOUNT_T_SYMLINKS: u32 = 16; +pub const MOVE_MOUNT_T_AUTOMOUNTS: u32 = 32; +pub const MOVE_MOUNT_T_EMPTY_PATH: u32 = 64; +pub const MOVE_MOUNT__MASK: u32 = 119; +pub const FSOPEN_CLOEXEC: u32 = 1; +pub const FSPICK_CLOEXEC: u32 = 1; +pub const 
FSPICK_SYMLINK_NOFOLLOW: u32 = 2; +pub const FSPICK_NO_AUTOMOUNT: u32 = 4; +pub const FSPICK_EMPTY_PATH: u32 = 8; +pub const FSMOUNT_CLOEXEC: u32 = 1; +pub const MOUNT_ATTR_RDONLY: u32 = 1; +pub const MOUNT_ATTR_NOSUID: u32 = 2; +pub const MOUNT_ATTR_NODEV: u32 = 4; +pub const MOUNT_ATTR_NOEXEC: u32 = 8; +pub const MOUNT_ATTR__ATIME: u32 = 112; +pub const MOUNT_ATTR_RELATIME: u32 = 0; +pub const MOUNT_ATTR_NOATIME: u32 = 16; +pub const MOUNT_ATTR_STRICTATIME: u32 = 32; +pub const MOUNT_ATTR_NODIRATIME: u32 = 128; +pub const MOUNT_ATTR_IDMAP: u32 = 1048576; +pub const MOUNT_ATTR_SIZE_VER0: u32 = 32; +pub const INR_OPEN_CUR: u32 = 1024; +pub const INR_OPEN_MAX: u32 = 4096; +pub const BLOCK_SIZE_BITS: u32 = 10; +pub const BLOCK_SIZE: u32 = 1024; +pub const SEEK_SET: u32 = 0; +pub const SEEK_CUR: u32 = 1; +pub const SEEK_END: u32 = 2; +pub const SEEK_DATA: u32 = 3; +pub const SEEK_HOLE: u32 = 4; +pub const SEEK_MAX: u32 = 4; +pub const RENAME_NOREPLACE: u32 = 1; +pub const RENAME_EXCHANGE: u32 = 2; +pub const RENAME_WHITEOUT: u32 = 4; +pub const FILE_DEDUPE_RANGE_SAME: u32 = 0; +pub const FILE_DEDUPE_RANGE_DIFFERS: u32 = 1; +pub const NR_FILE: u32 = 8192; +pub const FS_XFLAG_REALTIME: u32 = 1; +pub const FS_XFLAG_PREALLOC: u32 = 2; +pub const FS_XFLAG_IMMUTABLE: u32 = 8; +pub const FS_XFLAG_APPEND: u32 = 16; +pub const FS_XFLAG_SYNC: u32 = 32; +pub const FS_XFLAG_NOATIME: u32 = 64; +pub const FS_XFLAG_NODUMP: u32 = 128; +pub const FS_XFLAG_RTINHERIT: u32 = 256; +pub const FS_XFLAG_PROJINHERIT: u32 = 512; +pub const FS_XFLAG_NOSYMLINKS: u32 = 1024; +pub const FS_XFLAG_EXTSIZE: u32 = 2048; +pub const FS_XFLAG_EXTSZINHERIT: u32 = 4096; +pub const FS_XFLAG_NODEFRAG: u32 = 8192; +pub const FS_XFLAG_FILESTREAM: u32 = 16384; +pub const FS_XFLAG_DAX: u32 = 32768; +pub const FS_XFLAG_COWEXTSIZE: u32 = 65536; +pub const FS_XFLAG_HASATTR: u32 = 2147483648; +pub const BMAP_IOCTL: u32 = 1; +pub const FSLABEL_MAX: u32 = 256; +pub const FS_SECRM_FL: u32 = 1; +pub const FS_UNRM_FL: 
u32 = 2; +pub const FS_COMPR_FL: u32 = 4; +pub const FS_SYNC_FL: u32 = 8; +pub const FS_IMMUTABLE_FL: u32 = 16; +pub const FS_APPEND_FL: u32 = 32; +pub const FS_NODUMP_FL: u32 = 64; +pub const FS_NOATIME_FL: u32 = 128; +pub const FS_DIRTY_FL: u32 = 256; +pub const FS_COMPRBLK_FL: u32 = 512; +pub const FS_NOCOMP_FL: u32 = 1024; +pub const FS_ENCRYPT_FL: u32 = 2048; +pub const FS_BTREE_FL: u32 = 4096; +pub const FS_INDEX_FL: u32 = 4096; +pub const FS_IMAGIC_FL: u32 = 8192; +pub const FS_JOURNAL_DATA_FL: u32 = 16384; +pub const FS_NOTAIL_FL: u32 = 32768; +pub const FS_DIRSYNC_FL: u32 = 65536; +pub const FS_TOPDIR_FL: u32 = 131072; +pub const FS_HUGE_FILE_FL: u32 = 262144; +pub const FS_EXTENT_FL: u32 = 524288; +pub const FS_VERITY_FL: u32 = 1048576; +pub const FS_EA_INODE_FL: u32 = 2097152; +pub const FS_EOFBLOCKS_FL: u32 = 4194304; +pub const FS_NOCOW_FL: u32 = 8388608; +pub const FS_DAX_FL: u32 = 33554432; +pub const FS_INLINE_DATA_FL: u32 = 268435456; +pub const FS_PROJINHERIT_FL: u32 = 536870912; +pub const FS_CASEFOLD_FL: u32 = 1073741824; +pub const FS_RESERVED_FL: u32 = 2147483648; +pub const FS_FL_USER_VISIBLE: u32 = 253951; +pub const FS_FL_USER_MODIFIABLE: u32 = 229631; +pub const SYNC_FILE_RANGE_WAIT_BEFORE: u32 = 1; +pub const SYNC_FILE_RANGE_WRITE: u32 = 2; +pub const SYNC_FILE_RANGE_WAIT_AFTER: u32 = 4; +pub const SYNC_FILE_RANGE_WRITE_AND_WAIT: u32 = 7; +pub const IORING_SETUP_IOPOLL: u32 = 1; +pub const IORING_SETUP_SQPOLL: u32 = 2; +pub const IORING_SETUP_SQ_AFF: u32 = 4; +pub const IORING_SETUP_CQSIZE: u32 = 8; +pub const IORING_SETUP_CLAMP: u32 = 16; +pub const IORING_SETUP_ATTACH_WQ: u32 = 32; +pub const IORING_SETUP_R_DISABLED: u32 = 64; +pub const IORING_FSYNC_DATASYNC: u32 = 1; +pub const IORING_TIMEOUT_ABS: u32 = 1; +pub const IORING_TIMEOUT_UPDATE: u32 = 2; +pub const SPLICE_F_FD_IN_FIXED: u32 = 2147483648; +pub const IORING_CQE_F_BUFFER: u32 = 1; +pub const IORING_SQ_NEED_WAKEUP: u32 = 1; +pub const IORING_SQ_CQ_OVERFLOW: u32 = 2; +pub const 
IORING_CQ_EVENTFD_DISABLED: u32 = 1; +pub const IORING_ENTER_GETEVENTS: u32 = 1; +pub const IORING_ENTER_SQ_WAKEUP: u32 = 2; +pub const IORING_ENTER_SQ_WAIT: u32 = 4; +pub const IORING_ENTER_EXT_ARG: u32 = 8; +pub const IORING_FEAT_SINGLE_MMAP: u32 = 1; +pub const IORING_FEAT_NODROP: u32 = 2; +pub const IORING_FEAT_SUBMIT_STABLE: u32 = 4; +pub const IORING_FEAT_RW_CUR_POS: u32 = 8; +pub const IORING_FEAT_CUR_PERSONALITY: u32 = 16; +pub const IORING_FEAT_FAST_POLL: u32 = 32; +pub const IORING_FEAT_POLL_32BITS: u32 = 64; +pub const IORING_FEAT_SQPOLL_NONFIXED: u32 = 128; +pub const IORING_FEAT_EXT_ARG: u32 = 256; +pub const IORING_FEAT_NATIVE_WORKERS: u32 = 512; +pub const IORING_REGISTER_FILES_SKIP: i32 = -2; +pub const IO_URING_OP_SUPPORTED: u32 = 1; +pub type __s8 = ::std::os::raw::c_schar; +pub type __u8 = ::std::os::raw::c_uchar; +pub type __s16 = ::std::os::raw::c_short; +pub type __u16 = ::std::os::raw::c_ushort; +pub type __s32 = ::std::os::raw::c_int; +pub type __u32 = ::std::os::raw::c_uint; +pub type __s64 = ::std::os::raw::c_longlong; +pub type __u64 = ::std::os::raw::c_ulonglong; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct __kernel_fd_set { + pub fds_bits: [::std::os::raw::c_ulong; 16usize], +} +#[test] +fn bindgen_test_layout___kernel_fd_set() { + assert_eq!( + ::std::mem::size_of::<__kernel_fd_set>(), + 128usize, + concat!("Size of: ", stringify!(__kernel_fd_set)) + ); + assert_eq!( + ::std::mem::align_of::<__kernel_fd_set>(), + 8usize, + concat!("Alignment of ", stringify!(__kernel_fd_set)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::<__kernel_fd_set>())).fds_bits as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__kernel_fd_set), + "::", + stringify!(fds_bits) + ) + ); +} +pub type __kernel_sighandler_t = + ::std::option::Option; +pub type __kernel_key_t = ::std::os::raw::c_int; +pub type __kernel_mqd_t = ::std::os::raw::c_int; +pub type __kernel_old_uid_t = ::std::os::raw::c_ushort; +pub type 
__kernel_old_gid_t = ::std::os::raw::c_ushort; +pub type __kernel_old_dev_t = ::std::os::raw::c_ulong; +pub type __kernel_long_t = ::std::os::raw::c_long; +pub type __kernel_ulong_t = ::std::os::raw::c_ulong; +pub type __kernel_ino_t = __kernel_ulong_t; +pub type __kernel_mode_t = ::std::os::raw::c_uint; +pub type __kernel_pid_t = ::std::os::raw::c_int; +pub type __kernel_ipc_pid_t = ::std::os::raw::c_int; +pub type __kernel_uid_t = ::std::os::raw::c_uint; +pub type __kernel_gid_t = ::std::os::raw::c_uint; +pub type __kernel_suseconds_t = __kernel_long_t; +pub type __kernel_daddr_t = ::std::os::raw::c_int; +pub type __kernel_uid32_t = ::std::os::raw::c_uint; +pub type __kernel_gid32_t = ::std::os::raw::c_uint; +pub type __kernel_size_t = __kernel_ulong_t; +pub type __kernel_ssize_t = __kernel_long_t; +pub type __kernel_ptrdiff_t = __kernel_long_t; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct __kernel_fsid_t { + pub val: [::std::os::raw::c_int; 2usize], +} +#[test] +fn bindgen_test_layout___kernel_fsid_t() { + assert_eq!( + ::std::mem::size_of::<__kernel_fsid_t>(), + 8usize, + concat!("Size of: ", stringify!(__kernel_fsid_t)) + ); + assert_eq!( + ::std::mem::align_of::<__kernel_fsid_t>(), + 4usize, + concat!("Alignment of ", stringify!(__kernel_fsid_t)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::<__kernel_fsid_t>())).val as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__kernel_fsid_t), + "::", + stringify!(val) + ) + ); +} +pub type __kernel_off_t = __kernel_long_t; +pub type __kernel_loff_t = ::std::os::raw::c_longlong; +pub type __kernel_old_time_t = __kernel_long_t; +pub type __kernel_time_t = __kernel_long_t; +pub type __kernel_time64_t = ::std::os::raw::c_longlong; +pub type __kernel_clock_t = __kernel_long_t; +pub type __kernel_timer_t = ::std::os::raw::c_int; +pub type __kernel_clockid_t = ::std::os::raw::c_int; +pub type __kernel_caddr_t = *mut ::std::os::raw::c_char; +pub type __kernel_uid16_t = 
::std::os::raw::c_ushort; +pub type __kernel_gid16_t = ::std::os::raw::c_ushort; +pub type __le16 = __u16; +pub type __be16 = __u16; +pub type __le32 = __u32; +pub type __be32 = __u32; +pub type __le64 = __u64; +pub type __be64 = __u64; +pub type __sum16 = __u16; +pub type __wsum = __u32; +pub type __poll_t = ::std::os::raw::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct fscrypt_policy_v1 { + pub version: __u8, + pub contents_encryption_mode: __u8, + pub filenames_encryption_mode: __u8, + pub flags: __u8, + pub master_key_descriptor: [__u8; 8usize], +} +#[test] +fn bindgen_test_layout_fscrypt_policy_v1() { + assert_eq!( + ::std::mem::size_of::(), + 12usize, + concat!("Size of: ", stringify!(fscrypt_policy_v1)) + ); + assert_eq!( + ::std::mem::align_of::(), + 1usize, + concat!("Alignment of ", stringify!(fscrypt_policy_v1)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).version as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v1), + "::", + stringify!(version) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).contents_encryption_mode as *const _ + as usize + }, + 1usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v1), + "::", + stringify!(contents_encryption_mode) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).filenames_encryption_mode as *const _ + as usize + }, + 2usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v1), + "::", + stringify!(filenames_encryption_mode) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 3usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v1), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).master_key_descriptor as *const _ as usize + }, + 4usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v1), + "::", + stringify!(master_key_descriptor) + ) + ); +} +#[repr(C)] +#[derive(Debug, 
Copy, Clone)] +pub struct fscrypt_key { + pub mode: __u32, + pub raw: [__u8; 64usize], + pub size: __u32, +} +#[test] +fn bindgen_test_layout_fscrypt_key() { + assert_eq!( + ::std::mem::size_of::(), + 72usize, + concat!("Size of: ", stringify!(fscrypt_key)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(fscrypt_key)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).mode as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key), + "::", + stringify!(mode) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).raw as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key), + "::", + stringify!(raw) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).size as *const _ as usize }, + 68usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key), + "::", + stringify!(size) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct fscrypt_policy_v2 { + pub version: __u8, + pub contents_encryption_mode: __u8, + pub filenames_encryption_mode: __u8, + pub flags: __u8, + pub __reserved: [__u8; 4usize], + pub master_key_identifier: [__u8; 16usize], +} +#[test] +fn bindgen_test_layout_fscrypt_policy_v2() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(fscrypt_policy_v2)) + ); + assert_eq!( + ::std::mem::align_of::(), + 1usize, + concat!("Alignment of ", stringify!(fscrypt_policy_v2)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).version as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v2), + "::", + stringify!(version) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).contents_encryption_mode as *const _ + as usize + }, + 1usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v2), + "::", + stringify!(contents_encryption_mode) + ) + ); + assert_eq!( + unsafe { + 
&(*(::std::ptr::null::())).filenames_encryption_mode as *const _ + as usize + }, + 2usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v2), + "::", + stringify!(filenames_encryption_mode) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 3usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v2), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).__reserved as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v2), + "::", + stringify!(__reserved) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).master_key_identifier as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_policy_v2), + "::", + stringify!(master_key_identifier) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct fscrypt_get_policy_ex_arg { + pub policy_size: __u64, + pub policy: fscrypt_get_policy_ex_arg__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union fscrypt_get_policy_ex_arg__bindgen_ty_1 { + pub version: __u8, + pub v1: fscrypt_policy_v1, + pub v2: fscrypt_policy_v2, +} +#[test] +fn bindgen_test_layout_fscrypt_get_policy_ex_arg__bindgen_ty_1() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!( + "Size of: ", + stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1) + ) + ); + assert_eq!( + ::std::mem::align_of::(), + 1usize, + concat!( + "Alignment of ", + stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).version as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1), + "::", + stringify!(version) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).v1 as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1), + "::", + stringify!(v1) + ) + ); + 
assert_eq!( + unsafe { + &(*(::std::ptr::null::())).v2 as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_policy_ex_arg__bindgen_ty_1), + "::", + stringify!(v2) + ) + ); +} +#[test] +fn bindgen_test_layout_fscrypt_get_policy_ex_arg() { + assert_eq!( + ::std::mem::size_of::(), + 32usize, + concat!("Size of: ", stringify!(fscrypt_get_policy_ex_arg)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(fscrypt_get_policy_ex_arg)) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).policy_size as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_policy_ex_arg), + "::", + stringify!(policy_size) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).policy as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_policy_ex_arg), + "::", + stringify!(policy) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct fscrypt_key_specifier { + pub type_: __u32, + pub __reserved: __u32, + pub u: fscrypt_key_specifier__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union fscrypt_key_specifier__bindgen_ty_1 { + pub __reserved: [__u8; 32usize], + pub descriptor: [__u8; 8usize], + pub identifier: [__u8; 16usize], +} +#[test] +fn bindgen_test_layout_fscrypt_key_specifier__bindgen_ty_1() { + assert_eq!( + ::std::mem::size_of::(), + 32usize, + concat!("Size of: ", stringify!(fscrypt_key_specifier__bindgen_ty_1)) + ); + assert_eq!( + ::std::mem::align_of::(), + 1usize, + concat!( + "Alignment of ", + stringify!(fscrypt_key_specifier__bindgen_ty_1) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).__reserved as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key_specifier__bindgen_ty_1), + "::", + stringify!(__reserved) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).descriptor as *const _ + as usize + }, + 0usize, + 
concat!( + "Offset of field: ", + stringify!(fscrypt_key_specifier__bindgen_ty_1), + "::", + stringify!(descriptor) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).identifier as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key_specifier__bindgen_ty_1), + "::", + stringify!(identifier) + ) + ); +} +#[test] +fn bindgen_test_layout_fscrypt_key_specifier() { + assert_eq!( + ::std::mem::size_of::(), + 40usize, + concat!("Size of: ", stringify!(fscrypt_key_specifier)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(fscrypt_key_specifier)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).type_ as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key_specifier), + "::", + stringify!(type_) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).__reserved as *const _ as usize + }, + 4usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key_specifier), + "::", + stringify!(__reserved) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).u as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_key_specifier), + "::", + stringify!(u) + ) + ); +} +#[repr(C)] +#[derive(Debug)] +pub struct fscrypt_provisioning_key_payload { + pub type_: __u32, + pub __reserved: __u32, + pub raw: __IncompleteArrayField<__u8>, +} +#[test] +fn bindgen_test_layout_fscrypt_provisioning_key_payload() { + assert_eq!( + ::std::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(fscrypt_provisioning_key_payload)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!( + "Alignment of ", + stringify!(fscrypt_provisioning_key_payload) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).type_ as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_provisioning_key_payload), + "::", + stringify!(type_) + ) + ); + assert_eq!( + unsafe { 
+ &(*(::std::ptr::null::())).__reserved as *const _ + as usize + }, + 4usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_provisioning_key_payload), + "::", + stringify!(__reserved) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).raw as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_provisioning_key_payload), + "::", + stringify!(raw) + ) + ); +} +#[repr(C)] +pub struct fscrypt_add_key_arg { + pub key_spec: fscrypt_key_specifier, + pub raw_size: __u32, + pub key_id: __u32, + pub __reserved: [__u32; 8usize], + pub raw: __IncompleteArrayField<__u8>, +} +#[test] +fn bindgen_test_layout_fscrypt_add_key_arg() { + assert_eq!( + ::std::mem::size_of::(), + 80usize, + concat!("Size of: ", stringify!(fscrypt_add_key_arg)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(fscrypt_add_key_arg)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).key_spec as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_add_key_arg), + "::", + stringify!(key_spec) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).raw_size as *const _ as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_add_key_arg), + "::", + stringify!(raw_size) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).key_id as *const _ as usize }, + 44usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_add_key_arg), + "::", + stringify!(key_id) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).__reserved as *const _ as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_add_key_arg), + "::", + stringify!(__reserved) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).raw as *const _ as usize }, + 80usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_add_key_arg), + "::", + stringify!(raw) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct fscrypt_remove_key_arg 
{ + pub key_spec: fscrypt_key_specifier, + pub removal_status_flags: __u32, + pub __reserved: [__u32; 5usize], +} +#[test] +fn bindgen_test_layout_fscrypt_remove_key_arg() { + assert_eq!( + ::std::mem::size_of::(), + 64usize, + concat!("Size of: ", stringify!(fscrypt_remove_key_arg)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(fscrypt_remove_key_arg)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).key_spec as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_remove_key_arg), + "::", + stringify!(key_spec) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).removal_status_flags as *const _ + as usize + }, + 40usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_remove_key_arg), + "::", + stringify!(removal_status_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).__reserved as *const _ as usize + }, + 44usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_remove_key_arg), + "::", + stringify!(__reserved) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct fscrypt_get_key_status_arg { + pub key_spec: fscrypt_key_specifier, + pub __reserved: [__u32; 6usize], + pub status: __u32, + pub status_flags: __u32, + pub user_count: __u32, + pub __out_reserved: [__u32; 13usize], +} +#[test] +fn bindgen_test_layout_fscrypt_get_key_status_arg() { + assert_eq!( + ::std::mem::size_of::(), + 128usize, + concat!("Size of: ", stringify!(fscrypt_get_key_status_arg)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(fscrypt_get_key_status_arg)) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).key_spec as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_key_status_arg), + "::", + stringify!(key_spec) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).__reserved as *const _ as usize + }, + 40usize, + concat!( + "Offset of field: 
", + stringify!(fscrypt_get_key_status_arg), + "::", + stringify!(__reserved) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).status as *const _ as usize + }, + 64usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_key_status_arg), + "::", + stringify!(status) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).status_flags as *const _ as usize + }, + 68usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_key_status_arg), + "::", + stringify!(status_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).user_count as *const _ as usize + }, + 72usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_key_status_arg), + "::", + stringify!(user_count) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).__out_reserved as *const _ + as usize + }, + 76usize, + concat!( + "Offset of field: ", + stringify!(fscrypt_get_key_status_arg), + "::", + stringify!(__out_reserved) + ) + ); +} +pub const fsconfig_command_FSCONFIG_SET_FLAG: fsconfig_command = 0; +pub const fsconfig_command_FSCONFIG_SET_STRING: fsconfig_command = 1; +pub const fsconfig_command_FSCONFIG_SET_BINARY: fsconfig_command = 2; +pub const fsconfig_command_FSCONFIG_SET_PATH: fsconfig_command = 3; +pub const fsconfig_command_FSCONFIG_SET_PATH_EMPTY: fsconfig_command = 4; +pub const fsconfig_command_FSCONFIG_SET_FD: fsconfig_command = 5; +pub const fsconfig_command_FSCONFIG_CMD_CREATE: fsconfig_command = 6; +pub const fsconfig_command_FSCONFIG_CMD_RECONFIGURE: fsconfig_command = 7; +pub type fsconfig_command = ::std::os::raw::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct mount_attr { + pub attr_set: __u64, + pub attr_clr: __u64, + pub propagation: __u64, + pub userns_fd: __u64, +} +#[test] +fn bindgen_test_layout_mount_attr() { + assert_eq!( + ::std::mem::size_of::(), + 32usize, + concat!("Size of: ", stringify!(mount_attr)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", 
stringify!(mount_attr)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).attr_set as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(mount_attr), + "::", + stringify!(attr_set) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).attr_clr as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(mount_attr), + "::", + stringify!(attr_clr) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).propagation as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(mount_attr), + "::", + stringify!(propagation) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).userns_fd as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(mount_attr), + "::", + stringify!(userns_fd) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct file_clone_range { + pub src_fd: __s64, + pub src_offset: __u64, + pub src_length: __u64, + pub dest_offset: __u64, +} +#[test] +fn bindgen_test_layout_file_clone_range() { + assert_eq!( + ::std::mem::size_of::(), + 32usize, + concat!("Size of: ", stringify!(file_clone_range)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(file_clone_range)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).src_fd as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(file_clone_range), + "::", + stringify!(src_fd) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).src_offset as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(file_clone_range), + "::", + stringify!(src_offset) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).src_length as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(file_clone_range), + "::", + stringify!(src_length) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).dest_offset as *const _ as usize }, + 24usize, + concat!( + 
"Offset of field: ", + stringify!(file_clone_range), + "::", + stringify!(dest_offset) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct fstrim_range { + pub start: __u64, + pub len: __u64, + pub minlen: __u64, +} +#[test] +fn bindgen_test_layout_fstrim_range() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(fstrim_range)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(fstrim_range)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).start as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fstrim_range), + "::", + stringify!(start) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).len as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(fstrim_range), + "::", + stringify!(len) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).minlen as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(fstrim_range), + "::", + stringify!(minlen) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct file_dedupe_range_info { + pub dest_fd: __s64, + pub dest_offset: __u64, + pub bytes_deduped: __u64, + pub status: __s32, + pub reserved: __u32, +} +#[test] +fn bindgen_test_layout_file_dedupe_range_info() { + assert_eq!( + ::std::mem::size_of::(), + 32usize, + concat!("Size of: ", stringify!(file_dedupe_range_info)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(file_dedupe_range_info)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).dest_fd as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range_info), + "::", + stringify!(dest_fd) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).dest_offset as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range_info), + "::", + stringify!(dest_offset) + ) + ); + 
assert_eq!( + unsafe { + &(*(::std::ptr::null::())).bytes_deduped as *const _ as usize + }, + 16usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range_info), + "::", + stringify!(bytes_deduped) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).status as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range_info), + "::", + stringify!(status) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).reserved as *const _ as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range_info), + "::", + stringify!(reserved) + ) + ); +} +#[repr(C)] +#[derive(Debug)] +pub struct file_dedupe_range { + pub src_offset: __u64, + pub src_length: __u64, + pub dest_count: __u16, + pub reserved1: __u16, + pub reserved2: __u32, + pub info: __IncompleteArrayField, +} +#[test] +fn bindgen_test_layout_file_dedupe_range() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(file_dedupe_range)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(file_dedupe_range)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).src_offset as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range), + "::", + stringify!(src_offset) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).src_length as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range), + "::", + stringify!(src_length) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).dest_count as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range), + "::", + stringify!(dest_count) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).reserved1 as *const _ as usize }, + 18usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range), + "::", + stringify!(reserved1) + ) + ); + assert_eq!( + unsafe { 
&(*(::std::ptr::null::())).reserved2 as *const _ as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range), + "::", + stringify!(reserved2) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).info as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(file_dedupe_range), + "::", + stringify!(info) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct files_stat_struct { + pub nr_files: ::std::os::raw::c_ulong, + pub nr_free_files: ::std::os::raw::c_ulong, + pub max_files: ::std::os::raw::c_ulong, +} +#[test] +fn bindgen_test_layout_files_stat_struct() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(files_stat_struct)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(files_stat_struct)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).nr_files as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(files_stat_struct), + "::", + stringify!(nr_files) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).nr_free_files as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(files_stat_struct), + "::", + stringify!(nr_free_files) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).max_files as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(files_stat_struct), + "::", + stringify!(max_files) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct inodes_stat_t { + pub nr_inodes: ::std::os::raw::c_long, + pub nr_unused: ::std::os::raw::c_long, + pub dummy: [::std::os::raw::c_long; 5usize], +} +#[test] +fn bindgen_test_layout_inodes_stat_t() { + assert_eq!( + ::std::mem::size_of::(), + 56usize, + concat!("Size of: ", stringify!(inodes_stat_t)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(inodes_stat_t)) + ); + assert_eq!( + unsafe { 
&(*(::std::ptr::null::())).nr_inodes as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(inodes_stat_t), + "::", + stringify!(nr_inodes) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).nr_unused as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(inodes_stat_t), + "::", + stringify!(nr_unused) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).dummy as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(inodes_stat_t), + "::", + stringify!(dummy) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct fsxattr { + pub fsx_xflags: __u32, + pub fsx_extsize: __u32, + pub fsx_nextents: __u32, + pub fsx_projid: __u32, + pub fsx_cowextsize: __u32, + pub fsx_pad: [::std::os::raw::c_uchar; 8usize], +} +#[test] +fn bindgen_test_layout_fsxattr() { + assert_eq!( + ::std::mem::size_of::(), + 28usize, + concat!("Size of: ", stringify!(fsxattr)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(fsxattr)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fsx_xflags as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(fsxattr), + "::", + stringify!(fsx_xflags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fsx_extsize as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(fsxattr), + "::", + stringify!(fsx_extsize) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fsx_nextents as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(fsxattr), + "::", + stringify!(fsx_nextents) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fsx_projid as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(fsxattr), + "::", + stringify!(fsx_projid) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fsx_cowextsize as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", 
+ stringify!(fsxattr), + "::", + stringify!(fsx_cowextsize) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fsx_pad as *const _ as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(fsxattr), + "::", + stringify!(fsx_pad) + ) + ); +} +pub type __kernel_rwf_t = ::std::os::raw::c_int; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct io_uring_sqe { + pub opcode: __u8, + pub flags: __u8, + pub ioprio: __u16, + pub fd: __s32, + pub __bindgen_anon_1: io_uring_sqe__bindgen_ty_1, + pub __bindgen_anon_2: io_uring_sqe__bindgen_ty_2, + pub len: __u32, + pub __bindgen_anon_3: io_uring_sqe__bindgen_ty_3, + pub user_data: __u64, + pub __bindgen_anon_4: io_uring_sqe__bindgen_ty_4, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union io_uring_sqe__bindgen_ty_1 { + pub off: __u64, + pub addr2: __u64, +} +#[test] +fn bindgen_test_layout_io_uring_sqe__bindgen_ty_1() { + assert_eq!( + ::std::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_1)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_1)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).off as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_1), + "::", + stringify!(off) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).addr2 as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_1), + "::", + stringify!(addr2) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union io_uring_sqe__bindgen_ty_2 { + pub addr: __u64, + pub splice_off_in: __u64, +} +#[test] +fn bindgen_test_layout_io_uring_sqe__bindgen_ty_2() { + assert_eq!( + ::std::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_2)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_2)) + ); + assert_eq!( + unsafe 
{ &(*(::std::ptr::null::())).addr as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_2), + "::", + stringify!(addr) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).splice_off_in as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_2), + "::", + stringify!(splice_off_in) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union io_uring_sqe__bindgen_ty_3 { + pub rw_flags: __kernel_rwf_t, + pub fsync_flags: __u32, + pub poll_events: __u16, + pub poll32_events: __u32, + pub sync_range_flags: __u32, + pub msg_flags: __u32, + pub timeout_flags: __u32, + pub accept_flags: __u32, + pub cancel_flags: __u32, + pub open_flags: __u32, + pub statx_flags: __u32, + pub fadvise_advice: __u32, + pub splice_flags: __u32, + pub rename_flags: __u32, + pub unlink_flags: __u32, +} +#[test] +fn bindgen_test_layout_io_uring_sqe__bindgen_ty_3() { + assert_eq!( + ::std::mem::size_of::(), + 4usize, + concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_3)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_3)) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).rw_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(rw_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).fsync_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(fsync_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).poll_events as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(poll_events) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).poll32_events as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: 
", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(poll32_events) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).sync_range_flags as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(sync_range_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).msg_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(msg_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).timeout_flags as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(timeout_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).accept_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(accept_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).cancel_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(cancel_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).open_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(open_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).statx_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(statx_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).fadvise_advice as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(fadvise_advice) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).splice_flags as *const _ as usize + }, + 0usize, + concat!( + 
"Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(splice_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).rename_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(rename_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).unlink_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(unlink_flags) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union io_uring_sqe__bindgen_ty_4 { + pub __bindgen_anon_1: io_uring_sqe__bindgen_ty_4__bindgen_ty_1, + pub __pad2: [__u64; 3usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct io_uring_sqe__bindgen_ty_4__bindgen_ty_1 { + pub __bindgen_anon_1: io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1, + pub personality: __u16, + pub splice_fd_in: __s32, +} +#[repr(C, packed)] +#[derive(Copy, Clone)] +pub union io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1 { + pub buf_index: __u16, + pub buf_group: __u16, +} +#[test] +fn bindgen_test_layout_io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1() { + assert_eq!( + ::std::mem::size_of::(), + 2usize, + concat!( + "Size of: ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1) + ) + ); + assert_eq!( + ::std::mem::align_of::(), + 1usize, + concat!( + "Alignment of ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())) + .buf_index as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1), + "::", + stringify!(buf_index) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())) + .buf_group as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1), + "::", + 
stringify!(buf_group) + ) + ); +} +#[test] +fn bindgen_test_layout_io_uring_sqe__bindgen_ty_4__bindgen_ty_1() { + assert_eq!( + ::std::mem::size_of::(), + 8usize, + concat!( + "Size of: ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1) + ) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!( + "Alignment of ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).personality + as *const _ as usize + }, + 2usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1), + "::", + stringify!(personality) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).splice_fd_in + as *const _ as usize + }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1), + "::", + stringify!(splice_fd_in) + ) + ); +} +#[test] +fn bindgen_test_layout_io_uring_sqe__bindgen_ty_4() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_4)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_4)) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).__pad2 as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_4), + "::", + stringify!(__pad2) + ) + ); +} +#[test] +fn bindgen_test_layout_io_uring_sqe() { + assert_eq!( + ::std::mem::size_of::(), + 64usize, + concat!("Size of: ", stringify!(io_uring_sqe)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_sqe)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).opcode as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe), + "::", + stringify!(opcode) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 1usize, + concat!( + "Offset of field: ", + 
stringify!(io_uring_sqe), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ioprio as *const _ as usize }, + 2usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe), + "::", + stringify!(ioprio) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fd as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe), + "::", + stringify!(fd) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).len as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe), + "::", + stringify!(len) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).user_data as *const _ as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe), + "::", + stringify!(user_data) + ) + ); +} +pub const IOSQE_FIXED_FILE_BIT: ::std::os::raw::c_uint = 0; +pub const IOSQE_IO_DRAIN_BIT: ::std::os::raw::c_uint = 1; +pub const IOSQE_IO_LINK_BIT: ::std::os::raw::c_uint = 2; +pub const IOSQE_IO_HARDLINK_BIT: ::std::os::raw::c_uint = 3; +pub const IOSQE_ASYNC_BIT: ::std::os::raw::c_uint = 4; +pub const IOSQE_BUFFER_SELECT_BIT: ::std::os::raw::c_uint = 5; +pub type _bindgen_ty_1 = ::std::os::raw::c_uint; +pub const IORING_OP_NOP: ::std::os::raw::c_uint = 0; +pub const IORING_OP_READV: ::std::os::raw::c_uint = 1; +pub const IORING_OP_WRITEV: ::std::os::raw::c_uint = 2; +pub const IORING_OP_FSYNC: ::std::os::raw::c_uint = 3; +pub const IORING_OP_READ_FIXED: ::std::os::raw::c_uint = 4; +pub const IORING_OP_WRITE_FIXED: ::std::os::raw::c_uint = 5; +pub const IORING_OP_POLL_ADD: ::std::os::raw::c_uint = 6; +pub const IORING_OP_POLL_REMOVE: ::std::os::raw::c_uint = 7; +pub const IORING_OP_SYNC_FILE_RANGE: ::std::os::raw::c_uint = 8; +pub const IORING_OP_SENDMSG: ::std::os::raw::c_uint = 9; +pub const IORING_OP_RECVMSG: ::std::os::raw::c_uint = 10; +pub const IORING_OP_TIMEOUT: ::std::os::raw::c_uint = 11; +pub const IORING_OP_TIMEOUT_REMOVE: 
::std::os::raw::c_uint = 12; +pub const IORING_OP_ACCEPT: ::std::os::raw::c_uint = 13; +pub const IORING_OP_ASYNC_CANCEL: ::std::os::raw::c_uint = 14; +pub const IORING_OP_LINK_TIMEOUT: ::std::os::raw::c_uint = 15; +pub const IORING_OP_CONNECT: ::std::os::raw::c_uint = 16; +pub const IORING_OP_FALLOCATE: ::std::os::raw::c_uint = 17; +pub const IORING_OP_OPENAT: ::std::os::raw::c_uint = 18; +pub const IORING_OP_CLOSE: ::std::os::raw::c_uint = 19; +pub const IORING_OP_FILES_UPDATE: ::std::os::raw::c_uint = 20; +pub const IORING_OP_STATX: ::std::os::raw::c_uint = 21; +pub const IORING_OP_READ: ::std::os::raw::c_uint = 22; +pub const IORING_OP_WRITE: ::std::os::raw::c_uint = 23; +pub const IORING_OP_FADVISE: ::std::os::raw::c_uint = 24; +pub const IORING_OP_MADVISE: ::std::os::raw::c_uint = 25; +pub const IORING_OP_SEND: ::std::os::raw::c_uint = 26; +pub const IORING_OP_RECV: ::std::os::raw::c_uint = 27; +pub const IORING_OP_OPENAT2: ::std::os::raw::c_uint = 28; +pub const IORING_OP_EPOLL_CTL: ::std::os::raw::c_uint = 29; +pub const IORING_OP_SPLICE: ::std::os::raw::c_uint = 30; +pub const IORING_OP_PROVIDE_BUFFERS: ::std::os::raw::c_uint = 31; +pub const IORING_OP_REMOVE_BUFFERS: ::std::os::raw::c_uint = 32; +pub const IORING_OP_TEE: ::std::os::raw::c_uint = 33; +pub const IORING_OP_SHUTDOWN: ::std::os::raw::c_uint = 34; +pub const IORING_OP_RENAMEAT: ::std::os::raw::c_uint = 35; +pub const IORING_OP_UNLINKAT: ::std::os::raw::c_uint = 36; +pub const IORING_OP_LAST: ::std::os::raw::c_uint = 37; +pub type _bindgen_ty_2 = ::std::os::raw::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct io_uring_cqe { + pub user_data: __u64, + pub res: __s32, + pub flags: __u32, +} +#[test] +fn bindgen_test_layout_io_uring_cqe() { + assert_eq!( + ::std::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(io_uring_cqe)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_cqe)) + ); + assert_eq!( + unsafe { 
&(*(::std::ptr::null::())).user_data as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_cqe), + "::", + stringify!(user_data) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).res as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(io_uring_cqe), + "::", + stringify!(res) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(io_uring_cqe), + "::", + stringify!(flags) + ) + ); +} +pub const IORING_CQE_BUFFER_SHIFT: ::std::os::raw::c_uint = 16; +pub type _bindgen_ty_3 = ::std::os::raw::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct io_sqring_offsets { + pub head: __u32, + pub tail: __u32, + pub ring_mask: __u32, + pub ring_entries: __u32, + pub flags: __u32, + pub dropped: __u32, + pub array: __u32, + pub resv1: __u32, + pub resv2: __u64, +} +#[test] +fn bindgen_test_layout_io_sqring_offsets() { + assert_eq!( + ::std::mem::size_of::(), + 40usize, + concat!("Size of: ", stringify!(io_sqring_offsets)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_sqring_offsets)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).head as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(head) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).tail as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(tail) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ring_mask as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(ring_mask) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ring_entries as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + 
stringify!(ring_entries) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).dropped as *const _ as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(dropped) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).array as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(array) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv1 as *const _ as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(resv1) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(resv2) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct io_cqring_offsets { + pub head: __u32, + pub tail: __u32, + pub ring_mask: __u32, + pub ring_entries: __u32, + pub overflow: __u32, + pub cqes: __u32, + pub flags: __u32, + pub resv1: __u32, + pub resv2: __u64, +} +#[test] +fn bindgen_test_layout_io_cqring_offsets() { + assert_eq!( + ::std::mem::size_of::(), + 40usize, + concat!("Size of: ", stringify!(io_cqring_offsets)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_cqring_offsets)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).head as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(head) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).tail as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(tail) + ) + ); + assert_eq!( + unsafe { 
&(*(::std::ptr::null::())).ring_mask as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(ring_mask) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ring_entries as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(ring_entries) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).overflow as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(overflow) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).cqes as *const _ as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(cqes) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv1 as *const _ as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(resv1) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(resv2) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct io_uring_params { + pub sq_entries: __u32, + pub cq_entries: __u32, + pub flags: __u32, + pub sq_thread_cpu: __u32, + pub sq_thread_idle: __u32, + pub features: __u32, + pub wq_fd: __u32, + pub resv: [__u32; 3usize], + pub sq_off: io_sqring_offsets, + pub cq_off: io_cqring_offsets, +} +#[test] +fn bindgen_test_layout_io_uring_params() { + assert_eq!( + ::std::mem::size_of::(), + 120usize, + concat!("Size of: ", stringify!(io_uring_params)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_params)) + ); 
+ assert_eq!( + unsafe { &(*(::std::ptr::null::())).sq_entries as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(sq_entries) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).cq_entries as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(cq_entries) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).sq_thread_cpu as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(sq_thread_cpu) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).sq_thread_idle as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(sq_thread_idle) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).features as *const _ as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(features) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).wq_fd as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(wq_fd) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(resv) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).sq_off as *const _ as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(sq_off) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).cq_off as *const _ as usize }, + 80usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(cq_off) 
+ ) + ); +} +pub const IORING_REGISTER_BUFFERS: ::std::os::raw::c_uint = 0; +pub const IORING_UNREGISTER_BUFFERS: ::std::os::raw::c_uint = 1; +pub const IORING_REGISTER_FILES: ::std::os::raw::c_uint = 2; +pub const IORING_UNREGISTER_FILES: ::std::os::raw::c_uint = 3; +pub const IORING_REGISTER_EVENTFD: ::std::os::raw::c_uint = 4; +pub const IORING_UNREGISTER_EVENTFD: ::std::os::raw::c_uint = 5; +pub const IORING_REGISTER_FILES_UPDATE: ::std::os::raw::c_uint = 6; +pub const IORING_REGISTER_EVENTFD_ASYNC: ::std::os::raw::c_uint = 7; +pub const IORING_REGISTER_PROBE: ::std::os::raw::c_uint = 8; +pub const IORING_REGISTER_PERSONALITY: ::std::os::raw::c_uint = 9; +pub const IORING_UNREGISTER_PERSONALITY: ::std::os::raw::c_uint = 10; +pub const IORING_REGISTER_RESTRICTIONS: ::std::os::raw::c_uint = 11; +pub const IORING_REGISTER_ENABLE_RINGS: ::std::os::raw::c_uint = 12; +pub const IORING_REGISTER_LAST: ::std::os::raw::c_uint = 13; +pub type _bindgen_ty_4 = ::std::os::raw::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct io_uring_files_update { + pub offset: __u32, + pub resv: __u32, + pub fds: __u64, +} +#[test] +fn bindgen_test_layout_io_uring_files_update() { + assert_eq!( + ::std::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(io_uring_files_update)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_files_update)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).offset as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_files_update), + "::", + stringify!(offset) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_files_update), + "::", + stringify!(resv) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fds as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(io_uring_files_update), + "::", + 
stringify!(fds) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct io_uring_rsrc_update { + pub offset: __u32, + pub resv: __u32, + pub data: __u64, +} +#[test] +fn bindgen_test_layout_io_uring_rsrc_update() { + assert_eq!( + ::std::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(io_uring_rsrc_update)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_rsrc_update)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).offset as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_rsrc_update), + "::", + stringify!(offset) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_rsrc_update), + "::", + stringify!(resv) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).data as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(io_uring_rsrc_update), + "::", + stringify!(data) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct io_uring_probe_op { + pub op: __u8, + pub resv: __u8, + pub flags: __u16, + pub resv2: __u32, +} +#[test] +fn bindgen_test_layout_io_uring_probe_op() { + assert_eq!( + ::std::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(io_uring_probe_op)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(io_uring_probe_op)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).op as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe_op), + "::", + stringify!(op) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, + 1usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe_op), + "::", + stringify!(resv) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 2usize, + concat!( + "Offset of field: ", + 
stringify!(io_uring_probe_op), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe_op), + "::", + stringify!(resv2) + ) + ); +} +#[repr(C)] +#[derive(Debug)] +pub struct io_uring_probe { + pub last_op: __u8, + pub ops_len: __u8, + pub resv: __u16, + pub resv2: [__u32; 3usize], + pub ops: __IncompleteArrayField, +} +#[test] +fn bindgen_test_layout_io_uring_probe() { + assert_eq!( + ::std::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(io_uring_probe)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(io_uring_probe)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).last_op as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe), + "::", + stringify!(last_op) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ops_len as *const _ as usize }, + 1usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe), + "::", + stringify!(ops_len) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, + 2usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe), + "::", + stringify!(resv) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe), + "::", + stringify!(resv2) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ops as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(io_uring_probe), + "::", + stringify!(ops) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct io_uring_restriction { + pub opcode: __u16, + pub __bindgen_anon_1: io_uring_restriction__bindgen_ty_1, + pub resv: __u8, + pub resv2: [__u32; 3usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union io_uring_restriction__bindgen_ty_1 { + pub 
register_op: __u8, + pub sqe_op: __u8, + pub sqe_flags: __u8, +} +#[test] +fn bindgen_test_layout_io_uring_restriction__bindgen_ty_1() { + assert_eq!( + ::std::mem::size_of::(), + 1usize, + concat!("Size of: ", stringify!(io_uring_restriction__bindgen_ty_1)) + ); + assert_eq!( + ::std::mem::align_of::(), + 1usize, + concat!( + "Alignment of ", + stringify!(io_uring_restriction__bindgen_ty_1) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).register_op as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_restriction__bindgen_ty_1), + "::", + stringify!(register_op) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).sqe_op as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_restriction__bindgen_ty_1), + "::", + stringify!(sqe_op) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).sqe_flags as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_restriction__bindgen_ty_1), + "::", + stringify!(sqe_flags) + ) + ); +} +#[test] +fn bindgen_test_layout_io_uring_restriction() { + assert_eq!( + ::std::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(io_uring_restriction)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(io_uring_restriction)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).opcode as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_restriction), + "::", + stringify!(opcode) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv as *const _ as usize }, + 3usize, + concat!( + "Offset of field: ", + stringify!(io_uring_restriction), + "::", + stringify!(resv) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_restriction), + "::", + stringify!(resv2) + ) + ); +} +pub const 
IORING_RESTRICTION_REGISTER_OP: ::std::os::raw::c_uint = 0; +pub const IORING_RESTRICTION_SQE_OP: ::std::os::raw::c_uint = 1; +pub const IORING_RESTRICTION_SQE_FLAGS_ALLOWED: ::std::os::raw::c_uint = 2; +pub const IORING_RESTRICTION_SQE_FLAGS_REQUIRED: ::std::os::raw::c_uint = 3; +pub const IORING_RESTRICTION_LAST: ::std::os::raw::c_uint = 4; +pub type _bindgen_ty_5 = ::std::os::raw::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct io_uring_getevents_arg { + pub sigmask: __u64, + pub sigmask_sz: __u32, + pub pad: __u32, + pub ts: __u64, +} +#[test] +fn bindgen_test_layout_io_uring_getevents_arg() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(io_uring_getevents_arg)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_getevents_arg)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).sigmask as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_getevents_arg), + "::", + stringify!(sigmask) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).sigmask_sz as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(io_uring_getevents_arg), + "::", + stringify!(sigmask_sz) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).pad as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(io_uring_getevents_arg), + "::", + stringify!(pad) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ts as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(io_uring_getevents_arg), + "::", + stringify!(ts) + ) + ); +} diff --git a/runtime/asyncio/io_uring.h b/runtime/asyncio/io_uring.h new file mode 100644 index 0000000..8a63099 --- /dev/null +++ b/runtime/asyncio/io_uring.h @@ -0,0 +1 @@ +#include diff --git a/runtime/asyncio/poem b/runtime/asyncio/poem new file mode 100644 index 0000000..de37932 --- /dev/null +++ b/runtime/asyncio/poem 
@@ -0,0 +1,19 @@ +Dunkel war’s, der Mond schien helle, +schneebedeckt die grüne Flur, +als ein Wagen blitzesschnelle, +langsam um die Ecke fuhr. + +Drinnen saßen stehend Leute, +schweigend ins Gespräch vertieft, +als ein totgeschoss’ner Hase +auf der Sandbank Schlittschuh lief. + +Und ein blondgelockter Jüngling +mit kohlrabenschwarzem Haar +saß auf einer grünen Kiste, +die rot angestrichen war. + +Neben ihm ’ne alte Schrulle, +zählte kaum erst sechzehn Jahr, +in der Hand ’ne Butterstulle, +die mit Schmalz bestrichen war. diff --git a/runtime/asyncio/src/cancellation.rs b/runtime/asyncio/src/cancellation.rs new file mode 100644 index 0000000..36614d5 --- /dev/null +++ b/runtime/asyncio/src/cancellation.rs @@ -0,0 +1,149 @@ +use std::ptr; +use std::any::Any; +use std::ffi::CString; +use ptr_meta::DynMetadata; + +/// Cancellation callback to clean up I/O resources +/// +/// This allows IO actions to properly cancel and have their resources cleaned up without having to +/// worry about the current state of the io_uring queues. 
+pub struct Cancellation { + data: *mut (), + metadata: usize, + drop: unsafe fn (*mut (), usize), +} + +pub unsafe trait Cancel { + fn into_raw(self) -> (*mut (), usize); + unsafe fn drop_raw(ptr: *mut (), metadata: usize); +} + +pub unsafe trait CancelNarrow { + fn into_narrow_raw(self) -> *mut (); + unsafe fn drop_narrow_raw(ptr: *mut ()); +} + +unsafe impl Cancel for T { + fn into_raw(self) -> (*mut (), usize) { + (T::into_narrow_raw(self), 0) + } + + unsafe fn drop_raw(ptr: *mut (), _: usize) { + T::drop_narrow_raw(ptr) + } +} + +unsafe impl CancelNarrow for Box { + fn into_narrow_raw(self) -> *mut () { + Box::into_raw(self) as *mut () + } + + unsafe fn drop_narrow_raw(ptr: *mut ()) { + drop(Box::from_raw(ptr)) + } +} + +unsafe impl Cancel for Box<[T]> { + fn into_raw(self) -> (*mut (), usize) { + let len = self.len(); + (Box::into_raw(self) as *mut (), len) + } + + unsafe fn drop_raw(ptr: *mut (), metadata: usize) { + drop(Vec::from_raw_parts(ptr, metadata, metadata)) + } +} + +// Cancel impl for panics +unsafe impl Cancel for Box { + fn into_raw(self) -> (*mut (), usize) { + let ptr = Box::into_raw(self); + let metadata = ptr_meta::metadata(ptr as *mut dyn Any); + let metadata = unsafe { + // SAFETY: None. I happen to know that metadata is always exactly `usize`-sized for this + // type but only `std` can guarantee it. + std::mem::transmute::<_, usize>(metadata) + }; + (ptr as *mut(), metadata) + } + + unsafe fn drop_raw(ptr: *mut (), metadata: usize) { + let boxed: Box = unsafe { + let metadata = + // SAFETY: We did it the other way around so this is safe if the previous step was. + std::mem::transmute::<_, DynMetadata>(metadata); + + // We can then (safely) construct a fat pointer from the metadata and data address + let ptr = ptr_meta::from_raw_parts_mut(ptr, metadata); + + // SAFETY: We know the pointer is valid since `Self::into_raw` took ownership and the + // vtable was extracted from this known good reference. 
+ Box::from_raw(ptr) + }; + drop(boxed) + } +} + +unsafe impl CancelNarrow for CString { + fn into_narrow_raw(self) -> *mut () { + self.into_raw() as *mut () + } + + unsafe fn drop_narrow_raw(ptr: *mut ()) { + drop(CString::from_raw(ptr as *mut libc::c_char)); + } +} + +unsafe impl CancelNarrow for () { + fn into_narrow_raw(self) -> *mut () { + ptr::null_mut() + } + + unsafe fn drop_narrow_raw(_: *mut ()) {} +} + +unsafe impl Cancel for (T, F) + where T: CancelNarrow, + F: CancelNarrow, +{ + fn into_raw(self) -> (*mut (), usize) { + let (t, f) = self; + let (t, _) = t.into_raw(); + let (f, _) = f.into_raw(); + (t, f as usize) + } + + unsafe fn drop_raw(t: *mut (), f: usize) { + T::drop_raw(t, 0); + F::drop_raw(f as *mut (), 0); + } +} + +impl Cancellation { + pub fn new(cancel: T) -> Self { + let (data, metadata) = cancel.into_raw(); + Self { data, metadata, drop: T::drop_raw } + } +} + +impl Drop for Cancellation { + fn drop(&mut self) { + unsafe { + (self.drop)(self.data, self.metadata) + } + } +} + +impl From for Cancellation { + fn from(cancel: T) -> Self { + Cancellation::new(cancel) + } +} + +impl From> for Cancellation + where Cancellation: From +{ + fn from(option: Option) -> Self { + option.map_or(Cancellation::new(()), Cancellation::from) + } +} diff --git a/runtime/asyncio/src/completion.rs b/runtime/asyncio/src/completion.rs new file mode 100644 index 0000000..5fda033 --- /dev/null +++ b/runtime/asyncio/src/completion.rs @@ -0,0 +1,73 @@ +use std::cell::Cell; +use std::io; +use std::marker::PhantomData; +use std::mem::ManuallyDrop; +use std::task::Waker; +use crate::cancellation::Cancellation; + +pub struct Completion<'cx> { + state: ManuallyDrop>>, + marker: PhantomData &'cx ()>, +} + +enum State { + Submitted(Waker), + Completed(io::Result), + Cancelled(Cancellation), + Empty, +} + +impl<'cx> Completion<'cx> { + pub fn new(waker: Waker) -> Self { + Self { + state: ManuallyDrop::new(Box::new(Cell::new(State::Submitted(waker)))), + marker: PhantomData, 
+ } + } + + pub fn addr(&self) -> u64 { + self.state.as_ptr() as *const _ as usize as u64 + } + + pub fn check(self, waker: &Waker) -> Result, Self> { + match self.state.replace(State::Empty) { + State::Submitted(old_waker) => { + // If the given waker wakes a different task than the one we were constructed + // with we must replace our waker. + if !old_waker.will_wake(waker) { + self.state.replace(State::Submitted(waker.clone())); + } else { + self.state.replace(State::Submitted(old_waker)); + } + Err(self) + }, + State::Completed(result) => { + Ok(result) + }, + _ => unreachable!(), + } + } + + pub fn cancel(self, callback: Cancellation) { + match self.state.replace(State::Cancelled(callback)) { + State::Completed(_) => { + drop(self.state); + }, + State::Submitted(_) => { + }, + _ => unreachable!(), + } + } + + pub fn complete(self, result: io::Result) { + match self.state.replace(State::Completed(result)) { + State::Submitted(waker) => { + waker.wake(); + }, + State::Cancelled(callback) => { + drop(callback); + }, + _ => unreachable!(), + } + } +} \ No newline at end of file diff --git a/runtime/asyncio/src/cq.rs b/runtime/asyncio/src/cq.rs new file mode 100644 index 0000000..49a776f --- /dev/null +++ b/runtime/asyncio/src/cq.rs @@ -0,0 +1,102 @@ +use std::cell::UnsafeCell; +use std::os::unix::prelude::RawFd; +use std::ptr::NonNull; +use std::sync::atomic::{AtomicU32, Ordering}; +use nix::sys::mman::munmap; +use crate::cqe::CQE; +use crate::ctypes::{CQOffsets, IORING_CQ}; + +#[derive(Debug)] +pub struct CQ { + /// Head of the completion queue. Moved by the program to indicate that it has consumed + /// completions. + /// + /// While it's important that the kernel sees the same value as the userspace program the + /// main problem that can happen otherwise is that the kernel assumes it lost completions + /// which we already successfully pulled from the queue. + head: &'static AtomicU32, + cached_head: UnsafeCell, + + /// Tail of the completion queue. 
Moved by the kernel when new completions are stored. + /// + /// Since this is modified by the kernel we should use atomic operations to read it, making + /// sure both the kernel and any program have a consistent view of its contents. + tail: &'static AtomicU32, + ring_mask: u32, + num_entries: u32, + flags: &'static AtomicU32, + entries: &'static [CQE], + + // cq_ptr is set to `None` if we used a single mmap for both SQ and CQ. + cq_ptr: *mut libc::c_void, + cq_map_size: usize, +} + +impl Drop for CQ { + fn drop(&mut self) { + if !self.cq_ptr.is_null() { + unsafe { munmap(self.cq_ptr, self.cq_map_size) }; + } + } +} + +impl CQ { + pub unsafe fn new(ptr: *mut libc::c_void, + offs: CQOffsets, + cq_entries: u32, + split_mmap: bool, + cq_map_size: usize, + ) -> Self { + // Sanity check the pointer and offsets. If these fail we were probably passed an + // offsets from an uninitialized parameter struct. + assert!(!ptr.is_null()); + assert_ne!(offs.head, offs.tail); + + // Eagerly extract static values. Since they won't ever change again there's no reason to + // not read them now. 
+ let ring_mask = *(ptr.offset(offs.ring_mask as isize).cast()); + let num_entries = *(ptr.offset(offs.ring_entries as isize).cast()); + + let head: &AtomicU32 = &*(ptr.offset(offs.head as isize).cast()); + let cached_head = UnsafeCell::new(head.load(Ordering::Acquire)); + let tail: &AtomicU32 = &*(ptr.offset(offs.tail as isize).cast()); + let flags: &AtomicU32 = &*(ptr.offset(offs.flags as isize).cast()); + let entries = std::slice::from_raw_parts( + ptr.offset(offs.cqes as isize).cast(), + cq_entries as usize + ); + + Self { + head, + cached_head, + tail, + ring_mask, + num_entries, + flags, + + entries, + + // Only store a pointer if we used a separate mmap() syscall for the CQ + cq_ptr: if split_mmap { ptr } else { std::ptr::null_mut() }, + cq_map_size, + } + } + + pub fn get_next(&self) -> Option<&CQE> { + let tail = self.tail.load(Ordering::Acquire); + let head = self.head.load(Ordering::Acquire); + if tail == head { + None + } else { + self.head.fetch_add(1, Ordering::Release); + let index = (head & self.ring_mask) as usize; + Some(&self.entries[index]) + } + } + + pub fn ready(&self) -> u32 { + let tail = self.tail.load(Ordering::Acquire); + let head = self.head.load(Ordering::Acquire); + tail.wrapping_sub(head) + } +} \ No newline at end of file diff --git a/runtime/asyncio/src/cqe.rs b/runtime/asyncio/src/cqe.rs new file mode 100644 index 0000000..a0810f4 --- /dev/null +++ b/runtime/asyncio/src/cqe.rs @@ -0,0 +1,137 @@ +use std::io; +use std::ptr::NonNull; +use std::sync::atomic::Ordering; +use crate::cq::CQ; +use crate::io_uring::{IoUring}; + +#[repr(C)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] +/// Completion Queue Event +pub struct CQE { + pub user_data: u64, + res: i32, + pub flags: IOCQE, +} + +impl CQE { + pub fn raw_result(&self) -> i32 { + self.res + } + + pub fn result(&self) -> io::Result { + if self.res < 0 { + let err = io::Error::from_raw_os_error(-self.res); + Err(err) + } else { + Ok(self.res) + } + } +} + +pub struct CQEs<'a> 
{ + cq: &'a CQ, + ready: u32, +} + +impl<'a> CQEs<'a> { + pub fn new(cq: &'a CQ) -> Self { + Self { cq, ready: 0 } + } + + fn get(&mut self) -> Option { + self.cq.get_next().map(|cqe| *cqe) + } + + fn ready(&mut self) -> u32 { + self.cq.ready() + } +} + +impl<'a> Iterator for CQEs<'a> { + type Item = CQE; + + fn next(&mut self) -> Option { + if self.ready == 0 { + self.ready = self.ready(); + if self.ready == 0 { + return None; + } + } + + self.ready -= 1; + self.get() + } +} + +bitflags::bitflags! { + #[derive(Default)] + #[repr(C)] + pub struct IOCQE: u32 { + const F_BUFFER = 1; + const F_MORE = 1 << 1; + } +} +static_assertions::assert_eq_size!(u32, IOCQE); + +mod tests { + use super::*; + + #[test] + fn test_result_into_std() { + let cqe = CQE { res: 0, .. Default::default() }; + assert_eq!(cqe.result().unwrap(), 0); + let cqe = CQE { res: 42567, .. Default::default() }; + assert_eq!(cqe.result().unwrap(), 42567); + + let cqe = CQE { res: -32, .. Default::default() }; + assert_eq!(cqe.result().unwrap_err().kind(), io::ErrorKind::BrokenPipe); + + let cqe = CQE { res: -2, .. 
Default::default() }; + assert_eq!(cqe.result().unwrap_err().kind(), io::ErrorKind::NotFound); + } + + #[test] + fn test_layout_io_uring_cqe() { + assert_eq!( + ::std::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(io_uring_cqe)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_cqe)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).user_data as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_cqe), + "::", + stringify!(user_data) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).res as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(io_uring_cqe), + "::", + stringify!(res) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(io_uring_cqe), + "::", + stringify!(flags) + ) + ); + } + +} \ No newline at end of file diff --git a/runtime/asyncio/src/ctypes.rs b/runtime/asyncio/src/ctypes.rs new file mode 100644 index 0000000..547dd75 --- /dev/null +++ b/runtime/asyncio/src/ctypes.rs @@ -0,0 +1,1164 @@ +#![allow(non_camel_case_types)] + +// Generated using bindgen-0.59.1 and then cleaned up by hand + +use std::fmt::{Debug, Formatter}; +use std::os::unix::prelude::RawFd; +use libc::{c_ulong, c_long, c_uint, c_int}; + +#[repr(C)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] +/// Parameters for the io_uring_setup syscall. +/// +/// Except for `flags`, `sq_thread_cpu`, `sq_thread_idle` and `wq_fd` this is filled entirely by +/// the kernel. 
+pub struct Params { + /// Number of entries in the submission queue + pub sq_entries: u32, + /// Number of entries in the completion queue + pub cq_entries: u32, + /// Setup Flags passed to the kernel + pub flags: IORING_SETUP, + /// If `!= 0` this will pin the kernel thread for submission queue polling to a given CPU + pub sq_thread_cpu: u32, + /// Timeout for the submission queue polling kernel thread + pub sq_thread_idle: u32, + /// Bitflags of features available in the current context (i.e. as that uid with that kernel) + pub features: IORING_FEAT, + /// file descriptor for a previous io_uring instance to share kernel async backend. To use + /// this you also need to set [`IORING_SETUP::ATTACH_WQ`]. + pub wq_fd: u32, + + // reserved + _resv: [u32; 3], + + /// Submission Queue offsets + pub sq_off: SQOffsets, + /// Completion Queue offsets + pub cq_off: CQOffsets, +} + +impl Params { + pub fn new(flags: IORING_SETUP) -> Self + { + Self { + flags, + .. Default::default() + } + } +} + +#[repr(C)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] +/// Submission Queue offsets +/// +/// These are offsets (on top of [`IORING_OFF_SQ_RING`]) into the fd returned by `io_uring_setup` +/// at which relevant parts of information are stored. io_uring assumes this file to be mmap()ed +/// into the process memory space, thus allowing communication with the kernel using this shared +/// memory. +pub struct SQOffsets { + pub head: u32, + pub tail: u32, + pub ring_mask: u32, + pub ring_entries: u32, + pub flags: u32, + pub dropped: u32, + pub array: u32, + pub resv1: u32, + pub resv2: u64, +} + +#[repr(C)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] +/// Completion Queue offsets +/// +/// These are offsets (on top of [`IORING_OFF_SQ_RING`]) into the fd returned by `io_uring_setup` +/// at which relevant parts of information are stored. 
io_uring assumes this file to be mmap()ed +/// into the process memory space, thus allowing communication with the kernel using this shared +/// memory. +pub struct CQOffsets { + pub head: u32, + pub tail: u32, + pub ring_mask: u32, + pub ring_entries: u32, + pub overflow: u32, + pub cqes: u32, + pub flags: u32, + pub resv1: u32, + pub resv2: u64, +} + +#[repr(C)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] +/// Submission Queue Event +/// +/// This struct describes the I/O action that the kernel should execute on the programs behalf. +/// Every SQE will generate a [`CQE`] reply on the completion queue when the action has been +/// completed (successfully or not) which will contain the same `user_data` value. Usually +/// `user_data` is set to a pointer value, e.g. to a [`std::task::Waker`] allowing a task +/// blocking on this I/O action to be woken up. +pub struct io_uring_sqe { + /// Type of operation for this SQE + pub opcode: IORING_OP, + pub flags: IOSQE, + pub ioprio: u16, + pub fd: RawFd, + pub offset: u64, + pub address: u64, + pub len: i32, + pub op_flags: SQEOpFlags, + pub user_data: u64, + pub personality: pers_buf_pad, +} + +#[repr(C)] +#[derive(Eq, Copy, Clone)] +pub union SQEOpFlags { + pub rw_flags: c_int, + pub fsync_flags: FSYNC_FLAGS, + pub poll_events: u16, + pub poll32_events: u32, + pub sync_range_flags: u32, + pub msg_flags: u32, + pub timeout_flags: TIMEOUT_FLAGS, + pub accept_flags: u32, + pub cancel_flags: u32, + pub open_flags: u32, + pub statx_flags: u32, + pub fadvise_advice: u32, + pub splice_flags: u32, + pub rename_flags: u32, + pub unlink_flags: u32, +} +static_assertions::assert_eq_size!(u32, SQEOpFlags); + +impl PartialEq for SQEOpFlags { + fn eq(&self, other: &Self) -> bool { + unsafe { self.rw_flags == other.rw_flags } + } +} + +impl Default for SQEOpFlags { + fn default() -> Self { + Self { rw_flags: 0 } + } +} + +impl Debug for SQEOpFlags { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + unsafe { + 
f.debug_struct("union ioop_flags") + .field("raw", &self.rw_flags) + .field("fsync_flags", &self.fsync_flags) + .field("timeout_flags", &self.timeout_flags) + .finish() + } + } +} + +#[repr(C)] +#[derive(Eq, Copy, Clone)] +pub union pers_buf_pad { + pub personality: personality, + pub __pad2: [u64; 3], +} + +impl PartialEq for pers_buf_pad { + fn eq(&self, other: &Self) -> bool { + unsafe { self.personality == other.personality } + } +} + +impl Default for pers_buf_pad { + fn default() -> Self { + Self { personality: personality::default() } + } +} + +impl Debug for pers_buf_pad { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + unsafe { + f.debug_struct("union pers_buf_pad") + .field("personality", &self.personality) + .finish() + } + } +} + +#[repr(C)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] +pub struct personality { + pub buffer: buffer_selection, + pub personality: u16, + pub splice_fd_in: i32, +} +#[repr(C, packed)] +#[derive(Eq, Copy, Clone)] +pub union buffer_selection { + pub buf_index: u16, + pub buf_group: u16, +} + +impl Debug for buffer_selection { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str("personality_buffer_selection") + } +} + +impl PartialEq for buffer_selection { + fn eq(&self, other: &Self) -> bool { + unsafe { self.buf_index == other.buf_index } + } +} + +impl Default for buffer_selection { + fn default() -> Self { + Self { buf_index: 0 } + } +} + +bitflags::bitflags! { + #[derive(Default)] + #[repr(C)] + /// Available features + pub struct IORING_FEAT: u32 { + /// SQ, CQ and CQE can be mapped using a single mmap(), reducing the required mmap()s + /// from three to two. 
+ const SINGLE_MMAP = 1; + const NODROP = 2; + const SUBMIT_STABLE = 4; + const RW_CUR_POS = 8; + const CUR_PERSONALITY = 16; + const FAST_POLL = 32; + const POLL_32BITS = 64; + const SQPOLL_NONFIXED = 128; + const EXT_ARG = 256; + const NATIVE_WORKERS = 512; + const RSRC_TAGS = 1024; + } + + #[derive(Default)] + #[repr(C)] + pub struct IORING_SETUP: u32 { + const IOPOLL = 1; + const SQPOLL = 2; + const SQ_AFF = 4; + const CQSIZE = 8; + const CLAMP = 16; + /// Attach to an existing io_uring async backend kernel-side. This allows sharing + /// resources while setting up several independent rings + const ATTACH_WQ = 32; + /// Disable the io_uring async backend on creation. This allows registering + /// resources but prevents submitting and polling + const R_DISABLED = 64; + } + + #[derive(Default)] + #[repr(C)] + pub struct IORING_SQ: u32 { + /// The Kernel Submission Queue thread was stopped and needs to be waked up again. + const NEED_WAKEUP = 1; + /// The Completion Queue as overflown and completions were dropped. + const CQ_OVERFLOW = 2; + } + + #[derive(Default)] + #[repr(C)] + pub struct IORING_CQ: u32 { + const EVENTFD_DISABLED = 1; + } + + #[derive(Default)] + #[repr(C)] + pub struct IORING_ENTER: u32 { + /// If this flag is set, then the system call will wait for the specified number of + /// events in `min_complete` before returning. This flag can be set along with `to_submit` + /// to both submit and complete events in a single system call. + const GETEVENTS = 1; + /// If the io_uring was created with [`IORING_SETUP::SQPOLL`] then this flag asks the kernel + /// to wake up the kernel SQ thread. + const SQ_WAKEUP = 2; + /// When the io_uring was created with [`IORING_SETUP::SQPOLL`] it's impossible to know + /// for an application when the kernel as consumed an SQ event. If this flag is set + /// io_uring_enter will block until at least one SQE was consumed and can be re-used. 
+ const SQ_WAIT = 4; + /// Setting this flags allows passing extra arguments to recent enough kernel versions + /// (>= 5.11). + /// This allows passing arguments other than a [`libc::sigset_t`] to `io_uring_enter` + const EXT_ARG = 8; + } + + #[derive(Default)] + #[repr(C)] + pub struct IOSQE: u8 { + /// If set a passed `fd` is not a fd but instead an index into the array of file + /// descriptors registered using [`io_uring_register`](crate::syscall::io_uring_register). + const FIXED_FILE = 1 << 0; + + /// When this flag is specified, the SQE will not be started before previously submitted + /// `SQEs` have completed, and new `SQEs` will not be started before this one completes. + /// Available since 5.2. + const IO_DRAIN = 1 << 1; + + + /// When this flag is specified, it forms a link with the next [`SQE`] in the submission + /// ring. That next `SQE` will not be started before this one completes. This, in effect, + /// forms a chain of `SQEs`, which can be arbitrarily long. The tail of the chain is + /// denoted by the first `SQE` that does not have this flag set. This flag has no effect on + /// previous `SQE` submissions, nor does it impact `SQEs` that are outside of the chain + /// tail. This means that multiple chains can be executing in parallel, or chains and + /// individual `SQEs`. Only members inside the chain are serialized. A chain of `SQEs` will + /// be broken, if any request in that chain ends in error. `io_uring` considers any + /// unexpected result an error. This means that, eg., a short read will also terminate the + /// remainder of the chain. If a chain of `SQE` links is broken, the remaining unstarted + /// part of the chain will be terminated and completed with `-ECANCELED` as the error code. + /// Available since 5.3. + const IO_LINK = 1 << 2; + + /// Like [`IOSQE::IO_LINK`], but it doesn't sever regardless of the completion result. 
+ /// Note that the link will still sever if we fail submitting the parent request, hard + /// links are only resilient in the presence of completion results for requests that did + /// submit correctly. `IOSQE::IO_HARDLINK` implies `IO_LINK`. Available since 5.5. + const IO_HARDLINK = 1 << 3; + + /// Normal operation for io_uring is to try and issue an sqe as non-blocking first, and if + /// that fails, execute it in an async manner. To support more efficient overlapped + /// operation of requests that the application knows/assumes will always (or most of the + /// time) block, the application can ask for an sqe to be issued async from the start. + /// Available since 5.6. + const ASYNC = 1 << 4; + + /// Used in conjunction with the [`IORING_OP::PROVIDE_BUFFERS`] command, which registers a + /// pool of buffers to be used by commands that read or receive data. When buffers are + /// registered for this use case, and this flag is set in the command, io_uring will grab a + /// buffer from this pool when the request is ready to receive or read data. If successful, + /// the resulting `CQE` will have [`IOCQE::F_BUFFER`] set in the flags part of the struct, + /// and the upper `IORING_CQE_BUFFER_SHIFT` bits will contain the ID of the selected + /// buffers. This allows the application to know exactly which buffer was selected for the + /// op‐ eration. If no buffers are available and this flag is set, then the request will + /// fail with `-ENOBUFS` as the error code. Once a buffer has been used, it is no longer + /// available in the kernel pool. The application must re-register the given buffer again + /// when it is ready to recycle it (eg has completed using it). Available since 5.7. 
+ const BUFFER_SELECT = 1 << 5; + } + + #[derive(Default)] + #[repr(C)] + pub struct FSYNC_FLAGS: u32 { + const DATASYNC = 1; + } + + #[derive(Default)] + #[repr(C)] + pub struct TIMEOUT_FLAGS: u32 { + const ABS = 0; + const UPDATE = 1; + const BOOTTIME = 1 << 2; + const REALTIME = 1 << 3; + const LINK_UPDATE = 1 << 4; + const CLOCK_MASK = (Self::BOOTTIME.bits | Self::REALTIME.bits); + const UPDATE_MASK = (Self::UPDATE.bits | Self::LINK_UPDATE.bits); + } +} +static_assertions::assert_eq_size!(u32, IORING_FEAT); +static_assertions::assert_eq_size!(u32, IORING_SETUP); +static_assertions::assert_eq_size!(u32, IORING_SQ); +static_assertions::assert_eq_size!(u32, IORING_CQ); +static_assertions::assert_eq_size!(u32, IORING_ENTER); +static_assertions::assert_eq_size!(u8, IOSQE); + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +#[repr(u8)] +#[non_exhaustive] +pub enum IORING_OP { + NOP = 0, + READV = 1, + WRITEV = 2, + FSYNC = 3, + READ_FIXED = 4, + WRITE_FIXED = 5, + POLL_ADD = 6, + POLL_REMOVE = 7, + SYNC_FILE_RANGE = 8, + SENDMSG = 9, + RECVMSG = 10, + TIMEOUT = 11, + TIMEOUT_REMOVE = 12, + ACCEPT = 13, + ASYNC_CANCEL = 14, + LINK_TIMEOUT = 15, + CONNECT = 16, + FALLOCATE = 17, + OPENAT = 18, + CLOSE = 19, + FILES_UPDATE = 20, + STATX = 21, + READ = 22, + WRITE = 23, + FADVISE = 24, + MADVISE = 25, + SEND = 26, + RECV = 27, + OPENAT2 = 28, + EPOLL_CTL = 29, + SPLICE = 30, + PROVIDE_BUFFERS = 31, + REMOVE_BUFFERS = 32, + TEE = 33, + SHUTDOWN = 34, + RENAMEAT = 35, + UNLINKAT = 36, + MKDIRAT = 37, + SYMLINKAT = 38, + LINKAT = 39, + + LAST = 40, +} +static_assertions::assert_eq_size!(u8, IORING_OP); + +impl Default for IORING_OP { + fn default() -> Self { + IORING_OP::NOP + } +} + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +#[repr(u32)] +#[non_exhaustive] +pub enum IORING_REGISTER_OP { + REGISTER_BUFFERS = 0, + UNREGISTER_BUFFERS = 1, + REGISTER_FILES = 2, + UNREGISTER_FILES = 3, + REGISTER_EVENTFD = 4, + UNREGISTER_EVENTFD = 5, + REGISTER_FILES_UPDATE = 6, + 
REGISTER_EVENTFD_ASYNC = 7, + REGISTER_PROBE = 8, + REGISTER_PERSONALITY = 9, + UNREGISTER_PERSONALITY = 10, + REGISTER_RESTRICTIONS = 11, + REGISTER_ENABLE_RINGS = 12, + REGISTER_LAST = 13, +} +static_assertions::assert_eq_size!(u32, IORING_REGISTER_OP); + +pub const IORING_OFF_SQ_RING: u32 = 0; +pub const IORING_OFF_CQ_RING: u32 = 134217728; +pub const IORING_OFF_SQES: u32 = 268435456; + +mod tests { + use super::*; + + #[test] + fn bindgen_test_layout_io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1() { + assert_eq!( + ::std::mem::size_of::(), + 2usize, + concat!( + "Size of: ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1) + ) + ); + assert_eq!( + ::std::mem::align_of::(), + 1usize, + concat!( + "Alignment of ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())) + .buf_index as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1), + "::", + stringify!(buf_index) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())) + .buf_group as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1), + "::", + stringify!(buf_group) + ) + ); + } + + #[test] + fn bindgen_test_layout_io_uring_sqe__bindgen_ty_4__bindgen_ty_1() { + assert_eq!( + ::std::mem::size_of::(), + 8usize, + concat!( + "Size of: ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1) + ) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!( + "Alignment of ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).personality + as *const _ as usize + }, + 2usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1), + "::", + stringify!(personality) + ) + ); + assert_eq!( + unsafe { + 
&(*(::std::ptr::null::())).splice_fd_in + as *const _ as usize + }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_4__bindgen_ty_1), + "::", + stringify!(splice_fd_in) + ) + ); + } + + #[test] + fn bindgen_test_layout_io_uring_sqe__bindgen_ty_4() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_4)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_4)) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).__pad2 as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_4), + "::", + stringify!(__pad2) + ) + ); + } + + #[test] + fn bindgen_test_layout_io_uring_sqe() { + assert_eq!( + ::std::mem::size_of::(), + 64usize, + concat!("Size of: ", stringify!(io_uring_sqe)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_sqe)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).opcode as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe), + "::", + stringify!(opcode) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 1usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ioprio as *const _ as usize }, + 2usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe), + "::", + stringify!(ioprio) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).fd as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe), + "::", + stringify!(fd) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).len as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe), + "::", + stringify!(len) + ) + ); + assert_eq!( + unsafe { 
&(*(::std::ptr::null::())).user_data as *const _ as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe), + "::", + stringify!(user_data) + ) + ); + } + + #[test] + fn bindgen_test_layout_io_uring_sqe__bindgen_ty_3() { + assert_eq!( + ::std::mem::size_of::(), + 4usize, + concat!("Size of: ", stringify!(io_uring_sqe__bindgen_ty_3)) + ); + assert_eq!( + ::std::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(io_uring_sqe__bindgen_ty_3)) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).rw_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(rw_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).fsync_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(fsync_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).poll_events as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(poll_events) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).poll32_events as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(poll32_events) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).sync_range_flags as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(sync_range_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).msg_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(msg_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).timeout_flags as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + 
stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(timeout_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).accept_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(accept_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).cancel_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(cancel_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).open_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(open_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).statx_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(statx_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).fadvise_advice as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(fadvise_advice) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).splice_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(splice_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).rename_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(rename_flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).unlink_flags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_sqe__bindgen_ty_3), + "::", + stringify!(unlink_flags) + ) + ); + } + + #[test] + fn bindgen_test_layout_io_sqring_offsets() { + assert_eq!( + ::std::mem::size_of::(), + 40usize, + concat!("Size 
of: ", stringify!(io_sqring_offsets)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_sqring_offsets)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).head as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(head) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).tail as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(tail) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ring_mask as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(ring_mask) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ring_entries as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(ring_entries) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).dropped as *const _ as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(dropped) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).array as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(array) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv1 as *const _ as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(resv1) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(io_sqring_offsets), + "::", + stringify!(resv2) + ) + ); + } + + #[test] + fn 
bindgen_test_layout_io_cqring_offsets() { + assert_eq!( + ::std::mem::size_of::(), + 40usize, + concat!("Size of: ", stringify!(io_cqring_offsets)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_cqring_offsets)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).head as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(head) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).tail as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(tail) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ring_mask as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(ring_mask) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).ring_entries as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(ring_entries) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).overflow as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(overflow) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).cqes as *const _ as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(cqes) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv1 as *const _ as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(io_cqring_offsets), + "::", + stringify!(resv1) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).resv2 as *const _ as usize }, + 32usize, + concat!( + "Offset of field: ", + 
stringify!(io_cqring_offsets), + "::", + stringify!(resv2) + ) + ); + } + #[test] + fn bindgen_test_layout_io_uring_params() { + assert_eq!( + ::std::mem::size_of::(), + 120usize, + concat!("Size of: ", stringify!(io_uring_params)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(io_uring_params)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).sq_entries as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(sq_entries) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).cq_entries as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(cq_entries) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).sq_thread_cpu as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(sq_thread_cpu) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).sq_thread_idle as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(sq_thread_idle) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).features as *const _ as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(features) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).wq_fd as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(wq_fd) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::()))._resv as *const _ as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(resv) + ) + ); + assert_eq!( + unsafe { 
&(*(::std::ptr::null::())).sq_off as *const _ as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(sq_off) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).cq_off as *const _ as usize }, + 80usize, + concat!( + "Offset of field: ", + stringify!(io_uring_params), + "::", + stringify!(cq_off) + ) + ); + } + +} diff --git a/runtime/asyncio/src/io_uring.rs b/runtime/asyncio/src/io_uring.rs new file mode 100644 index 0000000..051224b --- /dev/null +++ b/runtime/asyncio/src/io_uring.rs @@ -0,0 +1,127 @@ +use std::fmt::{Debug, Formatter}; +use std::io; +use std::marker::PhantomData; +use std::mem::{size_of, align_of}; +use std::sync::atomic::{AtomicU32, Ordering}; +use std::os::unix::prelude::RawFd; +use std::pin::Pin; +use std::ptr::NonNull; +use std::task::{Context, Poll, Waker}; +use crossbeam_queue::SegQueue; +use nix::sys::{mman, mman::{MapFlags, ProtFlags}}; +use crate::completion::Completion; +use crate::cq::CQ; +use crate::cqe::{CQE, CQEs}; +use crate::ctypes::{CQOffsets, IORING_ENTER, SQOffsets}; +use crate::sq::SQ; +use crate::sqe::SQEs; +use super::ctypes::{Params, io_uring_sqe, IORING_CQ, IORING_FEAT, + IORING_OFF_CQ_RING, IORING_OFF_SQ_RING, IORING_OFF_SQES, IORING_SQ}; +use super::syscall; + +#[derive(Debug)] +pub struct IoUring { + fd: RawFd, + params: Params, + sq: SQ, + cq: CQ, + + waiting: SegQueue<(u32, Waker)>, +} + +unsafe fn mmap(map_size: usize, fd: RawFd, offset: i64) -> nix::Result<*mut libc::c_void> { + mman::mmap( + std::ptr::null_mut(), + map_size, + ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, + MapFlags::MAP_SHARED | MapFlags::MAP_POPULATE, + fd, + offset + ) +} + +impl IoUring { + pub fn setup(entries: u32) -> io::Result { + let mut params = Params::default(); + let fd = syscall::setup(entries, &mut params)?; + + let mut sq_map_size = (params.sq_off.array as usize) + + (params.sq_entries as usize) * size_of::(); + let mut cq_map_size = (params.cq_off.cqes as usize) + + 
(params.cq_entries as usize) * size_of::(); + println!("{:?} {}", params.sq_off, sq_map_size); + + // If we can use a single mmap() syscall to map sq, cq and cqe the size of the total map + // is the largest of `sq_map_size` and `cq_map_size`. + if params.features.contains(IORING_FEAT::SINGLE_MMAP) { + sq_map_size = sq_map_size.max(cq_map_size); + cq_map_size = sq_map_size; + } + + println!("{:?}", params.cq_off); + let sq_ptr = unsafe { + mmap(sq_map_size as usize, fd, IORING_OFF_SQ_RING as i64)? + }; + + let sqes_map_size = (params.sq_entries as usize) * size_of::(); + let sqes = unsafe { + let ptr = mmap(sqes_map_size, fd, IORING_OFF_SQES as i64)?.cast(); + std::slice::from_raw_parts_mut(ptr, params.sq_entries as usize) + }; + + let sq = unsafe { + SQ::new(sq_ptr, + params.sq_off, + sqes, + sq_map_size, + sqes_map_size + ) + }; + + let cq_ptr = if params.features.contains(IORING_FEAT::SINGLE_MMAP) { + sq_ptr + } else { + unsafe { + mmap(cq_map_size, fd, IORING_OFF_CQ_RING as i64)? + } + }; + let cq = unsafe { + CQ::new(cq_ptr, + params.cq_off, + params.cq_entries, + sq_ptr != cq_ptr, + cq_map_size, + ) + }; + + Ok(IoUring { + fd, + params, + sq, + cq, + waiting: SegQueue::new(), + }) + } + + pub fn try_prepare<'cx>( + &self, + count: u32, + prepare: impl FnOnce(SQEs<'_>) + ) -> Option<()> { + // TODO: Lock the required amount of slots on both submission and completion queue, then + // construct the sqes. 
+ if let Some(sqes) = self.sq.try_reserve(count) { + Some(prepare(sqes)) + } else { + None + } + } + + pub fn submit(&self) -> io::Result { + self.sq.submit(self.fd) + } + + pub fn cqes(&self) -> CQEs { + CQEs::new(&self.cq) + } +} \ No newline at end of file diff --git a/runtime/asyncio/src/lib.rs b/runtime/asyncio/src/lib.rs new file mode 100644 index 0000000..04624dc --- /dev/null +++ b/runtime/asyncio/src/lib.rs @@ -0,0 +1,24 @@ + +// Raw typedefs and structs for kernel communication via syscalls +pub mod ctypes; +mod syscall; +pub mod io_uring; + +mod sq; +mod sqe; +mod cq; +mod cqe; + +mod completion; +mod cancellation; + + +#[macro_export] +macro_rules! ready { + ($e:expr $(,)?) => { + match $e { + std::task::Poll::Ready(t) => t, + std::task::Poll::Pending => return std::task::Poll::Pending, + } + }; +} \ No newline at end of file diff --git a/runtime/asyncio/src/rtypes.rs b/runtime/asyncio/src/rtypes.rs new file mode 100644 index 0000000..a67ee6b --- /dev/null +++ b/runtime/asyncio/src/rtypes.rs @@ -0,0 +1,5 @@ + + +pub struct CQE { + +} \ No newline at end of file diff --git a/runtime/asyncio/src/sq.rs b/runtime/asyncio/src/sq.rs new file mode 100644 index 0000000..82969d2 --- /dev/null +++ b/runtime/asyncio/src/sq.rs @@ -0,0 +1,426 @@ +use std::cell::UnsafeCell; +use std::fmt::{Debug, Formatter}; +use std::io; +use std::mem::ManuallyDrop; +use std::os::unix::prelude::RawFd; +use std::ptr::NonNull; +use std::sync::atomic::{AtomicU32, compiler_fence, fence, Ordering}; +use nix::sys::mman::munmap; +use crate::ctypes::{IORING_ENTER, IORING_SQ, io_uring_sqe, SQOffsets}; +use crate::sqe::SQEs; +use crate::syscall; + +pub struct SQ { + /// Head of the submission queue. This value is set by the kernel when it consumes SQE. + /// Thus we need to use atomic operations when passing information, making sure both the kernel + /// and program have a consistent view of its contents. + array_head: &'static AtomicU32, + + /// The head of the sqes buffer. 
This value is our local cache of `array_head` that's not + /// shared with or modified by the kernel. We use it to index the start of the prepared SQE. + /// This means that this value lags behind after `array_head`. + sqes_head: UnsafeCell, + + /// Tail of the submission queue. While this will be modified by the userspace program only, + /// the kernel uses atomic operations to read it so we want to use atomic operations to write + /// it. + array_tail: &'static AtomicU32, + // non-atomic cache of array_tail + cached_tail: UnsafeCell, + /// Tail of the sqes buffer. This value serves as our local cache of `array_tail` and, in + /// combination with `sqes_head` allows us to more efficiently submit SQE by skipping already + /// submitted ones. + /// `sqes_tail` marks the end of the prepared SQE. + sqes_tail: UnsafeCell, + + ring_mask: u32, + num_entries: u32, + + flags: &'static AtomicU32, + + dropped: &'static AtomicU32, + + array: &'static [AtomicU32], + sqes: &'static mut [UnsafeCell], + + sq_ptr: NonNull<()>, + sq_map_size: usize, + sqes_map_size: usize, +} + +static_assertions::assert_not_impl_any!(SQ: Send, Sync); + +impl Drop for SQ { + fn drop(&mut self) { + unsafe { + munmap(self.sq_ptr.as_ptr().cast(), self.sq_map_size); + let sqes_ptr: *mut libc::c_void = self.sqes.as_mut_ptr().cast(); + munmap(sqes_ptr, self.sqes_map_size); + } + } +} + +impl Debug for SQ { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + unsafe { + // TODO: Complete + f.debug_struct("SQ") + .field("head", self.array_head) + .field("tail", self.array_tail) + .field("ring_mask", &self.ring_mask) + .field("num_entries", &self.num_entries) + .field("flags", self.flags) + .field("dropped", self.dropped) + .field("array", &self.array) + .finish() + } + } +} + +impl SQ { + pub unsafe fn new(ptr: *mut libc::c_void, + offs: SQOffsets, + sqes: &'static mut [UnsafeCell], + sq_map_size: usize, + sqes_map_size: usize, + ) -> Self { + // Sanity check the pointer and offsets. 
If these fail we were probably passed an + // offsets from an uninitialized parameter struct. + assert!(!ptr.is_null()); + assert_ne!(offs.head, offs.tail); + + // Eagerly extract static values. Since they won't ever change again there's no reason to + // not read them now. + let ring_mask = *(ptr.offset(offs.ring_mask as isize).cast()); + let num_entries = *(ptr.offset(offs.ring_entries as isize).cast()); + + // These are valid Rust references; they are valid for the entire lifetime of self, + // properly initialized by the kernel and well aligned. + let array_head: &AtomicU32 = &*(ptr.offset(offs.head as isize).cast()); + let sqes_head = UnsafeCell::new(array_head.load(Ordering::Acquire)); + let array_tail: &AtomicU32 = &*ptr.offset(offs.tail as isize).cast(); + let sqes_tail = UnsafeCell::new(array_tail.load(Ordering::Acquire)); + let cached_tail = UnsafeCell::new(array_tail.load(Ordering::Acquire)); + let flags = &*ptr.offset(offs.flags as isize).cast(); + let dropped = &*ptr.offset(offs.dropped as isize).cast(); + + let array = std::slice::from_raw_parts( + ptr.offset(offs.array as isize).cast(), + sqes.len() as usize, + ); + let sq_ptr = NonNull::new_unchecked(ptr).cast(); + + Self { + array_head, + sqes_head, + array_tail, + sqes_tail, + cached_tail, + ring_mask, + num_entries, + flags, + dropped, + array, + sqes, + sq_ptr, + sq_map_size, + sqes_map_size, + } + } + + #[inline(always)] + fn sqes_head(&self) -> &mut u32 { + unsafe { &mut *self.sqes_head.get() } + } + + #[inline(always)] + fn sqes_tail(&self) -> &mut u32 { + unsafe { &mut *self.sqes_tail.get() } + } + + #[inline(always)] + fn cached_tail(&self) -> &mut u32 { + unsafe { &mut *self.cached_tail.get() } + } + + #[inline(always)] + fn increment_tail(&self, count: u32) -> u32 { + let tail = self.sqes_tail(); + let old = *tail; + *tail = (*tail).wrapping_add(count); + old + } + + #[inline(always)] + fn increment_head(&self, count: u32) -> u32{ + let head = self.sqes_head(); + let old = *head; + *head 
= (*head).wrapping_add(count); + old + } + + #[inline(always)] + fn used(&self) -> u32 { + (*self.sqes_tail()).wrapping_sub(*self.sqes_head()) + } + + #[inline(always)] + fn available(&self) -> u32 { + self.num_entries - self.used() + } + + /// Submit all prepared entries to the kernel. This function will return the number of + /// entries successfully submitted to the kernel. + pub fn submit(&self, fd: RawFd) -> io::Result { + let retval = syscall::enter( + fd, + self.num_entries, + 0, + IORING_ENTER::GETEVENTS, + std::ptr::null(), + 0, + )? as u32; + // Return SQE into circulation that we successfully submitted to the kernel. + self.increment_head(retval); + Ok(retval) + } + + + /// Prepare actions for submission by shuffling them into the correct order. + /// + /// Kernelside `array` is used to index into the sqes, more specifically the code behaves + /// like this: + /// ```C + /// u32 mask = ctx->sq_entries - 1; + /// u32 sq_idx = ctx->cached_sq_head++ & mask; + /// u32 head = READ_ONCE(ctx->sq_array[sq_idx]); + /// if (likely(head < ctx->sq_entries)) + /// return &ctx->sq_sqes[head]; + /// ``` + /// Where `ctx->sq_entries` is the number of slots in the ring (i.e. simply a boundary check). + /// + /// So we need to make sure that for every new entry since we last submitted we have the + /// correct index set. In our case shuffle will map the next `count` entries in `self.array` to + /// point to `count` entries in `self.sqes` starting at `start`. This allows actions to be + /// submitted to the kernel even when there are still reserved SQE in between that weren't yet + /// filled. + fn prepare(&self, start: u32, count: u32) { + // Load the tail of the array (i.e. where we will start filling) + let tail = self.cached_tail(); + let mut head = start; + + for _ in 0..count { + let index = (*tail & self.ring_mask) as usize; + + // We can allow this store to be an Relaxed operation since updating the shared tail + // is done after a memory barrier. 
+ self.array[index].store(head & self.ring_mask, Ordering::Relaxed); + + // Same here. We need to take the overflow into account but don't have to explicitly + // handle it. + head = head.wrapping_add(1); + *tail = (*tail).wrapping_add(1); + } + // Ensure that the writes into the array are not moved after the write of the tail. + // Otherwise kernelside may read completely wrong indices from array. + compiler_fence(Ordering::Release); + self.array_tail.store(*tail, Ordering::Release); + } + + pub fn try_reserve(&self, count: u32) -> Option> { + if self.available() >= count { + let start = self.increment_tail(count); + Some(SQEs::new(self.sqes, start, count)) + } else { + None + } + } +} + +mod tests { + use std::mem::ManuallyDrop; + use std::sync::atomic::Ordering::Relaxed; + use crate::ctypes::{IORING_OP, IOSQE}; + use super::*; + + fn gen_sq(num_entries: u32, head: u32, tail: u32) -> ManuallyDrop { + assert!((0 < num_entries && num_entries <= 4096), "entries must be between 1 and 4096"); + assert_eq!(num_entries.count_ones(), 1, "entries must be a power of two"); + + let array_head = Box::leak(Box::new(AtomicU32::new(head))); + let array_tail = Box::leak(Box::new(AtomicU32::new(tail))); + let flags = Box::leak(Box::new(AtomicU32::new(0))); + let dropped = Box::leak(Box::new(AtomicU32::new(0))); + let array = Box::leak((0..num_entries) + .map(|n| AtomicU32::new(n)) + .collect::>()); + let sqes = Box::leak((0..num_entries) + .map(|_| UnsafeCell::new(io_uring_sqe::default())) + .collect::>()); + + unsafe { + ManuallyDrop::new(SQ { + array_head, + sqes_head: UnsafeCell::new(head), + array_tail, + sqes_tail: UnsafeCell::new(tail), + cached_tail: UnsafeCell::new(0), + ring_mask: num_entries - 1, + num_entries, + flags, + dropped, + array, + sqes, + sq_ptr: NonNull::dangling(), + sq_map_size: 0, + sqes_map_size: 0 + }) + } + } + + #[test] + fn test_head_tail() { + let mut sq = gen_sq(64, 30, 30); + assert_eq!(*sq.sqes_head(), 30); + assert_eq!(*sq.sqes_tail(), 30); + 
assert_eq!(sq.used(), 0); + assert_eq!(sq.available(), 64); + + sq.increment_tail(4); + assert_eq!(*sq.sqes_head(), 30); + assert_eq!(*sq.sqes_tail(), 34); + assert_eq!(sq.used(), 4); + assert_eq!(sq.available(), 60); + + sq.increment_head(2); + assert_eq!(*sq.sqes_head(), 32); + assert_eq!(*sq.sqes_tail(), 34); + assert_eq!(sq.used(), 2); + assert_eq!(sq.available(), 62); + } + + #[test] + fn test_sq_getter_setter() { + let mut sq = gen_sq(64, 30, 30); + assert_eq!(*sq.sqes_head(), 30); + assert_eq!(*sq.sqes_tail(), 30); + assert_eq!(sq.used(), 0); + assert_eq!(sq.available(), 64); + + { + let mut sqes = sq.try_reserve(2).unwrap(); + assert_eq!(sq.used(), 2); + let mut sqe = sqes.next().unwrap(); + sqe.set_opcode(IORING_OP::READV); + sqe.add_flags(IOSQE::IO_HARDLINK); + let mut sqe = sqes.next().unwrap(); + sqe.set_opcode(IORING_OP::WRITEV); + sqe.set_userdata(823); + } + assert_eq!(sq.used(), 2); + + { + let sqes = &mut sq.sqes; + assert_eq!(sqes[30].get_mut().opcode, IORING_OP::READV); + assert_eq!(sqes[30].get_mut().flags, IOSQE::IO_HARDLINK); + assert_eq!(sqes[31].get_mut().opcode, IORING_OP::WRITEV); + assert_eq!(sqes[31].get_mut().user_data, 823); + } + + + } + + #[test] + fn test_sq_full() { + let mut sq = gen_sq(64, 1, 65); + let sqe = sq.try_reserve(1); + assert!(sqe.is_none()); + } + + #[test] + fn test_out_of_order_submit() { + let mut sq = gen_sq(64, 0, 0); + + let start; + { + let mut sqes = sq.try_reserve(4).unwrap(); + start = sqes.start(); + let mut sqe = sqes.next().unwrap(); + sqe.set_opcode(IORING_OP::READV); + sqe.add_flags(IOSQE::IO_HARDLINK); + sqe.set_address(1); + let mut sqe = sqes.next().unwrap(); + sqe.set_opcode(IORING_OP::READV); + sqe.add_flags(IOSQE::IO_HARDLINK); + sqe.set_address(2); + let mut sqe = sqes.next().unwrap(); + sqe.set_opcode(IORING_OP::READV); + sqe.add_flags(IOSQE::IO_HARDLINK); + sqe.set_address(3); + let mut sqe = sqes.next().unwrap(); + sqe.set_opcode(IORING_OP::READV); + sqe.set_address(4); + 
sqe.set_userdata(823); + } + assert_eq!(sq.used(), 4); + + let start2; + { + let mut sqes = sq.try_reserve(4).unwrap(); + start2 = sqes.start(); + let mut sqe = sqes.next().unwrap(); + sqe.set_opcode(IORING_OP::WRITEV); + sqe.add_flags(IOSQE::IO_LINK); + sqe.set_address(1); + let mut sqe = sqes.next().unwrap(); + sqe.set_opcode(IORING_OP::WRITEV); + sqe.add_flags(IOSQE::IO_LINK); + sqe.set_address(2); + let mut sqe = sqes.next().unwrap(); + sqe.set_opcode(IORING_OP::WRITEV); + sqe.add_flags(IOSQE::IO_LINK); + sqe.set_address(3); + let mut sqe = sqes.next().unwrap(); + sqe.set_opcode(IORING_OP::WRITEV); + sqe.set_address(4); + sqe.set_userdata(0xDEADBEEF); + } + assert_eq!(sq.used(), 8); + + sq.prepare(start2, 4); + sq.prepare(start, 4); + + let sqes: Vec<_> = sq.sqes.iter_mut() + .map(|c| c.get_mut().clone()) + .collect(); + let mut out: Vec<_> = sq.array.iter().map(|n| { + let i = n.load(Relaxed) as usize; + sqes[i] + }).collect(); + + for (n, s) in out.iter().take(4).enumerate() { + assert_eq!(s.opcode, IORING_OP::WRITEV); + assert_eq!(s.address, n as u64 + 1); + if n == 3 { + assert_eq!(s.user_data, 0xDEADBEEF); + } else { + assert_eq!(s.flags, IOSQE::IO_LINK); + } + } + + for (n, s) in out.iter().skip(4).take(4).enumerate() { + assert_eq!(s.opcode, IORING_OP::READV); + assert_eq!(s.address, n as u64 + 1); + if n == 3 { + assert_eq!(s.user_data, 823); + } else { + assert_eq!(s.flags, IOSQE::IO_HARDLINK); + } + } + + let mut i = out.iter().skip(8); + while let Some(sqe) = i.next() { + assert_eq!(*sqe, io_uring_sqe::default()); + } + } +} \ No newline at end of file diff --git a/runtime/asyncio/src/sqe.rs b/runtime/asyncio/src/sqe.rs new file mode 100644 index 0000000..42518f0 --- /dev/null +++ b/runtime/asyncio/src/sqe.rs @@ -0,0 +1,337 @@ +use std::cell::UnsafeCell; +use std::ops::{Deref, DerefMut}; +use std::slice::IterMut; +use crate::ctypes::{IORING_OP, IOSQE, io_uring_sqe}; + +#[derive(Debug)] +pub struct SQE<'iou> { + sqe: &'iou mut io_uring_sqe, +} + 
+impl<'iou> SQE<'iou> { + pub fn new(sqe: &'iou mut io_uring_sqe) -> Self { + Self { sqe } + } + + #[inline(always)] + pub fn add_flags(&mut self, flags: IOSQE) { + self.sqe.flags |= flags; + } + + #[inline(always)] + pub fn set_opcode(&mut self, opcode: IORING_OP) { + self.sqe.opcode = opcode; + } + + #[inline(always)] + pub fn set_userdata(&mut self, user_data: u64) { + self.sqe.user_data = user_data; + } + + #[inline(always)] + pub fn set_address(&mut self, address: u64) { + self.sqe.address = address; + } + + #[inline(always)] + pub fn set_len(&mut self, len: i32) { + self.sqe.len = len; + } +} + +pub struct SQEs<'iou> { + slice: &'iou [UnsafeCell], + mask: u32, + start: u32, + count: u32, + capacity: u32, +} + +impl<'iou> SQEs<'iou> { + pub(crate) fn new(slice: &'iou [UnsafeCell], start: u32, capacity: u32) + -> Self + { + let mask = (slice.len() - 1) as u32; + Self { slice, mask, count: 0, start, capacity } + } + + pub fn last(&mut self) -> Option> { + let mut last = None; + while let Some(sqe) = self.consume() { last = Some(sqe) } + last + } + + /// An iterator of [`HardLinkedSQE`]s. These will be [`SQE`]s that are hard linked together. + /// + /// Hard linked SQEs will occur sequentially. All of them will be completed, even if one of the + /// events resolves to an error. + pub fn hard_linked(&mut self) -> HardLinked<'iou, '_> { + HardLinked { sqes: self } + } + + /// An iterator of [`SoftLinkedSQE`]s. These will be [`SQE`]s that are soft linked together. + /// + /// Soft linked SQEs will occur sequentially. If one of the events errors, all events after it + /// will be cancelled. + pub fn soft_linked(&mut self) -> SoftLinked<'iou, '_> { + SoftLinked { sqes: self } + } + + /// Remaining [`SQE`]s that can be modified. 
+ pub fn remaining(&self) -> u32 { + self.capacity - self.count + } + + pub fn start(&self) -> u32 { + self.start + } + + pub fn capacity(&self) -> u32 { + self.capacity + } + + fn consume(&mut self) -> Option> { + if self.count >= self.capacity { + None + } else { + let index = (self.start + self.count) & self.mask; + self.count += 1; + + let sqe: &mut io_uring_sqe = unsafe { + &mut *self.slice.get_unchecked(index as usize).get() + }; + + // Ensure that all SQE passing through here are wiped into NOPs first. + *sqe = io_uring_sqe::default(); + sqe.opcode = IORING_OP::NOP; + + Some(SQE { sqe }) + } + } + + /// Exhaust this iterator, thus ensuring all entries are set to NOP + fn exhaust(&mut self) { + while let Some(_) = self.consume() {} + } +} + +impl<'iou> Iterator for SQEs<'iou> { + type Item = SQE<'iou>; + + fn next(&mut self) -> Option> { + self.consume() + } +} + +impl<'iou> Drop for SQEs<'iou> { + fn drop(&mut self) { + if self.count != 0 { + // This iterator is responsible for all of its SQE and must NOP every not used one. + self.exhaust() + } + } +} + +/// An Iterator of [`SQE`]s which will be hard linked together. 
+pub struct HardLinked<'iou, 'a> { + sqes: &'a mut SQEs<'iou>, +} + +impl<'iou> HardLinked<'iou, '_> { + pub fn terminate(self) -> Option> { + self.sqes.consume() + } +} + +impl<'iou> Iterator for HardLinked<'iou, '_> { + type Item = HardLinkedSQE<'iou>; + + fn next(&mut self) -> Option { + let is_final = self.sqes.remaining() == 1; + self.sqes.consume().map(|sqe| HardLinkedSQE { sqe, is_final }) + } +} + +pub struct HardLinkedSQE<'iou> { + sqe: SQE<'iou>, + is_final: bool, +} + +impl<'iou> Deref for HardLinkedSQE<'iou> { + type Target = SQE<'iou>; + + fn deref(&self) -> &SQE<'iou> { + &self.sqe + } +} + +impl<'iou> DerefMut for HardLinkedSQE<'iou> { + fn deref_mut(&mut self) -> &mut SQE<'iou> { + &mut self.sqe + } +} + +impl<'iou> Drop for HardLinkedSQE<'iou> { + fn drop(&mut self) { + if !self.is_final { + self.sqe.add_flags(IOSQE::IO_HARDLINK); + } + } +} + +/// An Iterator of [`SQE`]s which will be soft linked together. +pub struct SoftLinked<'iou, 'a> { + sqes: &'a mut SQEs<'iou>, +} + +impl<'iou> SoftLinked<'iou, '_> { + pub fn terminate(self) -> Option> { + self.sqes.consume() + } +} + +impl<'iou> Iterator for SoftLinked<'iou, '_> { + type Item = SoftLinkedSQE<'iou>; + + fn next(&mut self) -> Option { + let is_final = self.sqes.remaining() == 1; + self.sqes.consume().map(|sqe| SoftLinkedSQE { sqe, is_final }) + } +} + +pub struct SoftLinkedSQE<'iou> { + sqe: SQE<'iou>, + is_final: bool, +} + +impl<'iou> Deref for SoftLinkedSQE<'iou> { + type Target = SQE<'iou>; + + fn deref(&self) -> &SQE<'iou> { + &self.sqe + } +} + +impl<'iou> DerefMut for SoftLinkedSQE<'iou> { + fn deref_mut(&mut self) -> &mut SQE<'iou> { + &mut self.sqe + } +} + +impl<'iou> Drop for SoftLinkedSQE<'iou> { + fn drop(&mut self) { + if !self.is_final { + self.sqe.add_flags(IOSQE::IO_LINK); + } + } +} + +mod tests { + use super::*; + + fn gen_buf(num_entries: usize) -> &'static mut [UnsafeCell]{ + Box::leak((0..num_entries) + .map(|_| UnsafeCell::new(io_uring_sqe::default())) + .collect::>()) 
+ } + + #[test] + fn test_wrapping_sqes() { + let mut sqe_buf = gen_buf(64); + + { + let mut sqes = SQEs::new(&mut sqe_buf[..], 62, 5); + assert_eq!(sqes.next().map(|i| i.sqe.user_data = 1), Some(())); + assert_eq!(sqes.next().map(|i| i.sqe.user_data = 2), Some(())); + assert_eq!(sqes.next().map(|i| i.sqe.user_data = 3), Some(())); + assert_eq!(sqes.next().map(|i| i.sqe.user_data = 4), Some(())); + assert_eq!(sqes.next().map(|i| i.sqe.user_data = 5), Some(())); + assert_eq!(sqes.next().map(|i| i.sqe.user_data = 6), None); + } + + assert_eq!(sqe_buf[61].get_mut().user_data, 0); + assert_eq!(sqe_buf[62].get_mut().user_data, 1); + assert_eq!(sqe_buf[63].get_mut().user_data, 2); + assert_eq!(sqe_buf[0].get_mut().user_data, 3); + assert_eq!(sqe_buf[1].get_mut().user_data, 4); + assert_eq!(sqe_buf[2].get_mut().user_data, 5); + assert_eq!(sqe_buf[3].get_mut().user_data, 0); + + } + + #[test] + fn test_hard_linked_sqes() { + let mut sqe_buf = gen_buf(64); + + { + let mut sqes = SQEs::new(&mut sqe_buf, 62, 5); + let mut linked = sqes.hard_linked(); + + assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::READ), Some(())); + assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::TEE), Some(())); + assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::ACCEPT), Some(())); + assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::CLOSE), Some(())); + assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::CONNECT), Some(())); + assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::FADVISE), None); + } + + assert_eq!(sqe_buf[61].get_mut().opcode, IORING_OP::NOP); + assert_eq!(sqe_buf[61].get_mut().flags, IOSQE::empty()); + + assert_eq!(sqe_buf[62].get_mut().opcode, IORING_OP::READ); + assert_eq!(sqe_buf[62].get_mut().flags, IOSQE::IO_HARDLINK); + + assert_eq!(sqe_buf[63].get_mut().opcode, IORING_OP::TEE); + assert_eq!(sqe_buf[63].get_mut().flags, IOSQE::IO_HARDLINK); + + assert_eq!(sqe_buf[0].get_mut().opcode, IORING_OP::ACCEPT); 
+ assert_eq!(sqe_buf[0].get_mut().flags, IOSQE::IO_HARDLINK); + + assert_eq!(sqe_buf[1].get_mut().opcode, IORING_OP::CLOSE); + assert_eq!(sqe_buf[1].get_mut().flags, IOSQE::IO_HARDLINK); + + assert_eq!(sqe_buf[2].get_mut().opcode, IORING_OP::CONNECT); + assert_eq!(sqe_buf[2].get_mut().flags, IOSQE::empty()); + + assert_eq!(sqe_buf[3].get_mut().opcode, IORING_OP::NOP); + assert_eq!(sqe_buf[3].get_mut().flags, IOSQE::empty()); + } + + #[test] + fn test_soft_linked_sqes() { + let mut sqe_buf = gen_buf(64); + + { + let mut sqes = SQEs::new(&mut sqe_buf, 62, 5); + let mut linked = sqes.soft_linked(); + + assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::READ), Some(())); + assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::TEE), Some(())); + assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::ACCEPT), Some(())); + assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::CLOSE), Some(())); + assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::CONNECT), Some(())); + assert_eq!(linked.next().map(|i| i.sqe.sqe.opcode = IORING_OP::FADVISE), None); + } + + assert_eq!(sqe_buf[61].get_mut().opcode, IORING_OP::NOP); + assert_eq!(sqe_buf[61].get_mut().flags, IOSQE::empty()); + + assert_eq!(sqe_buf[62].get_mut().opcode, IORING_OP::READ); + assert_eq!(sqe_buf[62].get_mut().flags, IOSQE::IO_LINK); + + assert_eq!(sqe_buf[63].get_mut().opcode, IORING_OP::TEE); + assert_eq!(sqe_buf[63].get_mut().flags, IOSQE::IO_LINK); + + assert_eq!(sqe_buf[0].get_mut().opcode, IORING_OP::ACCEPT); + assert_eq!(sqe_buf[0].get_mut().flags, IOSQE::IO_LINK); + + assert_eq!(sqe_buf[1].get_mut().opcode, IORING_OP::CLOSE); + assert_eq!(sqe_buf[1].get_mut().flags, IOSQE::IO_LINK); + + assert_eq!(sqe_buf[2].get_mut().opcode, IORING_OP::CONNECT); + assert_eq!(sqe_buf[2].get_mut().flags, IOSQE::empty()); + + assert_eq!(sqe_buf[3].get_mut().opcode, IORING_OP::NOP); + assert_eq!(sqe_buf[3].get_mut().flags, IOSQE::empty()); + } +} \ No newline at end of file diff 
--git a/runtime/asyncio/src/sys/linux/epoll/mod.rs b/runtime/asyncio/src/sys/linux/epoll/mod.rs new file mode 100644 index 0000000..e69de29 diff --git a/runtime/asyncio/src/sys/linux/io_uring/driver.rs b/runtime/asyncio/src/sys/linux/io_uring/driver.rs new file mode 100644 index 0000000..a6458f8 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/driver.rs @@ -0,0 +1,61 @@ +use std::marker::PhantomData; +use std::pin::Pin; +use std::task::{Context, Poll}; +use iou::{SQE, SQEs}; +use super::{Event, Submission}; + +pub struct Completion<'cx> { + inner: super::Completion, + marker: PhantomData &'cx ()>, +} + +impl<'cx> Completion<'cx> { + pub(crate) fn new(mut sqe: SQE<'_>, _sqes: SQEs<'_>, cx: &mut Context<'cx>) -> Self { + let inner = super::Completion::new(cx.waker().clone()); + + // Make the userdata for the (final) SQE a pointer to the waker for the task blocking on + // this IO. + unsafe { sqe.set_user_data(inner.addr()) }; + + Self { inner, marker: PhantomData } + } + + #[inline(always)] + pub(crate) fn into_inner(self) -> super::Completion { + self.inner + } +} + +pub trait Driver: Clone { + /// Poll to prepare a number of submissions for the submission queue. + /// + /// If the driver has space for `count` SQE available it calls `prepare` to have said `SQE` + /// inserted. A driver can assume that prepare will use exactly `count` slots. Using this + /// drivers can implement backpressure by returning `Poll::Pending` if less than `count` + /// slots are available and waking the respective task up if enough slots have become available. + fn poll_prepare<'cx>( + self: Pin<&mut Self>, + ctx: &mut Context<'cx>, + count: u32, + prepare: impl FnOnce(SQEs<'_>, &mut Context<'cx>) -> Completion<'cx>, + ) -> Poll>; + + /// Suggestion for the driver to submit their queue to the kernel. + /// + /// This will be called by tasks after they have finished preparing submissions. 
 Drivers must + /// eventually submit these to the kernel but aren't required to do so right away. + fn poll_submit(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<()>; + + /// Completion hint + /// + /// This should return `Poll::Ready` if a completion with the given user_data may have been + /// received since the last call to this function. It is safe to always return `Poll::Ready`, + /// even if no actions were completed. + fn poll_complete(self: Pin<&mut Self>, ctx: &mut Context<'_>, user_data: u64) -> Poll<()>; + + fn submit(self, event: E) -> Submission + where Self: Sized + { + Submission::new(self, event) + } +} \ No newline at end of file diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/accept.rs b/runtime/asyncio/src/sys/linux/io_uring/events/accept.rs new file mode 100644 index 0000000..42b3502 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/accept.rs @@ -0,0 +1,27 @@ +use std::mem::ManuallyDrop; +use std::os::unix::io::RawFd; + +use iou::sqe::{SockFlag, SockAddrStorage}; +use iou::registrar::UringFd; + +use super::{Event, SQE, SQEs, Cancellation}; + +pub struct Accept { + pub addr: Option>, + pub fd: FD, + pub flags: SockFlag, +} + +impl Event for Accept { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_accept(self.fd, self.addr.as_deref_mut(), self.flags); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + Cancellation::from(ManuallyDrop::into_inner(this).addr) + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/close.rs b/runtime/asyncio/src/sys/linux/io_uring/events/close.rs new file mode 100644 index 0000000..b2cfdf8 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/close.rs @@ -0,0 +1,19 @@ +use std::os::unix::io::RawFd; + +use iou::registrar::UringFd; + +use super::{Event, SQE, SQEs}; + +pub struct Close { + pub fd: FD, +} + +impl Event for Close { + fn sqes_needed() 
-> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_close(self.fd); + sqe + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/connect.rs b/runtime/asyncio/src/sys/linux/io_uring/events/connect.rs new file mode 100644 index 0000000..ad928c7 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/connect.rs @@ -0,0 +1,26 @@ +use std::mem::ManuallyDrop; +use std::os::unix::io::RawFd; + +use iou::sqe::SockAddr; +use iou::registrar::UringFd; + +use super::{Event, SQE, SQEs, Cancellation}; + +pub struct Connect { + pub fd: FD, + pub addr: Box, +} + +impl Event for Connect { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_connect(self.fd, &mut *self.addr); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + Cancellation::from(ManuallyDrop::into_inner(this).addr) + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/epoll_ctl.rs b/runtime/asyncio/src/sys/linux/io_uring/events/epoll_ctl.rs new file mode 100644 index 0000000..a9f7269 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/epoll_ctl.rs @@ -0,0 +1,27 @@ +use std::mem::ManuallyDrop; +use std::os::unix::io::RawFd; + +use iou::sqe::{EpollOp, EpollEvent}; + +use super::{Event, SQE, SQEs, Cancellation}; + +pub struct EpollCtl { + pub epoll_fd: RawFd, + pub op: EpollOp, + pub fd: RawFd, + pub event: Option>, +} + +impl Event for EpollCtl { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_epoll_ctl(self.epoll_fd, self.op, self.fd, self.event.as_deref_mut()); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + Cancellation::from(ManuallyDrop::into_inner(this).event) + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/fadvise.rs 
b/runtime/asyncio/src/sys/linux/io_uring/events/fadvise.rs new file mode 100644 index 0000000..9560b22 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/fadvise.rs @@ -0,0 +1,23 @@ +use std::os::unix::io::RawFd; + +use iou::sqe::PosixFadviseAdvice; +use iou::registrar::UringFd; + +use super::{Event, SQE, SQEs}; + +pub struct Fadvise { + pub fd: FD, + pub offset: u64, + pub size: u64, + pub flags: PosixFadviseAdvice, +} + +impl Event for Fadvise { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_fadvise(self.fd, self.offset, self.size, self.flags); + sqe + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/fallocate.rs b/runtime/asyncio/src/sys/linux/io_uring/events/fallocate.rs new file mode 100644 index 0000000..01d6cd1 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/fallocate.rs @@ -0,0 +1,23 @@ +use std::os::unix::io::RawFd; + +use iou::registrar::UringFd; +use iou::sqe::FallocateFlags; + +use super::{Event, SQE, SQEs}; + +pub struct Fallocate { + pub fd: FD, + pub offset: u64, + pub size: u64, + pub flags: FallocateFlags, +} + +impl Event for Fallocate { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_fallocate(self.fd, self.offset, self.size, self.flags); + sqe + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/files_update.rs b/runtime/asyncio/src/sys/linux/io_uring/events/files_update.rs new file mode 100644 index 0000000..d1f8337 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/files_update.rs @@ -0,0 +1,23 @@ +use std::mem::ManuallyDrop; +use std::os::unix::io::RawFd; + +use super::{Event, SQE, SQEs, Cancellation}; + +pub struct FilesUpdate { + pub files: Box<[RawFd]>, + pub offset: u32, +} + +impl Event for FilesUpdate { + fn sqes_needed() -> u32 { 1 } + + unsafe fn 
prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_files_update(&self.files[..], self.offset); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + Cancellation::from(ManuallyDrop::into_inner(this).files) + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/fsync.rs b/runtime/asyncio/src/sys/linux/io_uring/events/fsync.rs new file mode 100644 index 0000000..60c73ca --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/fsync.rs @@ -0,0 +1,21 @@ +use std::os::unix::io::RawFd; + +use iou::registrar::UringFd; +use iou::sqe::FsyncFlags; + +use super::{Event, SQE, SQEs}; + +pub struct Fsync { + pub fd: FD, + pub flags: FsyncFlags, +} + +impl Event for Fsync { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_fsync(self.fd, self.flags); + sqe + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/mod.rs b/runtime/asyncio/src/sys/linux/io_uring/events/mod.rs new file mode 100644 index 0000000..2b7369a --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/mod.rs @@ -0,0 +1,56 @@ + +mod accept; +mod close; +mod connect; +mod epoll_ctl; +mod fadvise; +mod fallocate; +mod files_update; +mod fsync; +mod openat; +mod provide_buffers; +mod read; +mod readv; +mod recv; +mod send; +mod splice; +mod statx; +mod timeout; +mod write; +mod writev; + +use std::mem::ManuallyDrop; +use iou::{SQE, SQEs}; +use super::Cancellation; + +pub use accept::Accept; +pub use close::Close; +pub use connect::Connect; +pub use epoll_ctl::EpollCtl; +pub use fadvise::Fadvise; +pub use fallocate::Fallocate; +pub use files_update::FilesUpdate; +pub use fsync::Fsync; +pub use openat::OpenAt; +pub use provide_buffers::ProvideBuffers; +pub use read::Read; +pub use readv::ReadVectored; +pub use recv::Recv; +pub use send::Send; +pub use splice::Splice; +pub use statx::Statx; +pub use 
timeout::Timeout; +pub use write::Write; +pub use writev::WriteVectored; + +pub trait Event { + fn sqes_needed() -> u32; + + unsafe fn prepare<'a>(&mut self, sqs: &mut SQEs<'a>) -> SQE<'a>; + + fn cancel(_: ManuallyDrop) -> Cancellation + where Self: Sized + { + Cancellation::from(()) + } +} \ No newline at end of file diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/openat.rs b/runtime/asyncio/src/sys/linux/io_uring/events/openat.rs new file mode 100644 index 0000000..b814124 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/openat.rs @@ -0,0 +1,39 @@ +use std::ffi::CString; +use std::mem::ManuallyDrop; +use std::os::unix::ffi::OsStrExt; +use std::os::unix::prelude::RawFd; +use std::path::Path; +use iou::{SQE, SQEs}; +use iou::sqe::{Mode, OFlag}; +use crate::sys::linux::io_uring::cancellation::Cancellation; +use super::Event; + +pub struct OpenAt { + pub path: CString, + pub dir_fd: RawFd, + pub flags: OFlag, + pub mode: Mode, +} + +impl OpenAt { + pub fn without_dir(path: impl AsRef, flags: OFlag, mode: Mode) -> Self { + let path = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap(); + Self { path, dir_fd: libc::AT_FDCWD, flags, mode } + } +} + +impl Event for OpenAt { + fn sqes_needed() -> u32 { + 1 + } + + unsafe fn prepare<'a>(&mut self, sqs: &mut SQEs<'a>) -> SQE<'a> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_openat(self.dir_fd, &*self.path, self.flags, self.mode); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation where Self: Sized { + ManuallyDrop::into_inner(this).path.into() + } +} \ No newline at end of file diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/provide_buffers.rs b/runtime/asyncio/src/sys/linux/io_uring/events/provide_buffers.rs new file mode 100644 index 0000000..569d559 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/provide_buffers.rs @@ -0,0 +1,40 @@ +use std::mem::ManuallyDrop; +use iou::sqe::BufferGroupId; + +use super::{Event, SQE, SQEs, Cancellation}; + 
+pub struct ProvideBuffers { + pub bufs: Box<[u8]>, + pub count: u32, + pub group: BufferGroupId, + pub index: u32, +} + +impl Event for ProvideBuffers { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_provide_buffers(&mut self.bufs[..], self.count, self.group, self.index); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + Cancellation::from(ManuallyDrop::into_inner(this).bufs) + } +} + +pub struct RemoveBuffers { + pub count: u32, + pub group: BufferGroupId, +} + +impl Event for RemoveBuffers { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_remove_buffers(self.count, self.group); + sqe + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/read.rs b/runtime/asyncio/src/sys/linux/io_uring/events/read.rs new file mode 100644 index 0000000..cd3ac83 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/read.rs @@ -0,0 +1,47 @@ +use std::mem::ManuallyDrop; +use std::os::unix::io::RawFd; + +use iou::registrar::{UringFd, RegisteredBuf}; + +use super::{Event, SQE, SQEs, Cancellation}; + +/// A basic read event. 
+pub struct Read { + pub fd: FD, + pub buf: Box<[u8]>, + pub offset: u64, +} + +impl Event for Read { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_read(self.fd, &mut self.buf[..], self.offset); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + Cancellation::from(ManuallyDrop::into_inner(this).buf) + } +} + +pub struct ReadFixed { + pub fd: FD, + pub buf: RegisteredBuf, + pub offset: u64, +} + +impl Event for ReadFixed { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_read(self.fd, self.buf.as_mut(), self.offset); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + Cancellation::from(ManuallyDrop::into_inner(this).buf) + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/readv.rs b/runtime/asyncio/src/sys/linux/io_uring/events/readv.rs new file mode 100644 index 0000000..dfdf384 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/readv.rs @@ -0,0 +1,48 @@ +use std::io::IoSliceMut; +use std::mem::ManuallyDrop; +use std::os::unix::io::RawFd; + +use iou::registrar::UringFd; + +use super::{Event, SQE, SQEs, Cancellation}; + +/// A `readv` event. +pub struct ReadVectored { + pub fd: FD, + pub bufs: Box<[Box<[u8]>]>, + pub offset: u64, +} + +impl ReadVectored { + fn as_iovecs(buffers: &mut [Box<[u8]>]) -> &mut [IoSliceMut] { + // Unsafe contract: + // This pointer cast is defined behaviour because Box<[u8]> (wide pointer) + // is currently ABI compatible with libc::iovec. + // + // Then, libc::iovec is guaranteed ABI compatible with IoSliceMut on Unix: + // https://doc.rust-lang.org/beta/std/io/struct.IoSliceMut.html + // + // We are relying on the internals of Box<[u8]>, but this is such a + // foundational part of Rust it's unlikely the data layout would change + // without warning. 
+ // + // Pointer cast expression adapted from the "Turning a &mut T into an &mut U" + // example of: https://doc.rust-lang.org/std/mem/fn.transmute.html#alternatives + unsafe { &mut *(buffers as *mut [Box<[u8]>] as *mut [IoSliceMut]) } + } +} + + +impl Event for ReadVectored { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_read_vectored(self.fd, Self::as_iovecs(&mut self.bufs[..]), self.offset); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + Cancellation::from(ManuallyDrop::into_inner(this).bufs) + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/recv.rs b/runtime/asyncio/src/sys/linux/io_uring/events/recv.rs new file mode 100644 index 0000000..86dceea --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/recv.rs @@ -0,0 +1,27 @@ +use std::mem::ManuallyDrop; +use std::os::unix::io::RawFd; + +use iou::sqe::MsgFlags; +use iou::registrar::UringFd; + +use super::{Event, SQE, SQEs, Cancellation}; + +pub struct Recv { + pub fd: FD, + pub buf: Box<[u8]>, + pub flags: MsgFlags, +} + +impl Event for Recv { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_recv(self.fd, &mut self.buf[..], self.flags); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + Cancellation::from(ManuallyDrop::into_inner(this).buf) + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/send.rs b/runtime/asyncio/src/sys/linux/io_uring/events/send.rs new file mode 100644 index 0000000..740dade --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/send.rs @@ -0,0 +1,27 @@ +use std::mem::ManuallyDrop; +use std::os::unix::io::RawFd; + +use iou::sqe::MsgFlags; +use iou::registrar::UringFd; + +use super::{Event, SQE, SQEs, Cancellation}; + +pub struct Send { + pub fd: FD, + pub buf: Box<[u8]>, + pub flags: MsgFlags, +} 
+ +impl Event for Send { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_send(self.fd, &self.buf[..], self.flags); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + Cancellation::from(ManuallyDrop::into_inner(this).buf) + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/splice.rs b/runtime/asyncio/src/sys/linux/io_uring/events/splice.rs new file mode 100644 index 0000000..c574d34 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/splice.rs @@ -0,0 +1,24 @@ +use std::os::unix::io::RawFd; + +use iou::sqe::SpliceFlags; + +use super::{Event, SQE, SQEs}; + +pub struct Splice { + pub fd_in: RawFd, + pub off_in: i64, + pub fd_out: RawFd, + pub off_out: i64, + pub bytes: u32, + pub flags: SpliceFlags, +} + +impl Event for Splice { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_splice(self.fd_in, self.off_in, self.fd_out, self.off_out, self.bytes, self.flags); + sqe + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/statx.rs b/runtime/asyncio/src/sys/linux/io_uring/events/statx.rs new file mode 100644 index 0000000..1349ab7 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/statx.rs @@ -0,0 +1,53 @@ +use std::ffi::CString; +use std::mem::{self, ManuallyDrop}; +use std::os::unix::io::RawFd; +use std::os::unix::ffi::OsStrExt; +use std::path::Path; + +use iou::sqe::{StatxFlags, StatxMode}; +use iou::registrar::UringFd; + +use super::{Event, SQE, SQEs, Cancellation}; + +pub struct Statx { + pub dir_fd: FD, + pub path: CString, + pub flags: StatxFlags, + pub mask: StatxMode, + pub statx: Box, +} + +impl Statx { + pub fn without_dir(path: impl AsRef, flags: StatxFlags, mask: StatxMode) -> Statx { + let path = CString::new(path.as_ref().as_os_str().as_bytes()).unwrap(); + let statx = unsafe { 
Box::new(mem::zeroed()) }; + Statx { path, dir_fd: libc::AT_FDCWD, flags, mask, statx } + } +} + +impl Statx { + pub fn without_path(fd: FD, mut flags: StatxFlags, mask: StatxMode) -> Statx { + unsafe { + // TODO don't allocate? Use Cow? Use NULL? + let path = CString::new("").unwrap(); + let statx = Box::new(mem::zeroed()); + flags.insert(StatxFlags::AT_EMPTY_PATH); + Statx { dir_fd: fd, path, flags, mask, statx } + } + } +} + +impl Event for Statx { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_statx(self.dir_fd, self.path.as_c_str(), self.flags, self.mask, &mut *self.statx); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + let this = ManuallyDrop::into_inner(this); + Cancellation::from((this.statx, this.path)) + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/timeout.rs b/runtime/asyncio/src/sys/linux/io_uring/events/timeout.rs new file mode 100644 index 0000000..b2d9f9a --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/timeout.rs @@ -0,0 +1,67 @@ +use std::mem::ManuallyDrop; +use std::time::Duration; + +use super::{Event, SQE, SQEs, Cancellation}; + +use iou::sqe::TimeoutFlags; + +pub struct StaticTimeout { + ts: uring_sys::__kernel_timespec, + events: u32, + flags: TimeoutFlags, +} + +impl StaticTimeout { + pub const fn new(duration: Duration, events: u32, flags: TimeoutFlags) -> StaticTimeout { + StaticTimeout { + ts: timespec(duration), + events, flags, + } + } +} + +impl Event for &'static StaticTimeout { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_timeout(&self.ts, self.events, self.flags); + sqe + } +} + +pub struct Timeout { + ts: Box, + events: u32, + flags: TimeoutFlags, +} + +impl Timeout { + pub fn new(duration: Duration, events: u32, flags: TimeoutFlags) -> Timeout { + Timeout { + ts: 
Box::new(timespec(duration)), + events, flags, + } + } +} + +impl Event for Timeout { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_timeout(&*self.ts, self.events, self.flags); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + Cancellation::from(ManuallyDrop::into_inner(this).ts) + } +} + +const fn timespec(duration: Duration) -> uring_sys::__kernel_timespec { + uring_sys::__kernel_timespec { + tv_sec: duration.as_secs() as i64, + tv_nsec: duration.subsec_nanos() as _, + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/write.rs b/runtime/asyncio/src/sys/linux/io_uring/events/write.rs new file mode 100644 index 0000000..bf0d308 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/write.rs @@ -0,0 +1,47 @@ +use std::mem::ManuallyDrop; +use std::os::unix::io::RawFd; + +use iou::registrar::{UringFd, RegisteredBuf}; + +use super::{Event, SQE, SQEs, Cancellation}; + +/// A basic write event. 
+pub struct Write { + pub fd: FD, + pub buf: Box<[u8]>, + pub offset: u64, +} + +impl Event for Write { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_write(self.fd, &self.buf[..], self.offset); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + Cancellation::from(ManuallyDrop::into_inner(this).buf) + } +} + +pub struct WriteFixed { + pub fd: FD, + pub buf: RegisteredBuf, + pub offset: u64, +} + +impl Event for WriteFixed { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_write(self.fd, self.buf.as_ref(), self.offset); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + Cancellation::from(ManuallyDrop::into_inner(this).buf) + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/events/writev.rs b/runtime/asyncio/src/sys/linux/io_uring/events/writev.rs new file mode 100644 index 0000000..447ada2 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/events/writev.rs @@ -0,0 +1,34 @@ +use std::io::IoSlice; +use std::mem::ManuallyDrop; +use std::os::unix::io::RawFd; + +use iou::registrar::UringFd; + +use super::{Event, SQE, SQEs, Cancellation}; + +/// A `writev` event. +pub struct WriteVectored { + pub fd: FD, + pub bufs: Box<[Box<[u8]>]>, + pub offset: u64, +} + +impl WriteVectored { + fn iovecs(&self) -> &[IoSlice] { + unsafe { & *(&self.bufs[..] 
as *const [Box<[u8]>] as *const [IoSlice]) } + } +} + +impl Event for WriteVectored { + fn sqes_needed() -> u32 { 1 } + + unsafe fn prepare<'sq>(&mut self, sqs: &mut SQEs<'sq>) -> SQE<'sq> { + let mut sqe = sqs.single().unwrap(); + sqe.prep_write_vectored(self.fd, self.iovecs(), self.offset); + sqe + } + + fn cancel(this: ManuallyDrop) -> Cancellation { + Cancellation::from(ManuallyDrop::into_inner(this).bufs) + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/fs.rs b/runtime/asyncio/src/sys/linux/io_uring/fs.rs new file mode 100644 index 0000000..104a63b --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/fs.rs @@ -0,0 +1,187 @@ +// Imported here for modules +use std::future::Future; +use std::{fs, io}; +use std::mem::ManuallyDrop; +use std::os::unix::prelude::{FromRawFd, RawFd}; +use std::path::Path; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use super::{Driver, Ring, Submission, events::*}; + +use futures_core::ready; +use futures_io::{AsyncRead, AsyncWrite, AsyncSeek, AsyncBufRead}; + +use iou::sqe::{Mode, OFlag}; + +pub struct File { + ring: Ring, + fd: RawFd, + active: Op, +} + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +enum Op { + Read, + Write, + Close, + Nothing, + Statx, + Closed, +} + + +impl File { + fn from_fd(fd: RawFd, driver: D) -> File { + File { + ring: Ring::new(driver), + fd, + active: Op::Nothing, + } + } + + pub fn open>(driver: D, path: P) -> impl Future> { + let flags = OFlag::O_CLOEXEC | OFlag::O_RDONLY; + open::Open(driver.submit(OpenAt::without_dir( + path, flags, Mode::from_bits(0o666).unwrap() + ))) + } + + pub fn create>(driver: D, path: P) -> impl Future> { + let flags = OFlag::O_CLOEXEC | OFlag::O_WRONLY | OFlag::O_CREAT | OFlag::O_TRUNC; + create::Create(driver.submit(OpenAt::without_dir( + path, flags, Mode::from_bits(0o666).unwrap() + ))) + } +} + +mod open; +mod create; + +impl AsyncRead for File { + fn poll_read(mut self: Pin<&mut Self>, ctx: &mut Context<'_>, buf: &mut [u8]) + -> Poll> + { + let 
mut inner = ready!(self.as_mut().poll_fill_buf(ctx))?; + let len = io::Read::read(&mut inner, buf)?; + self.consume(len); + Poll::Ready(Ok(len)) + } +} + +impl AsyncBufRead for File { + fn poll_fill_buf(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll> { + let fd = self.fd; + let (ring, buf, pos, ..) = self.split_with_buf(); + buf.fill_buf(|buf| { + let n = ready!(ring.poll(ctx, 1, |sqs| { + let mut sqe = sqs.single().unwrap(); + unsafe { + sqe.prep_read(fd, buf, *pos); + } + sqe + }))?; + *pos += n as u64; + Poll::Ready(Ok(n as u32)) + }) + } + + fn consume(self: Pin<&mut Self>, amt: usize) { + self.buf().consume(amt); + } +} + +impl AsyncWrite for File { + fn poll_write(mut self: Pin<&mut Self>, ctx: &mut Context<'_>, slice: &[u8]) -> Poll> { + let fd = self.fd; + let (ring, buf, pos, ..) = self.split_with_buf(); + let data = ready!(buf.fill_buf(|mut buf| { + Poll::Ready(Ok(io::Write::write(&mut buf, slice)? as u32)) + }))?; + let n = ready!(ring.poll(ctx, 1, |sqs| { + let mut sqe = sqs.single().unwrap(); + unsafe { + sqe.prep_write(fd, data, *pos); + } + sqe + }))?; + *pos += n as u64; + buf.clear(); + Poll::Ready(Ok(n as usize)) + } + + fn poll_flush(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll> { + ready!(self.poll_write(ctx, &[]))?; + Poll::Ready(Ok(())) + } + + fn poll_close(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll> { + self.as_mut().guard_op(Op::Close); + let fd = self.fd; + ready!(self.as_mut().ring().poll(ctx, 1, |sqs| { + let mut sqe = sqs.single().unwrap(); + unsafe { + sqe.prep_close(fd); + } + sqe + }))?; + self.confirm_close(); + Poll::Ready(Ok(())) + } +} + +impl AsyncSeek for File { + fn poll_seek(mut self: Pin<&mut Self>, ctx: &mut Context, pos: io::SeekFrom) + -> Poll> + { + let (start, offset) = match pos { + io::SeekFrom::Start(n) => { + *self.as_mut().pos() = n; + return Poll::Ready(Ok(self.pos)); + } + io::SeekFrom::Current(n) => (self.pos, n), + io::SeekFrom::End(n) => { + 
(ready!(self.as_mut().poll_file_size(ctx))?, n) + } + }; + let valid_seek = if offset.is_negative() { + match start.checked_sub(offset.abs() as u64) { + Some(valid_seek) => valid_seek, + None => { + let invalid = io::Error::from(io::ErrorKind::InvalidInput); + return Poll::Ready(Err(invalid)); + } + } + } else { + match start.checked_add(offset as u64) { + Some(valid_seek) => valid_seek, + None => { + let overflow = io::Error::from_raw_os_error(libc::EOVERFLOW); + return Poll::Ready(Err(overflow)); + } + } + }; + *self.as_mut().pos() = valid_seek; + Poll::Ready(Ok(self.pos)) + } +} + +impl From> for fs::File { + fn from(mut file: File) -> fs::File { + file.cancel(); + let file = ManuallyDrop::new(file); + unsafe { + fs::File::from_raw_fd(file.fd) + } + } +} + +impl Drop for File { + fn drop(&mut self) { + match self.active { + Op::Closed => { } + Op::Nothing => unsafe { libc::close(self.fd); }, + _ => self.cancel(), + } + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/fs/create.rs b/runtime/asyncio/src/sys/linux/io_uring/fs/create.rs new file mode 100644 index 0000000..fbcc760 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/fs/create.rs @@ -0,0 +1,18 @@ +use std::future::Future; +use futures_core::ready; +use super::*; + +pub(super) struct Create(pub(super) Submission); + +impl Future for Create { + type Output = io::Result>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut inner = unsafe { + self.map_unchecked_mut(|this| &mut this.0) + }; + let (_, ready) = ready!(inner.as_mut().poll(cx)); + let fd = ready? 
as i32; + Poll::Ready(Ok(File::from_fd(fd, inner.driver().clone()))) + } +} diff --git a/runtime/asyncio/src/sys/linux/io_uring/fs/open.rs b/runtime/asyncio/src/sys/linux/io_uring/fs/open.rs new file mode 100644 index 0000000..498aaf1 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/fs/open.rs @@ -0,0 +1,18 @@ +use std::future::Future; +use futures_core::ready; +use super::*; + +pub(super) struct Open(pub(super) Submission); + +impl Future for Open { + type Output = io::Result>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut inner = unsafe { + self.map_unchecked_mut(|this| &mut this.0) + }; + let (_, ready) = ready!(inner.as_mut().poll(cx)); + let fd = ready? as i32; + Poll::Ready(Ok(File::from_fd(fd, inner.driver().clone()))) + } +} \ No newline at end of file diff --git a/runtime/asyncio/src/sys/linux/io_uring/mod.rs b/runtime/asyncio/src/sys/linux/io_uring/mod.rs new file mode 100644 index 0000000..1c45b67 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/mod.rs @@ -0,0 +1,20 @@ + +mod completion; +use completion::Completion; + +mod cancellation; +use cancellation::Cancellation; + +mod ring; +use ring::Ring; + +mod events; +use events::Event; + +mod submission; +use submission::Submission; + +mod driver; +use driver::Driver; + +mod fs; \ No newline at end of file diff --git a/runtime/asyncio/src/sys/linux/io_uring/ring.rs b/runtime/asyncio/src/sys/linux/io_uring/ring.rs new file mode 100644 index 0000000..5f0425b --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/ring.rs @@ -0,0 +1,176 @@ +use std::{io, mem}; +use std::pin::Pin; +use std::task::{Context, Poll}; +use iou::{SQE, SQEs}; +use super::{driver, Driver}; +use super::Completion; + +use futures_core::ready; +use crate::sys::linux::io_uring::cancellation::Cancellation; + +/// +pub struct Ring { + state: State, + driver: D, +} + +enum State { + Empty, + Prepared(Completion), + Submitted(Completion), + Cancelled(u64), + Lost, +} + +impl Ring { + pub fn 
new(driver: D) -> Self { + Self { + state: State::Empty, + driver, + } + } + + pub fn driver(&self) -> &D { + &self.driver + } + + fn split_pinned(self: Pin<&mut Self>) -> (&mut State, Pin<&mut D>) { + unsafe { + let this = Pin::get_unchecked_mut(self); + (&mut this.state, Pin::new_unchecked(&mut this.driver)) + } + } + + pub fn poll( + mut self: Pin<&mut Self>, + ctx: &mut Context<'_>, + count: u32, + prepare: impl for<'sq> FnOnce(&mut SQEs<'sq>) -> SQE<'sq>, + ) -> Poll> { + match self.state { + State::Empty => { + ready!(self.as_mut().poll_prepare_empty(ctx, count, prepare)); + ready!(self.as_mut().poll_submit(ctx)); + self.poll_complete(ctx) + }, + State::Cancelled(previous) => { + ready!(self.as_mut().poll_prepare_canceled(ctx, previous, count, prepare)); + ready!(self.as_mut().poll_submit(ctx)); + self.poll_complete(ctx) + }, + State::Prepared(_) => match self.as_mut().poll_complete(ctx) { + Poll::Pending => { + ready!(self.as_mut().poll_submit(ctx)); + self.poll_complete(ctx) + }, + ready @ Poll::Ready(_) => ready, + }, + State::Submitted(_) => self.poll_complete(ctx), + State::Lost => panic!("Lost events, ring is now in an invalid state"), + } + } + + fn poll_prepare_empty( + self: Pin<&mut Self>, + ctx: &mut Context<'_>, + count: u32, + prepare: impl for<'sq> FnOnce(&mut SQEs<'sq>) -> SQE<'sq>, + ) -> Poll<()> { + let (state, driver) = self.split_pinned(); + let completion = ready!(driver.poll_prepare(ctx, count, |mut sqes, ctx| { + *state = State::Lost; + let sqe = prepare(&mut sqes); + let completion = driver::Completion::new(sqe, sqes, ctx); + completion + })); + *state = State::Prepared(completion.into_inner()); + Poll::Ready(()) + } + + fn poll_prepare_canceled( + self: Pin<&mut Self>, + ctx: &mut Context<'_>, + previous: u64, + count: u32, + prepare: impl for<'sq> FnOnce(&mut SQEs<'sq>) -> SQE<'sq>, + ) -> Poll<()> { + let (mut state, driver) = self.split_pinned(); + let completion = ready!(driver.poll_prepare(ctx, count + 1, |mut sqes, ctx| { + 
*state = State::Lost; + unsafe { sqes.hard_linked().next().unwrap().prep_cancel(previous, 0); } + let sqe = prepare(&mut sqes); + let completion = driver::Completion::new(sqe, sqes, ctx); + completion + })); + *state = State::Prepared(completion.into_inner()); + Poll::Ready(()) + } + + fn poll_submit(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<()> { + let (state, driver) = self.split_pinned(); + let _ = ready!(driver.poll_submit(ctx)); + if let State::Prepared(completion) | State::Submitted(completion) + = mem::replace(state, State::Lost) + { + *state = State::Submitted(completion); + Poll::Ready(()) + } else { + unreachable!(); + } + } + + fn poll_complete(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll> { + let (state, driver) = self.split_pinned(); + match mem::replace(state, State::Lost) { + State::Prepared(completion) => { + ready!(driver.poll_complete(ctx, completion.addr())); + match completion.check(ctx.waker()) { + Ok(result) => { + *state = State::Empty; + Poll::Ready(result) + }, + Err(completion) => { + *state = State::Prepared(completion); + Poll::Pending + } + } + }, + State::Submitted(completion) => { + ready!(driver.poll_complete(ctx, completion.addr())); + match completion.check(ctx.waker()) { + Ok(result) => { + *state = State::Empty; + Poll::Ready(result) + }, + Err(completion) => { + *state = State::Submitted(completion); + Poll::Pending + } + } + }, + _ => unreachable!(), + } + } + + pub fn cancel_pinned(self: Pin<&mut Self>, cancellation: Cancellation) { + self.split_pinned().0.cancel(cancellation); + } + + pub fn cancel(&mut self, cancellation: Cancellation) { + self.state.cancel(cancellation) + } +} + +impl State { + fn cancel(&mut self, cancellation: Cancellation) { + match mem::replace(self, State::Lost) { + State::Submitted(completion) | State::Prepared(completion) => { + *self = State::Cancelled(completion.addr()); + completion.cancel(cancellation); + }, + state=> { + *self = state; + } + } + } +} \ No newline at end of 
file diff --git a/runtime/asyncio/src/sys/linux/io_uring/submission.rs b/runtime/asyncio/src/sys/linux/io_uring/submission.rs new file mode 100644 index 0000000..10a7c02 --- /dev/null +++ b/runtime/asyncio/src/sys/linux/io_uring/submission.rs @@ -0,0 +1,48 @@ +use std::future::Future; +use futures_core::ready; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; +use super::{Ring, Driver, Event}; + +pub struct Submission { + ring: Ring, + event: Option, +} + +impl Submission { + pub fn new(driver: D, event: E) -> Self { + Self { + ring: Ring::new(driver), + event: Some(event), + } + } + + pub fn driver(&self) -> &D { + self.ring.driver() + } + + fn split_pinned(self: Pin<&mut Self>) -> (Pin<&mut Ring>, &mut Option) { + unsafe { + let this = Pin::get_unchecked_mut(self); + (Pin::new_unchecked(&mut this.ring), &mut this.event) + } + } +} + +impl Future for Submission { + type Output = (E, io::Result); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let (ring, event) = self.split_pinned(); + + let result = if let Some(event) = event { + let count = E::sqes_needed(); + ready!(ring.poll(cx, count, |sqes| unsafe { event.prepare(sqes) })) + } else { + panic!("polled Submission after completion") + }; + + Poll::Ready((event.take().unwrap(), result)) + } +} diff --git a/runtime/asyncio/src/sys/linux/mod.rs b/runtime/asyncio/src/sys/linux/mod.rs new file mode 100644 index 0000000..e2716cc --- /dev/null +++ b/runtime/asyncio/src/sys/linux/mod.rs @@ -0,0 +1,5 @@ + +#[cfg(feature = "io_uring")] +mod io_uring; +#[cfg(feature = "epoll")] +mod epoll; \ No newline at end of file diff --git a/runtime/asyncio/src/sys/mod.rs b/runtime/asyncio/src/sys/mod.rs new file mode 100644 index 0000000..c2d2479 --- /dev/null +++ b/runtime/asyncio/src/sys/mod.rs @@ -0,0 +1,2 @@ +#[cfg(target_os = "linux")] +mod linux; \ No newline at end of file diff --git a/runtime/asyncio/src/syscall.rs b/runtime/asyncio/src/syscall.rs new file mode 100644 index 
0000000..50bd4b1 --- /dev/null +++ b/runtime/asyncio/src/syscall.rs @@ -0,0 +1,72 @@ +use std::io; +use std::os::unix::prelude::RawFd; +use libc::{c_ulong, c_long}; +use crate::ctypes::{IORING_ENTER, IORING_REGISTER_OP}; +use super::ctypes::Params; + +const ENOMEM: i32 = 12; + +const SYS_SETUP: c_long = libc::SYS_io_uring_setup; +const SYS_ENTER: c_long = libc::SYS_io_uring_enter; +const SYS_REGISTER: c_long = libc::SYS_io_uring_register; + +/// Syscall io_uring_setup, creating the io_uring ringbuffers +pub fn setup(entries: u32, params: *mut Params) -> io::Result { + assert!((0 < entries && entries <= 4096), "entries must be between 1 and 4096"); + assert_eq!(entries.count_ones(), 1, "entries must be a power of two"); + + let retval = unsafe { + libc::syscall(SYS_SETUP, entries, params) + }; + if retval < 0 { + let err = io::Error::last_os_error(); + if let Some(ENOMEM) = err.raw_os_error() { + return Err(io::Error::new( + io::ErrorKind::Other, + "Failed to lock enough memory. You may need to increase the memlock limit using \ + rlimits" + )); + } + return Err(err); + } else { + Ok(retval as RawFd) + } +} + +static_assertions::assert_eq_size!(i64, c_long); + +/// enter io_uring, returning when at least `min_complete` events have been completed +pub fn enter(fd: RawFd, + to_submit: u32, + min_complete: u32, + flags: IORING_ENTER, + args: *const libc::c_void, + argsz: libc::size_t + +) -> io::Result { + let retval = unsafe { + libc::syscall(SYS_ENTER, fd, to_submit, min_complete, flags.bits(), args, argsz) + }; + if retval < 0 { + let err = io::Error::last_os_error(); + Err(err) + } else { + Ok(retval) + } +} + +/// Register buffers or file descriptors with the kernel for faster usage and not having to use +/// atomics. 
+pub fn register(fd: RawFd, opcode: IORING_REGISTER_OP, args: *const (), nargs: u32) + -> io::Result +{ + let retval = unsafe { + libc::syscall(SYS_REGISTER, fd, opcode, args, nargs) + }; + if retval < 0 { + let err = io::Error::last_os_error(); + Err(err) + } else { + Ok(retval) + } +} \ No newline at end of file diff --git a/runtime/executor/benches/blocking.rs b/runtime/executor/benches/blocking.rs deleted file mode 100644 index 6c5a6ff..0000000 --- a/runtime/executor/benches/blocking.rs +++ /dev/null @@ -1,67 +0,0 @@ -#![feature(test)] - -extern crate test; - -use bastion_executor::blocking; -use lightproc::proc_stack::ProcStack; -use std::thread; -use std::time::Duration; -use test::Bencher; - -#[cfg(feature = "tokio-runtime")] -mod tokio_benchs { - use super::*; - #[bench] - fn blocking(b: &mut Bencher) { - tokio_test::block_on(async { _blocking(b) }); - } - #[bench] - fn blocking_single(b: &mut Bencher) { - tokio_test::block_on(async { - _blocking_single(b); - }); - } -} - -#[cfg(not(feature = "tokio-runtime"))] -mod no_tokio_benchs { - use super::*; - #[bench] - fn blocking(b: &mut Bencher) { - _blocking(b); - } - #[bench] - fn blocking_single(b: &mut Bencher) { - _blocking_single(b); - } -} - -// Benchmark for a 10K burst task spawn -fn _blocking(b: &mut Bencher) { - b.iter(|| { - (0..10_000) - .map(|_| { - blocking::spawn_blocking( - async { - let duration = Duration::from_millis(1); - thread::sleep(duration); - }, - ProcStack::default(), - ) - }) - .collect::>() - }); -} - -// Benchmark for a single blocking task spawn -fn _blocking_single(b: &mut Bencher) { - b.iter(|| { - blocking::spawn_blocking( - async { - let duration = Duration::from_millis(1); - thread::sleep(duration); - }, - ProcStack::default(), - ) - }); -} diff --git a/runtime/executor/benches/run_blocking.rs b/runtime/executor/benches/run_blocking.rs deleted file mode 100644 index 43de440..0000000 --- a/runtime/executor/benches/run_blocking.rs +++ /dev/null @@ -1,69 +0,0 @@ 
-#![feature(test)] - -extern crate test; - -use bastion_executor::blocking; -use bastion_executor::run::run; -use futures::future::join_all; -use lightproc::proc_stack::ProcStack; -use std::thread; -use std::time::Duration; -use test::Bencher; - -#[cfg(feature = "tokio-runtime")] -mod tokio_benchs { - use super::*; - #[bench] - fn blocking(b: &mut Bencher) { - tokio_test::block_on(async { _blocking(b) }); - } - #[bench] - fn blocking_single(b: &mut Bencher) { - tokio_test::block_on(async { - _blocking_single(b); - }); - } -} - -#[cfg(not(feature = "tokio-runtime"))] -mod no_tokio_benchs { - use super::*; - #[bench] - fn blocking(b: &mut Bencher) { - _blocking(b); - } - #[bench] - fn blocking_single(b: &mut Bencher) { - _blocking_single(b); - } -} - -// Benchmark for a 10K burst task spawn -fn _blocking(b: &mut Bencher) { - b.iter(|| { - (0..10_000) - .map(|_| { - blocking::spawn_blocking( - async { - let duration = Duration::from_millis(1); - thread::sleep(duration); - }, - ProcStack::default(), - ) - }) - .collect::>() - }); -} - -// Benchmark for a single blocking task spawn -fn _blocking_single(b: &mut Bencher) { - b.iter(|| { - blocking::spawn_blocking( - async { - let duration = Duration::from_millis(1); - thread::sleep(duration); - }, - ProcStack::default(), - ) - }); -} diff --git a/runtime/executor/src/blocking.rs b/runtime/executor/src/blocking.rs deleted file mode 100644 index 29e34ef..0000000 --- a/runtime/executor/src/blocking.rs +++ /dev/null @@ -1,165 +0,0 @@ -//! -//! Pool of threads to run heavy processes -//! -//! We spawn futures onto the pool with [`spawn_blocking`] method of global run queue or -//! with corresponding [`Worker`]'s spawn method. -//! -//! 
[`Worker`]: crate::run_queue::Worker - -use crate::thread_manager::{DynamicPoolManager, DynamicRunner}; -use crossbeam_channel::{unbounded, Receiver, Sender}; -use lazy_static::lazy_static; -use lightproc::lightproc::LightProc; -use lightproc::recoverable_handle::RecoverableHandle; -use once_cell::sync::{Lazy, OnceCell}; -use std::future::Future; -use std::iter::Iterator; -use std::time::Duration; -use std::{env, thread}; -use tracing::trace; - -/// If low watermark isn't configured this is the default scaler value. -/// This value is used for the heuristics of the scaler -const DEFAULT_LOW_WATERMARK: u64 = 2; - -const THREAD_RECV_TIMEOUT: Duration = Duration::from_millis(100); - -/// Spawns a blocking task. -/// -/// The task will be spawned onto a thread pool specifically dedicated to blocking tasks. -pub fn spawn_blocking(future: F) -> RecoverableHandle -where - F: Future + Send + 'static, - R: Send + 'static, -{ - let (task, handle) = LightProc::recoverable(future, schedule); - task.schedule(); - handle -} - -#[derive(Debug)] -struct BlockingRunner { - // We keep a handle to the tokio runtime here to make sure - // it will never be dropped while the DynamicPoolManager is alive, - // In case we need to spin up some threads. - #[cfg(feature = "tokio-runtime")] - runtime_handle: tokio::runtime::Handle, -} - -impl DynamicRunner for BlockingRunner { - fn run_static(&self, park_timeout: Duration) -> ! { - loop { - while let Ok(task) = POOL.receiver.recv_timeout(THREAD_RECV_TIMEOUT) { - trace!("static thread: running task"); - self.run(task); - } - - trace!("static: empty queue, parking with timeout"); - thread::park_timeout(park_timeout); - } - } - fn run_dynamic(&self, parker: impl Fn()) -> ! 
{ - loop { - while let Ok(task) = POOL.receiver.recv_timeout(THREAD_RECV_TIMEOUT) { - trace!("dynamic thread: running task"); - self.run(task); - } - trace!( - "dynamic thread: parking - {:?}", - std::thread::current().id() - ); - parker(); - } - } - fn run_standalone(&self) { - while let Ok(task) = POOL.receiver.recv_timeout(THREAD_RECV_TIMEOUT) { - self.run(task); - } - trace!("standalone thread: quitting."); - } -} - -impl BlockingRunner { - fn run(&self, task: LightProc) { - #[cfg(feature = "tokio-runtime")] - { - self.runtime_handle.spawn_blocking(|| task.run()); - } - #[cfg(not(feature = "tokio-runtime"))] - { - task.run(); - } - } -} - -/// Pool interface between the scheduler and thread pool -struct Pool { - sender: Sender, - receiver: Receiver, -} - -static DYNAMIC_POOL_MANAGER: OnceCell> = OnceCell::new(); - -static POOL: Lazy = Lazy::new(|| { - #[cfg(feature = "tokio-runtime")] - { - let runner = BlockingRunner { - // We use current() here instead of try_current() - // because we want bastion to crash as soon as possible - // if there is no available runtime. - runtime_handle: tokio::runtime::Handle::current(), - }; - - DYNAMIC_POOL_MANAGER - .set(DynamicPoolManager::new(*low_watermark() as usize, runner)) - .expect("couldn't create dynamic pool manager"); - } - #[cfg(not(feature = "tokio-runtime"))] - { - let runner = BlockingRunner {}; - - DYNAMIC_POOL_MANAGER - .set(DynamicPoolManager::new(*low_watermark() as usize, runner)) - .expect("couldn't create dynamic pool manager"); - } - - DYNAMIC_POOL_MANAGER - .get() - .expect("couldn't get static pool manager") - .initialize(); - - let (sender, receiver) = unbounded(); - Pool { sender, receiver } -}); - -/// Enqueues work, attempting to send to the thread pool in a -/// nonblocking way and spinning up needed amount of threads -/// based on the previous statistics without relying on -/// if there is not a thread ready to accept the work or not. 
-fn schedule(t: LightProc) { - if let Err(err) = POOL.sender.try_send(t) { - // We were not able to send to the channel without - // blocking. - POOL.sender.send(err.into_inner()).unwrap(); - } - - // Add up for every incoming scheduled task - DYNAMIC_POOL_MANAGER.get().unwrap().increment_frequency(); -} - -/// -/// Low watermark value, defines the bare minimum of the pool. -/// Spawns initial thread set. -/// Can be configurable with env var `BASTION_BLOCKING_THREADS` at runtime. -#[inline] -fn low_watermark() -> &'static u64 { - lazy_static! { - static ref LOW_WATERMARK: u64 = { - env::var_os("BASTION_BLOCKING_THREADS") - .map(|x| x.to_str().unwrap().parse::().unwrap()) - .unwrap_or(DEFAULT_LOW_WATERMARK) - }; - } - - &*LOW_WATERMARK -} diff --git a/runtime/executor/src/proc_stack.rs b/runtime/executor/src/proc_stack.rs deleted file mode 100644 index 05b612c..0000000 --- a/runtime/executor/src/proc_stack.rs +++ /dev/null @@ -1,5 +0,0 @@ - -#[derive(Debug)] -pub struct ProcStack { - -} diff --git a/runtime/executor/src/sleepers.rs b/runtime/executor/src/sleepers.rs deleted file mode 100644 index 04811bb..0000000 --- a/runtime/executor/src/sleepers.rs +++ /dev/null @@ -1,67 +0,0 @@ -//! -//! Where workers went to parking while no workload is in their worker queue. -//! -//! If a workload received pool will wake them up. -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Condvar, Mutex}; - -/// The place where worker threads go to sleep. -/// -/// Similar to how thread parking works, if a notification comes up while no threads are sleeping, -/// the next thread that attempts to go to sleep will pick up the notification immediately. -#[derive(Debug)] -#[allow(clippy::mutex_atomic)] -pub struct Sleepers { - /// How many threads are currently a sleep. - sleep: Mutex, - - /// A condvar for notifying sleeping threads. - wake: Condvar, - - /// Set to `true` if a notification came up while nobody was sleeping. 
- notified: AtomicBool, -} - -#[allow(clippy::mutex_atomic)] -impl Default for Sleepers { - /// Creates a new `Sleepers`. - fn default() -> Self { - Self { - sleep: Mutex::new(0), - wake: Condvar::new(), - notified: AtomicBool::new(false), - } - } -} - -#[allow(clippy::mutex_atomic)] -impl Sleepers { - /// Creates a new `Sleepers`. - pub fn new() -> Self { - Self::default() - } - - /// Puts the current thread to sleep. - pub fn wait(&self) { - let mut sleep = self.sleep.lock().unwrap(); - - if !self.notified.swap(false, Ordering::SeqCst) { - *sleep += 1; - std::mem::drop(self.wake.wait(sleep).unwrap()); - } - } - - /// Notifies one thread. - pub fn notify_one(&self) { - if !self.notified.load(Ordering::SeqCst) { - let mut sleep = self.sleep.lock().unwrap(); - - if *sleep > 0 { - *sleep -= 1; - self.wake.notify_one(); - } else { - self.notified.store(true, Ordering::SeqCst); - } - } - } -} diff --git a/runtime/executor/src/worker.rs b/runtime/executor/src/worker.rs index b5f24ef..e69de29 100644 --- a/runtime/executor/src/worker.rs +++ b/runtime/executor/src/worker.rs @@ -1,93 +0,0 @@ -//! -//! SMP parallelism based cache affine worker implementation -//! -//! This worker implementation relies on worker run queue statistics which are hold in the pinned global memory -//! where workload distribution calculated and amended to their own local queues. - -use crate::pool; - -use lightproc::prelude::*; -use std::cell::Cell; -use std::ptr; -use std::time::Duration; -use crossbeam_deque::{Stealer, Worker}; -use crate::proc_stack::ProcStack; - -/// The timeout we'll use when parking before an other Steal attempt -pub const THREAD_PARK_TIMEOUT: Duration = Duration::from_millis(1); - -thread_local! { - static STACK: Cell<*const ProcStack> = Cell::new(ptr::null_mut()); -} - -/// -/// Set the current process's stack during the run of the future. 
-pub(crate) fn set_stack(stack: *const ProcStack, f: F) -> R -where - F: FnOnce() -> R, -{ - struct ResetStack<'a>(&'a Cell<*const ProcStack>); - - impl Drop for ResetStack<'_> { - fn drop(&mut self) { - self.0.set(ptr::null()); - } - } - - STACK.with(|st| { - st.set(stack); - // create a guard to reset STACK even if the future panics. This is important since we - // must not drop the pointed-to ProcStack here in any case. - let _guard = ResetStack(st); - - f() - }) -} - -/* -pub(crate) fn get_proc_stack(f: F) -> Option -where - F: FnOnce(&ProcStack) -> R, -{ - let res = STACK.try_with(|st| unsafe { st.get().as_ref().map(f) }); - - match res { - Ok(Some(val)) => Some(val), - Ok(None) | Err(_) => None, - } -} - -/// -/// Get the stack currently in use for this thread -pub fn current() -> ProcStack { - get_proc_stack(|proc| proc.clone()) - .expect("`proc::current()` called outside the context of the proc") -} - */ - -pub(crate) fn schedule(proc: LightProc) { - pool::schedule(proc) -} - -/// A worker thread running futures locally and stealing work from other workers if it runs empty. 
-pub struct WorkerThread { - queue: Worker, -} - -impl WorkerThread { - pub fn new() -> Self { - Self { - queue: Worker::new_fifo(), - } - } - - pub fn stealer(&self) -> Stealer { - self.queue.stealer() - } - - pub fn tick(&self) { - if let Some(lightproc) = self.queue.pop() { - lightproc.run() - } - } -} \ No newline at end of file diff --git a/runtime/executor/tests/lib.rs b/runtime/executor/tests/lib.rs deleted file mode 100644 index 416c571..0000000 --- a/runtime/executor/tests/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -#[cfg(test)] -mod tests { - use bastion_executor::{placement, pool}; - - #[test] - fn affinity_replacement() { - let core_ids = placement::get_core_ids().unwrap(); - dbg!(core_ids); - } - - #[cfg(feature = "tokio-runtime")] - mod tokio_tests { - #[tokio::test] - async fn pool_check() { - super::pool::get(); - } - } - - #[cfg(not(feature = "tokio-runtime"))] - mod no_tokio_tests { - #[test] - fn pool_check() { - super::pool::get(); - } - } -} diff --git a/runtime/lightproc/examples/state_change.rs b/runtime/lightproc/examples/state_change.rs deleted file mode 100644 index b27ccfc..0000000 --- a/runtime/lightproc/examples/state_change.rs +++ /dev/null @@ -1,78 +0,0 @@ -use crossbeam::channel::{unbounded, Sender}; -use futures_executor as executor; -use lazy_static::lazy_static; -use lightproc::prelude::*; - -use std::future::Future; -use std::sync::{Arc, Mutex}; -use std::thread; - -#[derive(Copy, Clone)] -pub struct GlobalState { - pub amount: usize, -} - -fn spawn_on_thread(future: F, gs: Arc>) - -> RecoverableHandle>, R> -where - F: Future + Send + 'static, - R: Send + 'static, -{ - lazy_static! { - // A channel that holds scheduled procs. - static ref QUEUE: Sender = { - let (sender, receiver) = unbounded::(); - - // Start the executor thread. 
- thread::spawn(move || { - for proc in receiver { - proc.run(); - } - }); - - sender - }; - } - - let stack = ProcStack::build(Box::new(gs)) - .initialize(Callback::wrap(|s: &mut Arc>| { - println!("initializing"); - s.clone().lock().unwrap().amount += 1; - })) - .completed(Callback::wrap(|s: &mut Arc>| { - println!("completed"); - s.clone().lock().unwrap().amount += 2; - })); - - let schedule = |t| QUEUE.send(t).unwrap(); - let (proc, handle) = LightProc::recoverable(future, schedule, stack); - let handle = handle - .on_panic(|s: &mut Arc>, _e| { - println!("panicked"); - s.clone().lock().unwrap().amount += 3; - }); - - proc.schedule(); - - handle -} - -fn main() { - let gs = Arc::new(Mutex::new(GlobalState { amount: 0 })); - let handle = spawn_on_thread( - async { - panic!("Panic here!"); - }, - gs.clone(), - ); - - executor::block_on(handle); - - // 0 at the start - // +1 before the start - // +2 after panic occurs and completion triggers - // +3 after panic triggers - let amount = gs.lock().unwrap().amount; - assert_eq!(amount, 6); - println!("Amount: {}", amount); -} diff --git a/runtime/lightproc/tests/stack.rs b/runtime/lightproc/tests/stack.rs deleted file mode 100644 index 6618d44..0000000 --- a/runtime/lightproc/tests/stack.rs +++ /dev/null @@ -1,15 +0,0 @@ -use lightproc::proc_stack::ProcStack; -use lightproc::proc_state::EmptyProcState; - -#[test] -fn stack_copy() { - let stack = ProcStack::default() - .with_pid(12) - .with_after_panic(|_s: &mut EmptyProcState| { - println!("After panic!"); - }); - - let stack2 = stack; - - assert_eq!(stack2.get_pid(), 12); -}