#![allow(clippy::cast_ptr_alignment)]

use std::collections::BTreeMap;
use std::fs::File;
use std::io;
use std::os::unix::io::AsRawFd;
use std::os::unix::io::FromRawFd;
use std::os::unix::io::RawFd;
use std::pin::Pin;
use std::ptr::null;
use std::sync::atomic::AtomicPtr;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering;

use base::AsRawDescriptor;
use base::EventType;
use base::IoBufMut;
use base::MappedRegion;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::Protection;
use base::RawDescriptor;
use libc::c_void;
use remain::sorted;
use sync::Mutex;
use thiserror::Error as ThisError;

use crate::bindings::*;
use crate::syscalls::*;

/// Per-operation user data passed into the ring and returned unchanged with the matching
/// completion.
pub type UserData = u64;

#[sorted]
#[derive(Debug, ThisError)]
pub enum Error {
    #[error("Failed to mmap completion ring {0}")]
    MappingCompleteRing(base::MmapError),
    #[error("Failed to mmap submit entries {0}")]
    MappingSubmitEntries(base::MmapError),
    #[error("Failed to mmap submit ring {0}")]
    MappingSubmitRing(base::MmapError),
    #[error("No space for more ring entries, try increasing the size passed to `new`")]
    NoSpace,
    #[error("Failed to enter io uring: {0}")]
    RingEnter(libc::c_int),
    #[error("Failed to register operations for io uring: {0}")]
    RingRegister(libc::c_int),
    #[error("Failed to setup io uring {0}")]
    Setup(libc::c_int),
}
pub type Result<T> = std::result::Result<T, Error>;

impl From<Error> for io::Error {
    fn from(e: Error) -> Self {
        use Error::*;
        match e {
            RingEnter(errno) => io::Error::from_raw_os_error(errno),
            Setup(errno) => io::Error::from_raw_os_error(errno),
            e => io::Error::other(e),
        }
    }
}

/// Tracks the state of the io_uring submit queue shared with the kernel.
pub struct SubmitQueue {
    submit_ring: SubmitQueueState,
    submit_queue_entries: SubmitQueueEntries,
    submitting: usize, // The number of ops in the process of being submitted.
    pub added: usize,  // The number of ops added but not yet accepted by the kernel.
    num_sqes: usize,   // The total number of sqes allocated in shared memory.
}

impl io_uring_sqe {
    pub fn set_addr(&mut self, val: u64) {
        self.__bindgen_anon_2.addr = val;
    }
    pub fn set_off(&mut self, val: u64) {
        self.__bindgen_anon_1.off = val;
    }

    pub fn set_buf_index(&mut self, val: u16) {
        self.__bindgen_anon_4.buf_index = val;
    }

    pub fn set_rw_flags(&mut self, val: libc::c_int) {
        self.__bindgen_anon_3.rw_flags = val;
    }

    pub fn set_poll_events(&mut self, val: u32) {
        let val = if cfg!(target_endian = "big") {
            // The kernel swaps the half-words of `poll32_events` on big-endian targets (for
            // compatibility with the legacy 16-bit `poll_events` field), so pre-swap the value
            // here to compensate.
            val.rotate_left(16)
        } else {
            val
        };
        self.__bindgen_anon_3.poll32_events = val;
    }
}

// Converts a file offset to the raw sqe `off` encoding. `None` means "use the current file
// position", which io_uring expects as an off64_t of -1.
fn file_offset_to_raw_offset(offset: Option<u64>) -> u64 {
    const USE_CURRENT_FILE_POS: libc::off64_t = -1;
    offset.unwrap_or(USE_CURRENT_FILE_POS as u64)
}

impl SubmitQueue {
    // Calls `f` with the next available sqe, or returns `Error::NoSpace` if the ring is full.
    // After `f` returns, the entry is made visible to the kernel by advancing the tail.
    fn prep_next_sqe<F>(&mut self, mut f: F) -> Result<()>
    where
        F: FnMut(&mut io_uring_sqe),
    {
        if self.added == self.num_sqes {
            return Err(Error::NoSpace);
        }

        // Find the next free submission entry in the submit ring.
        let tail = self.submit_ring.pointers.tail(Ordering::Relaxed);
        let next_tail = tail.wrapping_add(1);
        if next_tail == self.submit_ring.pointers.head(Ordering::Acquire) {
            return Err(Error::NoSpace);
        }
        // `tail` is the next sqe slot to use.
        let index = (tail & self.submit_ring.ring_mask) as usize;
        let sqe = self.submit_queue_entries.get_mut(index).unwrap();

        f(sqe);

        // Tell the kernel to use the sqe at `index` for this ring slot.
        self.submit_ring.set_array_entry(index, index as u32);

        // Advance the tail; `set_tail` stores with release ordering so the sqe writes above are
        // visible to the kernel before it observes the new tail.
        self.submit_ring.pointers.set_tail(next_tail);

        self.added += 1;

        Ok(())
    }

    // Returns the number of entries added since the last call to `prepare_submit` and marks them
    // as in flight for the next `io_uring_enter` call.
    fn prepare_submit(&mut self) -> usize {
        let out = self.added - self.submitting;
        self.submitting = self.added;

        out
    }

    // Records that submitting `count` entries to the kernel failed; they remain queued and will
    // be retried on the next submit.
    fn fail_submit(&mut self, count: usize) {
        debug_assert!(count <= self.submitting);
        self.submitting -= count;
    }

    // Records that `count` entries were accepted by the kernel, freeing their slots for reuse.
    fn complete_submit(&mut self, count: usize) {
        debug_assert!(count <= self.submitting);
        self.submitting -= count;
        self.added -= count;
    }
}

/// An io_uring operation type, used with `URingAllowlist` to restrict which operations may be
/// submitted to a ring.
#[repr(u32)]
pub enum URingOperation {
    Nop = io_uring_op_IORING_OP_NOP,
    Readv = io_uring_op_IORING_OP_READV,
    Writev = io_uring_op_IORING_OP_WRITEV,
    Fsync = io_uring_op_IORING_OP_FSYNC,
    ReadFixed = io_uring_op_IORING_OP_READ_FIXED,
    WriteFixed = io_uring_op_IORING_OP_WRITE_FIXED,
    PollAdd = io_uring_op_IORING_OP_POLL_ADD,
    PollRemove = io_uring_op_IORING_OP_POLL_REMOVE,
    SyncFileRange = io_uring_op_IORING_OP_SYNC_FILE_RANGE,
    Sendmsg = io_uring_op_IORING_OP_SENDMSG,
    Recvmsg = io_uring_op_IORING_OP_RECVMSG,
    Timeout = io_uring_op_IORING_OP_TIMEOUT,
    TimeoutRemove = io_uring_op_IORING_OP_TIMEOUT_REMOVE,
    Accept = io_uring_op_IORING_OP_ACCEPT,
    AsyncCancel = io_uring_op_IORING_OP_ASYNC_CANCEL,
    LinkTimeout = io_uring_op_IORING_OP_LINK_TIMEOUT,
    Connect = io_uring_op_IORING_OP_CONNECT,
    Fallocate = io_uring_op_IORING_OP_FALLOCATE,
    Openat = io_uring_op_IORING_OP_OPENAT,
    Close = io_uring_op_IORING_OP_CLOSE,
    FilesUpdate = io_uring_op_IORING_OP_FILES_UPDATE,
    Statx = io_uring_op_IORING_OP_STATX,
    Read = io_uring_op_IORING_OP_READ,
    Write = io_uring_op_IORING_OP_WRITE,
    Fadvise = io_uring_op_IORING_OP_FADVISE,
    Madvise = io_uring_op_IORING_OP_MADVISE,
    Send = io_uring_op_IORING_OP_SEND,
    Recv = io_uring_op_IORING_OP_RECV,
    Openat2 = io_uring_op_IORING_OP_OPENAT2,
    EpollCtl = io_uring_op_IORING_OP_EPOLL_CTL,
    Splice = io_uring_op_IORING_OP_SPLICE,
    ProvideBuffers = io_uring_op_IORING_OP_PROVIDE_BUFFERS,
    RemoveBuffers = io_uring_op_IORING_OP_REMOVE_BUFFERS,
    Tee = io_uring_op_IORING_OP_TEE,
    Shutdown = io_uring_op_IORING_OP_SHUTDOWN,
    Renameat = io_uring_op_IORING_OP_RENAMEAT,
    Unlinkat = io_uring_op_IORING_OP_UNLINKAT,
    Mkdirat = io_uring_op_IORING_OP_MKDIRAT,
    Symlinkat = io_uring_op_IORING_OP_SYMLINKAT,
    Linkat = io_uring_op_IORING_OP_LINKAT,
}

/// An allowlist of the operations a ring is permitted to submit, registered with the kernel via
/// `IORING_REGISTER_RESTRICTIONS`.
#[derive(Default)]
pub struct URingAllowlist(Vec<io_uring_restriction>);

impl URingAllowlist {
    /// Creates a new `URingAllowlist` that does not allow any operation yet.
    pub fn new() -> Self {
        URingAllowlist::default()
    }

    /// Allows the given operation to be submitted to the ring.
    pub fn allow_submit_operation(&mut self, operation: URingOperation) -> &mut Self {
        self.0.push(io_uring_restriction {
            opcode: io_uring_register_restriction_op_IORING_RESTRICTION_SQE_OP as u16,
            __bindgen_anon_1: io_uring_restriction__bindgen_ty_1 {
                sqe_op: operation as u8,
            },
            ..Default::default()
        });
        self
    }
}
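
// Illustrative sketch only (hypothetical bindings, not part of this module's API): restricting a
// ring to readv/writev/nop before handing the allowlist to `URingContext::new`.
//
//     let mut allowlist = URingAllowlist::new();
//     allowlist
//         .allow_submit_operation(URingOperation::Readv)
//         .allow_submit_operation(URingOperation::Writev)
//         .allow_submit_operation(URingOperation::Nop);
//     let uring = URingContext::new(16, Some(&allowlist))?;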

/// Unsafe wrapper for the kernel's io_uring interface. Allows queueing multiple I/O operations to
/// the kernel and asynchronously handling their completion. Each operation carries a `u64`
/// user_data value that identifies its result in the iterator returned by `wait`.
pub struct URingContext {
    ring_file: File, // Holds the io_uring context FD returned from io_uring_setup.
    pub submit_ring: Mutex<SubmitQueue>,
    pub complete_ring: CompleteQueueState,
}

impl URingContext {
    /// Creates a new `URingContext`, which requires a kernel with io_uring support.
    /// `num_entries` is the number of entries the ring will support; `allowlist`, when given,
    /// restricts the operations that may be submitted to the ring.
    pub fn new(num_entries: usize, allowlist: Option<&URingAllowlist>) -> Result<URingContext> {
        let mut ring_params = io_uring_params::default();
        if allowlist.is_some() {
            // To register restrictions, the ring must be started in a disabled state.
            ring_params.flags |= IORING_SETUP_R_DISABLED;
        }

        // SAFETY: the block below relies on the kernel honoring the io_uring ABI: the returned FD
        // is owned by `ring_file`, and the offsets and sizes that the kernel fills into
        // `ring_params` are valid for mapping the rings.
        unsafe {
            let fd = io_uring_setup(num_entries, &ring_params).map_err(Error::Setup)?;
            let ring_file = File::from_raw_fd(fd);

            // Register the restrictions if an allowlist was provided.
            if let Some(restrictions) = allowlist {
                io_uring_register(
                    fd,
                    io_uring_register_op_IORING_REGISTER_RESTRICTIONS,
                    restrictions.0.as_ptr() as *const c_void,
                    restrictions.0.len() as u32,
                )
                .map_err(Error::RingRegister)?;

                // Enable the ring, since it was created in a disabled state above.
                io_uring_register(
                    fd,
                    io_uring_register_op_IORING_REGISTER_ENABLE_RINGS,
                    null::<c_void>(),
                    0,
                )
                .map_err(Error::RingRegister)?;
            }

            // Mmap the submit ring, the submit queue entries, and the completion ring.
            let submit_ring = SubmitQueueState::new(
                MemoryMappingBuilder::new(
                    ring_params.sq_off.array as usize
                        + ring_params.sq_entries as usize * std::mem::size_of::<u32>(),
                )
                .from_file(&ring_file)
                .offset(u64::from(IORING_OFF_SQ_RING))
                .protection(Protection::read_write())
                .populate()
                .build()
                .map_err(Error::MappingSubmitRing)?,
                &ring_params,
            );

            let num_sqe = ring_params.sq_entries as usize;
            let submit_queue_entries = SubmitQueueEntries {
                mmap: MemoryMappingBuilder::new(
                    ring_params.sq_entries as usize * std::mem::size_of::<io_uring_sqe>(),
                )
                .from_file(&ring_file)
                .offset(u64::from(IORING_OFF_SQES))
                .protection(Protection::read_write())
                .populate()
                .build()
                .map_err(Error::MappingSubmitEntries)?,
                len: num_sqe,
            };

            let complete_ring = CompleteQueueState::new(
                MemoryMappingBuilder::new(
                    ring_params.cq_off.cqes as usize
                        + ring_params.cq_entries as usize * std::mem::size_of::<io_uring_cqe>(),
                )
                .from_file(&ring_file)
                .offset(u64::from(IORING_OFF_CQ_RING))
                .protection(Protection::read_write())
                .populate()
                .build()
                .map_err(Error::MappingCompleteRing)?,
                &ring_params,
            );

            Ok(URingContext {
                ring_file,
                submit_ring: Mutex::new(SubmitQueue {
                    submit_ring,
                    submit_queue_entries,
                    submitting: 0,
                    added: 0,
                    num_sqes: ring_params.sq_entries as usize,
                }),
                complete_ring,
            })
        }
    }

    /// Asynchronously writes to `fd` from the addresses given in `iovecs`, starting at `offset`
    /// (or the current file position if `offset` is `None`).
    ///
    /// # Safety
    /// The caller must guarantee that the memory referenced by `iovecs` stays valid, is not
    /// mutably aliased, and that `fd` remains open until the operation's completion has been
    /// returned from `wait`.
    pub unsafe fn add_writev_iter<I>(
        &self,
        iovecs: I,
        fd: RawFd,
        offset: Option<u64>,
        user_data: UserData,
    ) -> Result<()>
    where
        I: Iterator<Item = libc::iovec>,
    {
        self.add_writev(
            Pin::from(
                iovecs
                    .map(|iov| IoBufMut::from_raw_parts(iov.iov_base as *mut u8, iov.iov_len))
                    .collect::<Vec<_>>()
                    .into_boxed_slice(),
            ),
            fd,
            offset,
            user_data,
        )
    }

    /// Asynchronously writes to `fd` from the addresses given in `iovecs`, starting at `offset`.
    /// The buffers are kept alive internally until the matching completion is reaped.
    ///
    /// # Safety
    /// The caller must guarantee that the memory referenced by `iovecs` is not mutably aliased
    /// and that `fd` remains open until the operation's completion has been returned from `wait`.
    pub unsafe fn add_writev(
        &self,
        iovecs: Pin<Box<[IoBufMut<'static>]>>,
        fd: RawFd,
        offset: Option<u64>,
        user_data: UserData,
    ) -> Result<()> {
        self.submit_ring.lock().prep_next_sqe(|sqe| {
            sqe.opcode = io_uring_op_IORING_OP_WRITEV as u8;
            sqe.set_addr(iovecs.as_ptr() as *const _ as *const libc::c_void as u64);
            sqe.len = iovecs.len() as u32;
            sqe.set_off(file_offset_to_raw_offset(offset));
            sqe.set_buf_index(0);
            sqe.ioprio = 0;
            sqe.user_data = user_data;
            sqe.flags = 0;
            sqe.fd = fd;
        })?;
        self.complete_ring.add_op_data(user_data, iovecs);
        Ok(())
    }

    /// Asynchronously reads from `fd` into the addresses given in `iovecs`, starting at `offset`
    /// (or the current file position if `offset` is `None`).
    ///
    /// # Safety
    /// The caller must guarantee that the memory referenced by `iovecs` stays valid, is not
    /// otherwise aliased, and that `fd` remains open until the operation's completion has been
    /// returned from `wait`.
    pub unsafe fn add_readv_iter<I>(
        &self,
        iovecs: I,
        fd: RawFd,
        offset: Option<u64>,
        user_data: UserData,
    ) -> Result<()>
    where
        I: Iterator<Item = libc::iovec>,
    {
        self.add_readv(
            Pin::from(
                iovecs
                    .map(|iov| IoBufMut::from_raw_parts(iov.iov_base as *mut u8, iov.iov_len))
                    .collect::<Vec<_>>()
                    .into_boxed_slice(),
            ),
            fd,
            offset,
            user_data,
        )
    }

    /// Asynchronously reads from `fd` into the addresses given in `iovecs`, starting at `offset`.
    /// The buffers are kept alive internally until the matching completion is reaped.
    ///
    /// # Safety
    /// The caller must guarantee that the memory referenced by `iovecs` is not otherwise aliased
    /// and that `fd` remains open until the operation's completion has been returned from `wait`.
    pub unsafe fn add_readv(
        &self,
        iovecs: Pin<Box<[IoBufMut<'static>]>>,
        fd: RawFd,
        offset: Option<u64>,
        user_data: UserData,
    ) -> Result<()> {
        self.submit_ring.lock().prep_next_sqe(|sqe| {
            sqe.opcode = io_uring_op_IORING_OP_READV as u8;
            sqe.set_addr(iovecs.as_ptr() as *const _ as *const libc::c_void as u64);
            sqe.len = iovecs.len() as u32;
            sqe.set_off(file_offset_to_raw_offset(offset));
            sqe.set_buf_index(0);
            sqe.ioprio = 0;
            sqe.user_data = user_data;
            sqe.flags = 0;
            sqe.fd = fd;
        })?;
        self.complete_ring.add_op_data(user_data, iovecs);
        Ok(())
    }
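
    // Illustrative sketch only, with hypothetical `uring`, `file`, and `buf` bindings: the memory
    // backing the iovec must stay valid and un-aliased until the matching completion has been
    // returned by `wait()`.
    //
    //     let mut buf = vec![0u8; 4096];
    //     let iov = libc::iovec {
    //         iov_base: buf.as_mut_ptr() as *mut libc::c_void,
    //         iov_len: buf.len(),
    //     };
    //     // SAFETY: `buf` outlives the operation and is not touched until completion.
    //     unsafe { uring.add_readv_iter(std::iter::once(iov), file.as_raw_fd(), Some(0), 42)? };
    //     uring.submit()?;
    //     let (user_data, result) = uring.wait()?.next().unwrap();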

    /// Adds a no-op operation that doesn't perform any IO.
    pub fn add_nop(&self, user_data: UserData) -> Result<()> {
        self.submit_ring.lock().prep_next_sqe(|sqe| {
            sqe.opcode = io_uring_op_IORING_OP_NOP as u8;
            sqe.fd = -1;
            sqe.user_data = user_data;

            sqe.set_addr(0);
            sqe.len = 0;
            sqe.set_off(0);
            sqe.set_buf_index(0);
            sqe.set_rw_flags(0);
            sqe.ioprio = 0;
            sqe.flags = 0;
        })
    }

    /// Asynchronously fsyncs the given `fd`.
    pub fn add_fsync(&self, fd: RawFd, user_data: UserData) -> Result<()> {
        self.submit_ring.lock().prep_next_sqe(|sqe| {
            sqe.opcode = io_uring_op_IORING_OP_FSYNC as u8;
            sqe.fd = fd;
            sqe.user_data = user_data;

            sqe.set_addr(0);
            sqe.len = 0;
            sqe.set_off(0);
            sqe.set_buf_index(0);
            sqe.set_rw_flags(0);
            sqe.ioprio = 0;
            sqe.flags = 0;
        })
    }

    /// Asynchronously performs an `fallocate` of `len` bytes at `offset` on `fd` with the given
    /// `mode`.
    pub fn add_fallocate(
        &self,
        fd: RawFd,
        offset: u64,
        len: u64,
        mode: u32,
        user_data: UserData,
    ) -> Result<()> {
        // Note that for fallocate the length is passed in the sqe's addr field and the mode in
        // its len field.
        self.submit_ring.lock().prep_next_sqe(|sqe| {
            sqe.opcode = io_uring_op_IORING_OP_FALLOCATE as u8;

            sqe.fd = fd;
            sqe.set_addr(len);
            sqe.len = mode;
            sqe.set_off(offset);
            sqe.user_data = user_data;

            sqe.set_buf_index(0);
            sqe.set_rw_flags(0);
            sqe.ioprio = 0;
            sqe.flags = 0;
        })
    }

    /// Adds an FD to be polled for the given events. The FD must be kept open until the operation
    /// completes.
    pub fn add_poll_fd(&self, fd: RawFd, events: EventType, user_data: UserData) -> Result<()> {
        self.submit_ring.lock().prep_next_sqe(|sqe| {
            sqe.opcode = io_uring_op_IORING_OP_POLL_ADD as u8;
            sqe.fd = fd;
            sqe.user_data = user_data;
            sqe.set_poll_events(events.into());

            sqe.set_addr(0);
            sqe.len = 0;
            sqe.set_off(0);
            sqe.set_buf_index(0);
            sqe.ioprio = 0;
            sqe.flags = 0;
        })
    }

    /// Removes an FD that was previously added with `add_poll_fd`.
    pub fn remove_poll_fd(&self, fd: RawFd, events: EventType, user_data: UserData) -> Result<()> {
        self.submit_ring.lock().prep_next_sqe(|sqe| {
            sqe.opcode = io_uring_op_IORING_OP_POLL_REMOVE as u8;
            sqe.fd = fd;
            sqe.user_data = user_data;
            sqe.set_poll_events(events.into());

            sqe.set_addr(0);
            sqe.len = 0;
            sqe.set_off(0);
            sqe.set_buf_index(0);
            sqe.ioprio = 0;
            sqe.flags = 0;
        })
    }

    /// Attempts to cancel an already issued request. `addr` must contain the `user_data` of the
    /// request to be cancelled; the cancel attempt itself completes with `user_data`.
    pub fn async_cancel(&self, addr: UserData, user_data: UserData) -> Result<()> {
        self.submit_ring.lock().prep_next_sqe(|sqe| {
            sqe.opcode = io_uring_op_IORING_OP_ASYNC_CANCEL as u8;
            sqe.user_data = user_data;
            sqe.set_addr(addr);

            sqe.len = 0;
            sqe.fd = 0;
            sqe.set_off(0);
            sqe.set_buf_index(0);
            sqe.ioprio = 0;
            sqe.flags = 0;
        })
    }

    // Enters the kernel to submit any prepared sqes and optionally wait for `wait_nr`
    // completions.
    fn enter(&self, wait_nr: u64) -> Result<()> {
        let added = self.submit_ring.lock().prepare_submit();
        if added == 0 && wait_nr == 0 {
            return Ok(());
        }

        let flags = if wait_nr > 0 {
            IORING_ENTER_GETEVENTS
        } else {
            0
        };
        // SAFETY: the only memory the kernel modifies here is the mmap'd completion queue.
        let res =
            unsafe { io_uring_enter(self.ring_file.as_raw_fd(), added as u64, wait_nr, flags) };

        // An EINTR still means the sqes were submitted successfully.
        if res.is_ok() || res == Err(libc::EINTR) {
            self.submit_ring.lock().complete_submit(added);
        } else {
            self.submit_ring.lock().fail_submit(added);
        }

        match res {
            Ok(()) => Ok(()),
            // EBUSY means some completed events must be processed before more can be submitted,
            // and EINTR means the wait was interrupted; in both cases retry the wait (without
            // resubmitting) until it returns something other than EINTR.
            Err(libc::EBUSY) | Err(libc::EINTR) if wait_nr != 0 => {
                loop {
                    // SAFETY: the only memory the kernel modifies here is the completion queue.
                    let res =
                        unsafe { io_uring_enter(self.ring_file.as_raw_fd(), 0, wait_nr, flags) };
                    if res != Err(libc::EINTR) {
                        return res.map_err(Error::RingEnter);
                    }
                }
            }
            Err(e) => Err(Error::RingEnter(e)),
        }
    }

    /// Sends operations added with the `add_*` functions to the kernel without waiting for any
    /// completions.
    pub fn submit(&self) -> Result<()> {
        self.enter(0)
    }

    /// Sends operations added with the `add_*` functions to the kernel and returns an iterator
    /// over the completions that are ready, waiting for at least one completion if none are
    /// available yet.
    pub fn wait(&self) -> Result<impl Iterator<Item = (UserData, std::io::Result<u32>)> + '_> {
        // Only ask the kernel to wait if the completion queue is currently empty.
        let wait_nr = if self.complete_ring.num_ready() > 0 {
            0
        } else {
            1
        };

        match self.enter(wait_nr) {
            Ok(()) => Ok(&self.complete_ring),
            // If the kernel can't accept more submissions, still drain whatever completions are
            // available so the caller can make progress.
            Err(Error::RingEnter(libc::EBUSY)) => Ok(&self.complete_ring),
            Err(e) => Err(e),
        }
    }
}
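
// Illustrative polling sketch only (hypothetical `uring` and `socket` bindings): poll a
// descriptor for readability and dispatch on the returned user_data.
//
//     uring.add_poll_fd(socket.as_raw_fd(), EventType::Read, 454)?;
//     for (user_data, result) in uring.wait()? {
//         if user_data == 454 {
//             // `socket` is readable, or `result` carries the poll error.
//         }
//     }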

impl AsRawFd for URingContext {
    fn as_raw_fd(&self) -> RawFd {
        self.ring_file.as_raw_fd()
    }
}

impl AsRawDescriptor for URingContext {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.ring_file.as_raw_descriptor()
    }
}

struct SubmitQueueEntries {
    mmap: MemoryMapping,
    len: usize,
}

impl SubmitQueueEntries {
    fn get_mut(&mut self, index: usize) -> Option<&mut io_uring_sqe> {
        if index >= self.len {
            return None;
        }
        // SAFETY: the bounds check above keeps the pointer within the mapping, and the mutable
        // borrow of `self` guarantees exclusive access to the sqe.
        let mut_ref = unsafe { &mut *(self.mmap.as_ptr() as *mut io_uring_sqe).add(index) };
        // Clear the sqe so that stale fields from a previous operation don't leak into this one.
        *mut_ref = io_uring_sqe::default();
        Some(mut_ref)
    }
}

struct SubmitQueueState {
    _mmap: MemoryMapping,
    pointers: QueuePointers,
    ring_mask: u32,
    array: AtomicPtr<u32>,
}

impl SubmitQueueState {
    // # Safety
    // Safe only if `mmap` was created by mapping the uring FD at the SQ_RING offset and `params`
    // is the struct the kernel filled in for that ring.
    unsafe fn new(mmap: MemoryMapping, params: &io_uring_params) -> SubmitQueueState {
        let ptr = mmap.as_ptr();
        // The head, tail, and array pointers all stay within the mmap'd region for its lifetime.
        let head = ptr.add(params.sq_off.head as usize) as *const AtomicU32;
        let tail = ptr.add(params.sq_off.tail as usize) as *const AtomicU32;
        let ring_mask = mmap.read_obj(params.sq_off.ring_mask as usize).unwrap();
        let array = AtomicPtr::new(ptr.add(params.sq_off.array as usize) as *mut u32);
        SubmitQueueState {
            _mmap: mmap,
            pointers: QueuePointers { head, tail },
            ring_mask,
            array,
        }
    }

    // Sets the given entry of the kernel-visible sqe index array to `value`.
    fn set_array_entry(&self, index: usize, value: u32) {
        // SAFETY: `index` has already been masked to a valid ring slot by the caller and the
        // array pointer stays valid for the lifetime of the mapping.
        unsafe {
            std::ptr::write_volatile(self.array.load(Ordering::Relaxed).add(index), value);
        }
    }
}

#[derive(Default)]
struct CompleteQueueData {
    // Keeps the buffers for each pending operation alive until the matching completion is
    // returned to the caller.
    pending_op_addrs: BTreeMap<UserData, Pin<Box<[IoBufMut<'static>]>>>,
}

pub struct CompleteQueueState {
    mmap: MemoryMapping,
    pointers: QueuePointers,
    ring_mask: u32,
    cqes_offset: u32,
    data: Mutex<CompleteQueueData>,
}

impl CompleteQueueState {
    // # Safety
    // Safe only if `mmap` was created by mapping the uring FD at the CQ_RING offset and `params`
    // is the struct the kernel filled in for that ring.
    unsafe fn new(mmap: MemoryMapping, params: &io_uring_params) -> CompleteQueueState {
        let ptr = mmap.as_ptr();
        let head = ptr.add(params.cq_off.head as usize) as *const AtomicU32;
        let tail = ptr.add(params.cq_off.tail as usize) as *const AtomicU32;
        let ring_mask = mmap.read_obj(params.cq_off.ring_mask as usize).unwrap();
        CompleteQueueState {
            mmap,
            pointers: QueuePointers { head, tail },
            ring_mask,
            cqes_offset: params.cq_off.cqes,
            data: Default::default(),
        }
    }

    fn add_op_data(&self, user_data: UserData, addrs: Pin<Box<[IoBufMut<'static>]>>) {
        self.data.lock().pending_op_addrs.insert(user_data, addrs);
    }

    fn get_cqe(&self, head: u32) -> &io_uring_cqe {
        // SAFETY: the pointer arithmetic stays within the mmap'd completion ring because `head`
        // is masked to a valid index below.
        unsafe {
            let cqes = (self.mmap.as_ptr() as *const u8).add(self.cqes_offset as usize)
                as *const io_uring_cqe;

            let index = head & self.ring_mask;

            &*cqes.add(index as usize)
        }
    }

    /// Returns the number of completions that are ready to be consumed.
    pub fn num_ready(&self) -> u32 {
        let tail = self.pointers.tail(Ordering::Acquire);
        let head = self.pointers.head(Ordering::Relaxed);

        tail.saturating_sub(head)
    }

    fn pop_front(&self) -> Option<(UserData, std::io::Result<u32>)> {
        // Take the data lock first so that two threads can't pop the same entry.
        let mut data = self.data.lock();

        let head = self.pointers.head(Ordering::Relaxed);

        // The acquire load of the tail pairs with the kernel's release store, making the cqe
        // contents visible before they are read below.
        if head == self.pointers.tail(Ordering::Acquire) {
            return None;
        }

        let cqe = self.get_cqe(head);
        let user_data = cqe.user_data;
        let res = cqe.res;

        // The operation is complete; drop the buffers that were kept alive for it.
        let _ = data.pending_op_addrs.remove(&user_data);

        // Advance the head so the kernel can reuse this cqe slot.
        let new_head = head.wrapping_add(1);
        self.pointers.set_head(new_head);

        let io_res = match res {
            r if r < 0 => Err(std::io::Error::from_raw_os_error(-r)),
            r => Ok(r as u32),
        };
        Some((user_data, io_res))
    }
}

impl Iterator for &CompleteQueueState {
    type Item = (UserData, std::io::Result<u32>);

    fn next(&mut self) -> Option<Self::Item> {
        self.pop_front()
    }
}

struct QueuePointers {
    head: *const AtomicU32,
    tail: *const AtomicU32,
}

// SAFETY: the pointers refer into the mmap'd rings shared with the kernel and are only accessed
// through atomic operations, so QueuePointers can be sent to and shared between threads.
unsafe impl Send for QueuePointers {}
unsafe impl Sync for QueuePointers {}

impl QueuePointers {
    // Loads the tail pointer atomically with the given ordering.
    fn tail(&self, ordering: Ordering) -> u32 {
        // SAFETY: the pointer is valid for the lifetime of the ring's mapping.
        unsafe { (*self.tail).load(ordering) }
    }

    // Stores the new tail of the submit queue, letting the kernel process entries added up to
    // the given tail. Always stores with release ordering so the sqe writes are visible first.
    fn set_tail(&self, next_tail: u32) {
        // SAFETY: the pointer is valid for the lifetime of the ring's mapping.
        unsafe { (*self.tail).store(next_tail, Ordering::Release) }
    }

    // Loads the head pointer atomically with the given ordering.
    fn head(&self, ordering: Ordering) -> u32 {
        // SAFETY: the pointer is valid for the lifetime of the ring's mapping.
        unsafe { (*self.head).load(ordering) }
    }

    // Stores the new head of the completion queue, letting the kernel reuse the completion
    // entries up to the given head. Always stores with release ordering.
    fn set_head(&self, next_head: u32) {
        // SAFETY: the pointer is valid for the lifetime of the ring's mapping.
        unsafe { (*self.head).store(next_head, Ordering::Release) }
    }
}
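
// A minimal smoke-test sketch, not part of the original test suite; it assumes a kernel with
// io_uring support and is marked #[ignore] so it only runs when requested explicitly.
#[cfg(test)]
mod uring_sketch_tests {
    use super::*;

    #[test]
    #[ignore]
    fn nop_roundtrip() {
        let uring = URingContext::new(16, None).expect("failed to create uring");
        uring.add_nop(1234).unwrap();
        uring.submit().unwrap();
        let (user_data, res) = uring.wait().unwrap().next().unwrap();
        assert_eq!(user_data, 1234);
        assert_eq!(res.unwrap(), 0);
    }
}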