
chore: sync liburing

* support for sqe128, cqe32
* using unsafe.Add
* compile-time size guard/check
* added support for uring_cmd
Xeffy Chen 2023-04-16 03:56:10 +07:00
parent bd50af5d21
commit 0e7d4faaff
Signed by: Xeffy
GPG key ID: E41C08AD390E7C49
6 changed files with 174 additions and 82 deletions

hdr.go

@ -20,6 +20,12 @@ type IoUringSqe_Union1 uint64
func (u *IoUringSqe_Union1) SetOffset(v uint64) { *u = IoUringSqe_Union1(v) }
func (u *IoUringSqe_Union1) SetOffset_RawPtr(v unsafe.Pointer) { *u = IoUringSqe_Union1((uintptr)(v)) }
func (u *IoUringSqe_Union1) SetAddr2(v uint64) { *u = IoUringSqe_Union1(v) }
func (u *IoUringSqe_Union1) SetCmdOp(v uint32) {
(*struct {
CmdOp uint32
__pad1 uint32
})(unsafe.Pointer(u)).CmdOp = v
}
type IoUringSqe_Union2 uint64
@ -47,6 +53,7 @@ func (u *IoUringSqe_Union3) SetUnlinkFlags(v uint32) { *u = IoUringSqe_Union3
func (u *IoUringSqe_Union3) SetHardlinkFlags(v uint32) { *u = IoUringSqe_Union3(v) }
func (u *IoUringSqe_Union3) SetXattrFlags(v uint32) { *u = IoUringSqe_Union3(v) }
func (u *IoUringSqe_Union3) SetMsgRingFlags(v uint32) { *u = IoUringSqe_Union3(v) }
func (u *IoUringSqe_Union3) SetUringCmdFlags(v uint32) { *u = IoUringSqe_Union3(v) }
func (u *IoUringSqe_Union3) SetOpFlags(v uint32) { *u = IoUringSqe_Union3(v) } //generic
func (u IoUringSqe_Union3) GetOpFlags() uint32 { return uint32(u) } //generic
@ -67,17 +74,13 @@ func (u *IoUringSqe_Union5) SetAddrLen(v uint16) {
type IoUringSqe_Union6 [2]uint64
func (u *IoUringSqe_Union6) SetAddr3(v uint64) {
u[0] = v
}
func (u *IoUringSqe_Union6) SetAddr3(v uint64) { u[0] = v }
/*
* If the ring is initialized with IORING_SETUP_SQE128, then
* this field is used for 80 bytes of arbitrary command data
*/
func (u *IoUringSqe_Union6) GetCmd() *byte {
return (*byte)(unsafe.Pointer(u))
}
func (u *IoUringSqe_Union6) GetCmd() unsafe.Pointer { return unsafe.Pointer(u) }
type IoUringSqe struct {
Opcode IoUringOp /* type of operation for this sqe */
@ -88,6 +91,10 @@ type IoUringSqe struct {
// union {
// __u64 off; /* offset into file */
// __u64 addr2;
// struct {
// __u32 cmd_op;
// __u32 __pad1;
// };
// };
IoUringSqe_Union1
@ -117,7 +124,8 @@ type IoUringSqe struct {
// __u32 unlink_flags;
// __u32 hardlink_flags;
// __u32 xattr_flags;
// __u32 msg_ring_flags;
// __u32 msg_ring_flags;
// __u32 uring_cmd_flags;
// };
IoUringSqe_Union3
@ -300,6 +308,13 @@ const (
IORING_OP_LAST
)
/*
* sqe->uring_cmd_flags
* IORING_URING_CMD_FIXED use registered buffer; pass this flag
* along with setting sqe->buf_index.
*/
const IORING_URING_CMD_FIXED = (1 << 0)
/*
* sqe->fsync_flags
*/
@ -375,9 +390,11 @@ const (
* IORING_RECVSEND_FIXED_BUF Use registered buffers, the index is stored in
* the buf_index field.
*/
const IORING_RECVSEND_POLL_FIRST = (1 << 0)
const IORING_RECV_MULTISHOT = (1 << 1)
const IORING_RECVSEND_FIXED_BUF = (1 << 2)
const (
IORING_RECVSEND_POLL_FIRST = (1 << 0)
IORING_RECV_MULTISHOT = (1 << 1)
IORING_RECVSEND_FIXED_BUF = (1 << 2)
)
/*
* accept flags stored in sqe->ioprio
@ -595,7 +612,7 @@ const (
type IoUringFilesUpdate struct {
Offset uint32
resv uint32
Fds uint64 // __aligned_u64/* __s32 * */
Fds uint64 // TODO: __aligned_u64/* __s32 * */
}
/*
@ -608,21 +625,21 @@ type IoUringRsrcRegister struct {
Nr uint32
Flags uint32
resv2 uint64
Data uint64 // __aligned_u64
Tags uint64 // __aligned_u64
Data uint64 // TODO: __aligned_u64
Tags uint64 // TODO: __aligned_u64
}
type IoUringRsrcUpdate struct {
Offset uint32
resv uint32
Data uint64 // __aligned_u64
Data uint64 // TODO: __aligned_u64
}
type IoUringRsrcUpdate2 struct {
Offset uint32
resv uint32
Data uint64 // __aligned_u64
Tags uint64 // __aligned_u64
Data uint64 // TODO: __aligned_u64
Tags uint64 // TODO: __aligned_u64
Nr uint32
resv2 uint32
}
@ -658,7 +675,7 @@ type IoUringProbe struct {
resv uint16
resv2 [3]uint32
// TODO: FAM access.
// IMPLEMENTED ON hdr_extra
// ops [0]IoUringProbeOp
}
@ -693,6 +710,8 @@ type IoUringBufRing struct {
resv3 uint16
Tail uint16
}
// IMPLEMENTED ON hdr_extra
// bufs [0]IoUringBuf
// };
}
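Taken together, the new setters cover what a passthrough command needs. A minimal sketch of preparing a uring_cmd SQE on a ring created with IORING_SETUP_SQE128, assuming the unions above are embedded in IoUringSqe and that an IORING_OP_URING_CMD opcode constant and an Fd field exist as in liburing (neither is shown in this diff):

func prepUringCmd(sqe *IoUringSqe, fd int32, cmdOp uint32, payload []byte) {
	sqe.Opcode = IORING_OP_URING_CMD // assumed constant name, per liburing
	sqe.Fd = fd                      // assumed field name, per liburing
	sqe.SetCmdOp(cmdOp)              // cmd_op shares the off/addr2 union
	sqe.SetUringCmdFlags(IORING_URING_CMD_FIXED) // only together with sqe->buf_index
	// With SQE128 the trailing union carries 80 bytes of arbitrary command data.
	cmd := unsafe.Slice((*byte)(sqe.GetCmd()), 80)
	copy(cmd, payload) // copy() truncates to whichever side is shorter
}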

View file

@ -5,9 +5,29 @@ import (
)
/*
* GetBigCqe
*
* If the ring is initialized with IORING_SETUP_CQE32, then this field
* contains 16-bytes of padding, doubling the size of the CQE.
*/
func (cqe *IoUringCqe) GetBigCqe() *[2]uint64 {
return (*[2]uint64)(unsafe.Pointer(uintptr(unsafe.Pointer(cqe)) + SizeofIoUringSqe))
func (cqe *IoUringCqe) GetBigCqe() unsafe.Pointer {
return unsafe.Add(unsafe.Pointer(cqe), SizeofIoUringCqe)
}
/*
* GetOps
*
* Get io_uring probe ops
*/
func (probe *IoUringProbe) GetOps() unsafe.Pointer {
return unsafe.Add(unsafe.Pointer(probe), SizeofIoUringProbe)
}
/*
* GetBufs
*
* Get io_uring buf_ring bufs
*/
func (bring *IoUringBufRing) GetBufs() unsafe.Pointer {
return unsafe.Add(unsafe.Pointer(bring), SizeofIoUringBufRing)
}
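A minimal sketch of consuming the extra completion data on a ring set up with IORING_SETUP_CQE32; the helper name bigCqeData is illustrative only:

func bigCqeData(cqe *IoUringCqe) (uint64, uint64) {
	// GetBigCqe points just past the 16-byte base CQE, at the 16 extra bytes
	// that CQE32 appends to every completion.
	extra := (*[2]uint64)(cqe.GetBigCqe())
	return extra[0], extra[1]
}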

View file

@ -9,8 +9,21 @@ const (
Align128IoUringSqe = 64
SizeofIoUringCqe = unsafe.Sizeof(IoUringCqe{})
Align32IoUringCqe = SizeofIoUringCqe
SizeofIoUringProbe = unsafe.Sizeof(IoUringProbe{})
SizeofIoUringProbeOp = unsafe.Sizeof(IoUringProbeOp{})
SizeofIoUringBufRing = unsafe.Sizeof(IoUringBufRing{})
)
func _SizeChecker() {
var x [1]struct{}
_ = x[SizeofIoUringSqe-64]
_ = x[SizeofIoUringCqe-16]
_ = x[SizeofIoUringProbe-16]
_ = x[SizeofIoUringProbeOp-8]
_ = x[SizeofIoUringBufRing-16]
}
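The guard works because unsafe.Sizeof of a fixed-layout type is an untyped constant: if any of these structs drifts from its ABI size, the constant index falls outside the one-element array and the package stops compiling. A standalone sketch of the same pattern, using a hypothetical wireHeader type that is not part of this package:

type wireHeader struct { // hypothetical type, expected to stay exactly 8 bytes
	Kind uint32
	Len  uint32
}

func _wireHeaderSizeGuard() {
	var x [1]struct{}
	_ = x[unsafe.Sizeof(wireHeader{})-8] // compile error unless the size is 8
}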
type IoUring struct {
Sq IoUringSq
Cq IoUringCq
@ -26,12 +39,12 @@ type IoUring struct {
}
type IoUringSq struct {
head unsafe.Pointer // *uint32
tail unsafe.Pointer // *uint32
ringMask unsafe.Pointer // *uint32
ringEntries unsafe.Pointer // *uint32
flags unsafe.Pointer // *uint32
dropped unsafe.Pointer // *uint32
khead unsafe.Pointer // *uint32
ktail unsafe.Pointer // *uint32
kringMask unsafe.Pointer // *uint32
kringEntries unsafe.Pointer // *uint32
kflags unsafe.Pointer // *uint32
kdropped unsafe.Pointer // *uint32
Array uint32Array //ptr arith
Sqes ioUringSqeArray //ptr arith
@ -42,35 +55,39 @@ type IoUringSq struct {
RingSz uint32
RingPtr unsafe.Pointer
pad [4]uint32
RingMask, RingEntries uint32
pad [2]uint32
}
func (sq *IoUringSq) _Head() *uint32 { return (*uint32)(sq.head) }
func (sq *IoUringSq) _Tail() *uint32 { return (*uint32)(sq.tail) }
func (sq *IoUringSq) _RingMask() *uint32 { return (*uint32)(sq.ringMask) }
func (sq *IoUringSq) _RingEntries() *uint32 { return (*uint32)(sq.ringEntries) }
func (sq *IoUringSq) _Flags() *uint32 { return (*uint32)(sq.flags) }
func (sq *IoUringSq) _Dropped() *uint32 { return (*uint32)(sq.dropped) }
func (sq *IoUringSq) _KHead() *uint32 { return (*uint32)(sq.khead) }
func (sq *IoUringSq) _KTail() *uint32 { return (*uint32)(sq.ktail) }
func (sq *IoUringSq) _KRingMask() *uint32 { return (*uint32)(sq.kringMask) }
func (sq *IoUringSq) _KRingEntries() *uint32 { return (*uint32)(sq.kringEntries) }
func (sq *IoUringSq) _KFlags() *uint32 { return (*uint32)(sq.kflags) }
func (sq *IoUringSq) _KDropped() *uint32 { return (*uint32)(sq.kdropped) }
type IoUringCq struct {
head unsafe.Pointer // *uint32
tail unsafe.Pointer // *uint32
ringMask unsafe.Pointer // *uint32
ringEntries unsafe.Pointer // *uint32
flags unsafe.Pointer // *uint32
overflow unsafe.Pointer // *uint32
khead unsafe.Pointer // *uint32
ktail unsafe.Pointer // *uint32
kringMask unsafe.Pointer // *uint32
kringEntries unsafe.Pointer // *uint32
kflags unsafe.Pointer // *uint32
koverflow unsafe.Pointer // *uint32
Cqes ioUringCqeArray //ptr arith
RingSz uint32
RingPtr unsafe.Pointer
pad [4]uint32
RingMask, RingEntries uint32
pad [2]uint32
}
func (cq *IoUringCq) _Head() *uint32 { return (*uint32)(cq.head) }
func (cq *IoUringCq) _Tail() *uint32 { return (*uint32)(cq.tail) }
func (cq *IoUringCq) _RingMask() *uint32 { return (*uint32)(cq.ringMask) }
func (cq *IoUringCq) _RingEntries() *uint32 { return (*uint32)(cq.ringEntries) }
func (cq *IoUringCq) _Flags() *uint32 { return (*uint32)(cq.flags) }
func (cq *IoUringCq) _Overflow() *uint32 { return (*uint32)(cq.overflow) }
func (cq *IoUringCq) _KHead() *uint32 { return (*uint32)(cq.khead) }
func (cq *IoUringCq) _KTail() *uint32 { return (*uint32)(cq.ktail) }
func (cq *IoUringCq) _KRingMask() *uint32 { return (*uint32)(cq.kringMask) }
func (cq *IoUringCq) _KRingEntries() *uint32 { return (*uint32)(cq.kringEntries) }
func (cq *IoUringCq) _KFlags() *uint32 { return (*uint32)(cq.kflags) }
func (cq *IoUringCq) _KOverflow() *uint32 { return (*uint32)(cq.koverflow) }
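The rename makes ownership explicit: the k-prefixed pointers alias memory the kernel updates and are read through the _K* accessors (atomically where the kernel is the writer), while RingMask and RingEntries are plain copies cached once at setup. A minimal sketch, assuming an already initialized ring; the helper name is illustrative only:

func nextCqeIndex(cq *IoUringCq) uint32 {
	// head is only advanced by user space, so a plain load is fine here; the
	// kernel-written tail would go through atomic.LoadUint32(cq._KTail()) instead.
	return *cq._KHead() & cq.RingMask
}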

View file

@ -22,7 +22,7 @@ func (ring *IoUring) sq_ring_needs_enter(flags *uint32) bool {
// FIXME: Extra call - no inline asm.
io_uring_smp_mb()
if atomic.LoadUint32(ring.Sq._Flags())&IORING_SQ_NEED_WAKEUP != 0 {
if atomic.LoadUint32(ring.Sq._KFlags())&IORING_SQ_NEED_WAKEUP != 0 {
*flags |= IORING_ENTER_SQ_WAKEUP
return true
}
@ -30,7 +30,7 @@ func (ring *IoUring) sq_ring_needs_enter(flags *uint32) bool {
}
func (ring *IoUring) cq_ring_needs_flush() bool {
return atomic.LoadUint32(ring.Sq._Flags())&(IORING_SQ_CQ_OVERFLOW|IORING_SQ_TASKRUN) != 0
return atomic.LoadUint32(ring.Sq._KFlags())&(IORING_SQ_CQ_OVERFLOW|IORING_SQ_TASKRUN) != 0
}
func (ring *IoUring) cq_ring_needs_enter() bool {
@ -119,8 +119,8 @@ func (ring *IoUring) io_uring_peek_batch_cqe(cqes []*IoUringCqe, count uint32) u
again:
ready = ring.io_uring_cq_ready()
if ready > 0 {
var head = *ring.Cq._Head()
var mask = *ring.Cq._RingMask()
var head = *ring.Cq._KHead()
var mask = *ring.Cq._KRingMask()
var last uint32
if count > ready {
count = ready
@ -159,8 +159,8 @@ done:
*/
func (ring *IoUring) __io_uring_flush_sq() uint32 {
sq := &ring.Sq
var mask = *sq._RingMask()
var ktail = *sq._Tail()
var mask = *sq._KRingMask()
var ktail = *sq._KTail()
var toSubmit = sq.SqeTail - sq.SqeHead
if toSubmit < 1 {
@ -180,7 +180,7 @@ func (ring *IoUring) __io_uring_flush_sq() uint32 {
* Ensure that the kernel sees the SQE updates before it sees the tail
* update.
*/
atomic.StoreUint32(sq._Tail(), ktail)
atomic.StoreUint32(sq._KTail(), ktail)
out:
/*
@ -194,7 +194,7 @@ out:
* we can submit. The point is, we need to be able to deal with this
* situation regardless of any perceived atomicity.
*/
return ktail - *sq._Head()
return ktail - *sq._KHead()
}
/*
@ -357,7 +357,7 @@ func (ring *IoUring) io_uring_get_sqe() *IoUringSqe {
*/
func (ring *IoUring) _io_uring_get_sqe() (sqe *IoUringSqe) {
sq := &ring.Sq
var head = atomic.LoadUint32(sq._Head())
var head = atomic.LoadUint32(sq._KHead())
var next = sq.SqeTail + 1
var shift uint32 = 0
@ -365,8 +365,8 @@ func (ring *IoUring) _io_uring_get_sqe() (sqe *IoUringSqe) {
shift = 1
}
if next-head <= *sq._RingEntries() {
sqe = ioUringSqeArray_Index(sq.Sqes, uintptr((sq.SqeTail&*sq._RingMask())<<shift))
if next-head <= *sq._KRingEntries() {
sqe = ioUringSqeArray_Index(sq.Sqes, uintptr((sq.SqeTail&*sq._KRingMask())<<shift))
sq.SqeTail = next
return
}
@ -376,7 +376,7 @@ func (ring *IoUring) _io_uring_get_sqe() (sqe *IoUringSqe) {
}
func (ring *IoUring) io_uring_cq_ready() uint32 {
return atomic.LoadUint32(ring.Cq._Tail()) - *ring.Cq._Head()
return atomic.LoadUint32(ring.Cq._KTail()) - *ring.Cq._KHead()
}
func (ring *IoUring) __io_uring_peek_cqe(cqePtr **IoUringCqe, nrAvail *uint32) error {
@ -384,7 +384,7 @@ func (ring *IoUring) __io_uring_peek_cqe(cqePtr **IoUringCqe, nrAvail *uint32) e
var err int32 = 0
var avail int
var mask = *ring.Cq._RingMask()
var mask = *ring.Cq._KRingMask()
var shift uint32 = 0
if ring.Flags&IORING_SETUP_CQE32 != 0 {
@ -392,8 +392,8 @@ func (ring *IoUring) __io_uring_peek_cqe(cqePtr **IoUringCqe, nrAvail *uint32) e
}
for {
var tail = atomic.LoadUint32(ring.Cq._Tail())
var head = *ring.Cq._Head()
var tail = atomic.LoadUint32(ring.Cq._KTail())
var head = *ring.Cq._KHead()
cqe = nil
avail = int(tail - head)
@ -431,7 +431,7 @@ func (ring *IoUring) __io_uring_peek_cqe(cqePtr **IoUringCqe, nrAvail *uint32) e
func (ring *IoUring) io_uring_cq_advance(nr uint32) {
if nr > 0 {
atomic.StoreUint32(ring.Cq._Head(), *ring.Cq._Head()+nr)
atomic.StoreUint32(ring.Cq._KHead(), *ring.Cq._KHead()+nr)
}
}

View file

@ -20,6 +20,14 @@ func io_uring_queue_init_params(entries uint32, ring *IoUring, p *IoUringParams)
if err != nil {
return err
}
// Directly map SQ slots to SQEs
sqArray := ring.Sq.Array
sqEntries := *ring.Sq._KRingEntries()
var index uint32
for index = 0; index < sqEntries; index++ {
*uint32Array_Index(sqArray, uintptr(index)) = index
}
ring.Features = p.Features
return nil
}
@ -31,7 +39,7 @@ func (ring *IoUring) io_uring_queue_exit() {
if ring.Flags&IORING_SETUP_SQE128 != 0 {
sqeSize += Align128IoUringSqe
}
munmap(unsafe.Pointer(sq.Sqes), sqeSize*uintptr(*sq._RingEntries()))
munmap(unsafe.Pointer(sq.Sqes), sqeSize*uintptr(*sq._KRingEntries()))
io_uring_unmap_rings(sq, cq)
/*
* Not strictly required, but frees up the slot we used now rather
@ -67,7 +75,7 @@ func io_uring_mmap(fd int, p *IoUringParams, sq *IoUringSq, cq *IoUringCq) (err
if cq.RingSz > sq.RingSz {
sq.RingSz = cq.RingSz
}
// cq.RingSz = sq.RingSz
cq.RingSz = sq.RingSz
}
// alloc sq ring
sq.RingPtr, err = mmap(nil, uintptr(sq.RingSz),
@ -94,13 +102,13 @@ func io_uring_mmap(fd int, p *IoUringParams, sq *IoUringSq, cq *IoUringCq) (err
}
//sq
sq.head = (unsafe.Pointer(uintptr(sq.RingPtr) + uintptr(p.SqOff.Head)))
sq.tail = (unsafe.Pointer(uintptr(sq.RingPtr) + uintptr(p.SqOff.Tail)))
sq.ringMask = (unsafe.Pointer(uintptr(sq.RingPtr) + uintptr(p.SqOff.RingMask)))
sq.ringEntries = (unsafe.Pointer(uintptr(sq.RingPtr) + uintptr(p.SqOff.RingEntries)))
sq.flags = (unsafe.Pointer(uintptr(sq.RingPtr) + uintptr(p.SqOff.Flags)))
sq.dropped = (unsafe.Pointer(uintptr(sq.RingPtr) + uintptr(p.SqOff.Dropped)))
sq.Array = (uint32Array)(unsafe.Pointer(uintptr(sq.RingPtr) + uintptr(p.SqOff.Array)))
sq.khead = unsafe.Add(sq.RingPtr, p.SqOff.Head)
sq.ktail = unsafe.Add(sq.RingPtr, p.SqOff.Tail)
sq.kringMask = unsafe.Add(sq.RingPtr, p.SqOff.RingMask)
sq.kringEntries = unsafe.Add(sq.RingPtr, p.SqOff.RingEntries)
sq.kflags = unsafe.Add(sq.RingPtr, p.SqOff.Flags)
sq.kdropped = unsafe.Add(sq.RingPtr, p.SqOff.Dropped)
sq.Array = (uint32Array)(unsafe.Add(sq.RingPtr, p.SqOff.Array))
size = SizeofIoUringSqe
if p.Flags&IORING_SETUP_SQE128 != 0 {
@ -119,15 +127,21 @@ func io_uring_mmap(fd int, p *IoUringParams, sq *IoUringSq, cq *IoUringCq) (err
sq.Sqes = (ioUringSqeArray)(sqeAddr)
//cq
cq.head = (unsafe.Pointer(uintptr(cq.RingPtr) + uintptr(p.CqOff.Head)))
cq.tail = (unsafe.Pointer(uintptr(cq.RingPtr) + uintptr(p.CqOff.Tail)))
cq.ringMask = (unsafe.Pointer(uintptr(cq.RingPtr) + uintptr(p.CqOff.RingMask)))
cq.ringEntries = (unsafe.Pointer(uintptr(cq.RingPtr) + uintptr(p.CqOff.RingEntries)))
cq.overflow = (unsafe.Pointer(uintptr(cq.RingPtr) + uintptr(p.CqOff.Overflow)))
cq.Cqes = (ioUringCqeArray)(unsafe.Pointer(uintptr(cq.RingPtr) + uintptr(p.CqOff.Cqes)))
cq.khead = unsafe.Add(cq.RingPtr, p.CqOff.Head)
cq.ktail = unsafe.Add(cq.RingPtr, p.CqOff.Tail)
cq.kringMask = unsafe.Add(cq.RingPtr, p.CqOff.RingMask)
cq.kringEntries = unsafe.Add(cq.RingPtr, p.CqOff.RingEntries)
cq.koverflow = unsafe.Add(cq.RingPtr, p.CqOff.Overflow)
cq.Cqes = (ioUringCqeArray)(unsafe.Add(cq.RingPtr, p.CqOff.Cqes))
if p.CqOff.Flags != 0 {
cq.flags = (unsafe.Pointer(uintptr(cq.RingPtr) + uintptr(p.CqOff.Flags)))
cq.kflags = (unsafe.Pointer(uintptr(cq.RingPtr) + uintptr(p.CqOff.Flags)))
}
sq.RingMask = *sq._KRingMask()
sq.RingEntries = *sq._KRingEntries()
cq.RingMask = *cq._KRingMask()
cq.RingEntries = *cq._KRingEntries()
return nil
}
@ -138,3 +152,17 @@ func io_uring_unmap_rings(sq *IoUringSq, cq *IoUringCq) error {
}
return nil
}
func io_uring_get_probe_ring(ring *IoUring) (probe *IoUringProbe) {
// len := SizeofIoUringProbe + 256*SizeofIoUringProbeOp
probe = new(IoUringProbe)
r := ring.io_uring_register_probe(probe, 256)
if r >= 0 {
return
}
return nil
}
func (ring *IoUring) io_uring_get_probe_ring() (probe *IoUringProbe) {
return io_uring_get_probe_ring(ring)
}

View file

@ -7,29 +7,37 @@ import (
type uint32Array = unsafe.Pointer // *uint32
func uint32Array_Index(u uint32Array, i uintptr) *uint32 {
return (*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(u)) + SizeofUint32*i))
return (*uint32)(unsafe.Add(u, SizeofUint32*i))
}
type ioUringSqeArray = unsafe.Pointer // *IoUringSqe
// ioUringSqeArray_Index OR SQE64
func ioUringSqeArray_Index(u ioUringSqeArray, i uintptr) *IoUringSqe {
return (*IoUringSqe)(unsafe.Pointer(uintptr(unsafe.Pointer(u)) + SizeofIoUringSqe*i))
return (*IoUringSqe)(unsafe.Add(u, SizeofIoUringSqe*i))
}
// ioUringSqe128Array_Index OR SQE128
func ioUringSqe128Array_Index(u ioUringSqeArray, i uintptr) *IoUringSqe {
return (*IoUringSqe)(unsafe.Pointer(uintptr(unsafe.Pointer(u)) + (SizeofIoUringSqe+64)*i))
return (*IoUringSqe)(unsafe.Add(u, (SizeofIoUringSqe+Align128IoUringSqe)*i))
}
//
type ioUringCqeArray = unsafe.Pointer // *IoUringCqe
// ioUringCqeArray_Index OR CQE16
func ioUringCqeArray_Index(u ioUringCqeArray, i uintptr) *IoUringCqe {
return (*IoUringCqe)(unsafe.Pointer(uintptr(unsafe.Pointer(u)) + SizeofIoUringCqe*i))
return (*IoUringCqe)(unsafe.Add(u, SizeofIoUringCqe*i))
}
// ioUringCqe32Array_Index OR CQE32
func ioUringCqe32Array_Index(u ioUringCqeArray, i uintptr) *IoUringCqe {
return (*IoUringCqe)(unsafe.Pointer(uintptr(unsafe.Pointer(u)) + (SizeofIoUringCqe+SizeofIoUringCqe)*i))
return (*IoUringCqe)(unsafe.Add(u, (SizeofIoUringCqe+Align32IoUringCqe)*i))
}
//
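All of the *_Index helpers follow one pattern: unsafe.Add with a constant stride, where SQE128 doubles the submission stride to 128 bytes and CQE32 doubles the completion stride to 32 bytes. A standalone sketch of that stride-based indexing; the entry type and helper are hypothetical, not part of this package:

type entry struct {
	A, B uint64 // 16 bytes, mirroring the base CQE size
}

func entryAt(base unsafe.Pointer, i uintptr, big bool) *entry {
	stride := unsafe.Sizeof(entry{})
	if big {
		stride *= 2 // SQE128/CQE32 double the per-slot footprint
	}
	return (*entry)(unsafe.Add(base, stride*i))
}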