
feat: sync liburing

revision: 1c469ee303d0f458a
Xeffy Chen 2022-10-13 22:07:26 +07:00
parent 8d37b05bc1
commit 527c11cde6
Signed by: Xeffy
GPG key ID: E41C08AD390E7C49
4 changed files with 341 additions and 33 deletions

hdr.go (157 changed lines)

@@ -7,7 +7,10 @@
*/
package gouring
import "unsafe"
import (
"syscall"
"unsafe"
)
/*
* IO submission data structure (Submission Queue Entry)
@@ -26,6 +29,7 @@ func (u *IoUringSqe_Union2) SetSpliceOffsetIn(v uint64) { *u = IoUringSqe_Union2
type IoUringSqe_Union3 uint32
func (u *IoUringSqe_Union3) SetRwFlags(v uint32) { *u = IoUringSqe_Union3(v) }
func (u *IoUringSqe_Union3) SetFsyncFlags(v uint32) { *u = IoUringSqe_Union3(v) }
func (u *IoUringSqe_Union3) SetPollEvents(v uint16) { *u = IoUringSqe_Union3(v) }
func (u *IoUringSqe_Union3) SetPoll32Events(v uint32) { *u = IoUringSqe_Union3(v) }
func (u *IoUringSqe_Union3) SetSyncRangeFlags(v uint32) { *u = IoUringSqe_Union3(v) }
@@ -41,6 +45,7 @@ func (u *IoUringSqe_Union3) SetRenameFlags(v uint32) { *u = IoUringSqe_Union3
func (u *IoUringSqe_Union3) SetUnlinkFlags(v uint32) { *u = IoUringSqe_Union3(v) }
func (u *IoUringSqe_Union3) SetHardlinkFlags(v uint32) { *u = IoUringSqe_Union3(v) }
func (u *IoUringSqe_Union3) SetXattrFlags(v uint32) { *u = IoUringSqe_Union3(v) }
func (u *IoUringSqe_Union3) SetMsgRingFlags(v uint32) { *u = IoUringSqe_Union3(v) }
func (u *IoUringSqe_Union3) SetOpFlags(v uint32) { *u = IoUringSqe_Union3(v) } //generic
func (u IoUringSqe_Union3) GetOpFlags() uint32 { return uint32(u) } //generic
@@ -53,6 +58,25 @@ type IoUringSqe_Union5 uint32
func (u *IoUringSqe_Union5) SetSpliceFdIn(v int32) { *u = IoUringSqe_Union5(v) }
func (u *IoUringSqe_Union5) SetFileIndex(v uint32) { *u = IoUringSqe_Union5(v) }
func (u *IoUringSqe_Union5) SetAddrLen(v uint16) {
s := (*[2]uint16)(unsafe.Pointer(u))
s[0] = v // addr_len
// s[1] = 0 // __pad3[1]
}
type IoUringSqe_Union6 [2]uint64
func (u *IoUringSqe_Union6) SetAddr3(v uint64) {
u[0] = v
}
/*
* If the ring is initialized with IORING_SETUP_SQE128, then
* this field is used for 80 bytes of arbitrary command data
*/
func (u *IoUringSqe_Union6) GetCmd() *byte {
return (*byte)(unsafe.Pointer(u))
}
type IoUringSqe struct {
Opcode IoUringOp /* type of operation for this sqe */
@@ -92,6 +116,7 @@ type IoUringSqe struct {
// __u32 unlink_flags;
// __u32 hardlink_flags;
// __u32 xattr_flags;
// __u32 msg_ring_flags;
// };
IoUringSqe_Union3
@@ -109,14 +134,28 @@ type IoUringSqe struct {
/* personality to use, if used */
Personality uint16
// union {
// __s32 splice_fd_in;
// __u32 file_index;
// };
// union {
// __s32 splice_fd_in;
// __u32 file_index;
// struct {
// __u16 addr_len;
// __u16 __pad3[1];
// };
// };
IoUringSqe_Union5
Addr3 uint64
__pad2 [1]uint64
// union {
// struct {
// __u64 addr3;
// __u64 __pad2[1];
// };
// /*
// * If the ring is initialized with IORING_SETUP_SQE128, then
// * this field is used for 80 bytes of arbitrary command data
// */
// __u8 cmd[0];
// };
IoUringSqe_Union6
}
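With IORING_SETUP_SQE128, the new tail union is also the start of the 80-byte command area mentioned above. A minimal sketch of viewing that area through the new GetCmd accessor, assuming it sits alongside hdr.go and Go 1.17+ for unsafe.Slice; not part of this commit:
// uringCmdArea exposes the SQE128 command area as a byte slice.
// 80 bytes = the 16-byte IoUringSqe_Union6 tail plus the 64-byte second
// half that an SQE128 ring lays out immediately after this SQE.
func uringCmdArea(sqe *IoUringSqe) []byte {
	return unsafe.Slice(sqe.GetCmd(), 80)
}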
/*
@@ -187,9 +226,19 @@ const IORING_SETUP_COOP_TASKRUN = (1 << 8)
* IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN.
*/
const IORING_SETUP_TASKRUN_FLAG = (1 << 9)
const IORING_SETUP_SQE128 = (1 << 10) /* SQEs are 128 byte */
const IORING_SETUP_CQE32 = (1 << 11) /* CQEs are 32 byte */
/*
* Only one task is allowed to submit requests
*/
const IORING_SETUP_SINGLE_ISSUER = (1 << 12)
/*
* Defer running task work to get events.
* Rather than running bits of task work whenever the task transitions
* try to do it just before it is needed.
*/
const IORING_SETUP_DEFER_TASKRUN = (1 << 13)
type IoUringOp = uint8
@@ -243,6 +292,8 @@ const (
IORING_OP_GETXATTR
IORING_OP_SOCKET
IORING_OP_URING_CMD
IORING_OP_SEND_ZC
IORING_OP_SENDMSG_ZC
/* this goes last, obviously */
IORING_OP_LAST
@@ -283,11 +334,14 @@ const SPLICE_F_FD_IN_FIXED = (1 << 31) /* the last bit of __u32 */
*
* IORING_POLL_UPDATE Update existing poll request, matching
* sqe->addr as the old user_data field.
*
* IORING_POLL_LEVEL Level triggered poll.
*/
const (
IORING_POLL_ADD_MULTI = (1 << 0)
IORING_POLL_UPDATE_EVENTS = (1 << 1)
IORING_POLL_UPDATE_USER_DATA = (1 << 2)
IORING_POLL_ADD_LEVEL = (1 << 3)
)
/*
@@ -297,11 +351,13 @@ const (
* IORING_ASYNC_CANCEL_FD Key off 'fd' for cancelation rather than the
* request 'user_data'
* IORING_ASYNC_CANCEL_ANY Match any request
* IORING_ASYNC_CANCEL_FD_FIXED 'fd' passed in is a fixed descriptor
*/
const (
IORING_ASYNC_CANCEL_ALL = (1 << 0)
IORING_ASYNC_CANCEL_FD = (1 << 1)
IORING_ASYNC_CANCEL_ANY = (1 << 2)
IORING_ASYNC_CANCEL_ALL = (1 << 0)
IORING_ASYNC_CANCEL_FD = (1 << 1)
IORING_ASYNC_CANCEL_ANY = (1 << 2)
IORING_ASYNC_CANCEL_FD_FIXED = (1 << 3)
)
/*
@@ -311,14 +367,38 @@ const (
* or receive and arm poll if that yields an
* -EAGAIN result, arm poll upfront and skip
* the initial transfer attempt.
* IORING_RECV_MULTISHOT Multishot recv. Sets IORING_CQE_F_MORE if
* the handler will continue to report
* CQEs on behalf of the same SQE.
*
* IORING_RECVSEND_FIXED_BUF Use registered buffers, the index is stored in
* the buf_index field.
*/
const IORING_RECVSEND_POLL_FIRST = (1 << 0)
const IORING_RECV_MULTISHOT = (1 << 1)
const IORING_RECVSEND_FIXED_BUF = (1 << 2)
/*
* accept flags stored in sqe->ioprio
*/
const IORING_ACCEPT_MULTISHOT = (1 << 0)
/*
* IORING_OP_MSG_RING command types, stored in sqe->addr
*/
const (
IORING_MSG_DATA = iota /* pass sqe->len as 'res' and off as user_data */
IORING_MSG_SEND_FD /* send a registered fd to another ring */
)
/*
* IORING_OP_MSG_RING flags (sqe->msg_ring_flags)
*
* IORING_MSG_RING_CQE_SKIP Don't post a CQE to the target ring. Not
* applicable for IORING_MSG_DATA, obviously.
*/
const IORING_MSG_RING_CQE_SKIP = (1 << 0)
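There is no dedicated prep helper for IORING_OP_MSG_RING in this commit (it is still listed as a TODO in prep.go), but the pieces above are enough to sketch one by hand. This assumes the existing IORING_OP_MSG_RING opcode and PrepRW from prep.go; per the comment above, sqe->len carries the 'res' value and the offset carries the target user_data. Not part of this commit:
// prepMsgRingData is a hand-rolled sketch of an IORING_MSG_DATA request:
// fd is the target ring's fd, res is delivered as cqe->res, ud as user_data.
func prepMsgRingData(sqe *IoUringSqe, targetRingFd int, res uint32, ud UserData, flags uint32) {
	PrepRW(IORING_OP_MSG_RING, sqe, targetRingFd, nil, int(res), ud.GetUint64())
	sqe.SetMsgRingFlags(flags)
}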
/*
* IO completion data structure (Completion Queue Entry)
*/
@@ -332,8 +412,6 @@ type IoUringCqe struct {
* contains 16-bytes of padding, doubling the size of the CQE.
*/
// __u64 big_cqe[];
// 8+4+4 == 16 , correct
}
/*
@@ -342,12 +420,15 @@ type IoUringCqe struct {
* IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
* IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries
* IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv
* IORING_CQE_F_NOTIF Set for notification CQEs. Can be used to distinguish
* them from sends.
*/
const (
IORING_CQE_F_BUFFER = (1 << 0)
IORING_CQE_F_MORE = (1 << 1)
IORING_CQE_F_SOCK_NONEMPTY = (1 << 2)
IORING_CQE_F_NOTIF = (1 << 3)
)
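A minimal consumer-side sketch of the new flag, assuming the CQE's flags word is exported as Flags on IoUringCqe; not part of this commit:
// isZcNotification reports whether a CQE is the extra notification posted
// for an IORING_OP_SEND_ZC request (buffer released) rather than the send
// completion itself.
func isZcNotification(cqe *IoUringCqe) bool {
	return cqe.Flags&IORING_CQE_F_NOTIF != 0
}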
const (
@@ -493,6 +574,12 @@ const (
IORING_REGISTER_PBUF_RING = 22
IORING_UNREGISTER_PBUF_RING = 23
/* sync cancelation API */
IORING_REGISTER_SYNC_CANCEL = 24
/* register a range of fixed file slots for automatic slot allocation */
IORING_REGISTER_FILE_ALLOC_RANGE = 25
/* this goes last */
IORING_REGISTER_LAST
)
@@ -539,6 +626,19 @@ type IoUringRsrcUpdate2 struct {
resv2 uint32
}
type IoUringNotificationSlot struct {
tag uint64
resv [3]uint64
}
type IoUringNotificationRegister struct {
nr_slots uint32
resv uint32
resv2 uint64
data uint64
resv3 uint64
}
/* Skip updating fd indexes set to this value in the fd table */
const IORING_REGISTER_FILES_SKIP = (-2)
@@ -556,7 +656,9 @@ type IoUringProbe struct {
uint8 /* length of ops[] array below */
resv uint16
resv2 [3]uint32
ops [0]IoUringProbeOp
// TODO: FAM access.
// ops [0]IoUringProbeOp
}
type IoUringRestriction struct {
@@ -630,6 +732,29 @@ type IoUringGeteventsArg struct {
}
/*
* accept flags stored in sqe->ioprio
* Argument for IORING_REGISTER_SYNC_CANCEL
*/
// const IORING_ACCEPT_MULTISHOT = (1 << 0)
type IouringSyncCancelReg struct {
Addr uint64
Fd int32
Flags uint32
timeout syscall.Timespec
pad [4]uint64
}
/*
* Argument for IORING_REGISTER_FILE_ALLOC_RANGE
* The range is specified as [off, off + len)
*/
type IoUringFileIndexRange struct {
Offset uint32
Len uint32
resv uint64
}
type IoUringRecvmsgOut struct {
Namelen uint32
Controllen uint32
Payloadlen uint32
Flags uint32
}


@@ -3,10 +3,12 @@ package gouring
import "unsafe"
const (
SizeofUnsigned = unsafe.Sizeof(uint32(0))
SizeofUint32 = unsafe.Sizeof(uint32(0))
SizeofIoUringSqe = unsafe.Sizeof(IoUringSqe{})
SizeofIoUringCqe = unsafe.Sizeof(IoUringCqe{})
SizeofUnsigned = unsafe.Sizeof(uint32(0))
SizeofUint32 = unsafe.Sizeof(uint32(0))
SizeofIoUringSqe = unsafe.Sizeof(IoUringSqe{})
Align128IoUringSqe = 64
SizeofIoUringCqe = unsafe.Sizeof(IoUringCqe{})
Align32IoUringCqe = SizeofIoUringCqe
)
type IoUring struct {

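The new Align128IoUringSqe and Align32IoUringCqe constants capture the extra per-entry space that IORING_SETUP_SQE128 and IORING_SETUP_CQE32 rings require. A small sketch of the SQE stride computation, mirroring how the mmap sizing code in the last changed file uses it below; not part of this commit:
// sqeStride returns the per-entry SQE size for the given setup flags.
func sqeStride(setupFlags uint32) uintptr {
	size := SizeofIoUringSqe // 64 bytes for a regular SQE
	if setupFlags&IORING_SETUP_SQE128 != 0 {
		size += Align128IoUringSqe // SQE128 rings use 128-byte entries
	}
	return size
}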
prep.go (203 changed lines)

@@ -18,8 +18,7 @@ func PrepRW(op IoUringOp, sqe *IoUringSqe, fd int,
sqe.IoUringSqe_Union4 = 0 // sqe.SetBufIndex(0) // union4
sqe.Personality = 0
sqe.IoUringSqe_Union5 = 0 // sqe.SetFileIndex(0) // union5
sqe.Addr3 = 0
sqe.__pad2[0] = 0
sqe.IoUringSqe_Union6 = IoUringSqe_Union6{}
}
func PrepNop(sqe *IoUringSqe) {
@@ -43,7 +42,23 @@ func PrepTimeoutUpdate(sqe *IoUringSqe, ts *syscall.Timespec, userData uint64, f
sqe.SetTimeoutFlags(flags | IORING_TIMEOUT_UPDATE)
}
// ** "Syscall" OP
/*
"Syscall" OP
*/
func PrepSplice(sqe *IoUringSqe, fdIn int, offIn uint64, fdOut int, offOut uint64, nb int, spliceFlags uint32) {
PrepRW(IORING_OP_SPLICE, sqe, fdOut, nil, nb, offOut)
sqe.SetSpliceOffsetIn(offIn)
sqe.SetSpliceFdIn(int32(fdIn))
sqe.SetSpliceFlags(spliceFlags)
}
func PrepTee(sqe *IoUringSqe, fdIn int, fdOut int, nb int, spliceFlags uint32) {
PrepRW(IORING_OP_TEE, sqe, fdOut, nil, nb, 0)
sqe.SetSpliceOffsetIn(0)
sqe.SetSpliceFdIn(int32(fdIn))
sqe.SetSpliceFlags(spliceFlags)
}
func PrepRead(sqe *IoUringSqe, fd int, buf *byte, nb int, offset uint64) {
PrepRW(IORING_OP_READ, sqe, fd, unsafe.Pointer(buf), nb, offset)
@@ -59,6 +74,12 @@ func PrepReadv2(sqe *IoUringSqe, fd int,
PrepReadv(sqe, fd, iov, nrVecs, offset)
sqe.SetRwFlags(flags)
}
func PrepReadFixed(sqe *IoUringSqe, fd int,
buf *byte, nb int,
offset uint64, bufIndex uint16) {
PrepRW(IORING_OP_READ_FIXED, sqe, fd, unsafe.Pointer(buf), nb, offset)
sqe.SetBufIndex(bufIndex)
}
func PrepWrite(sqe *IoUringSqe, fd int, buf *byte, nb int, offset uint64) {
PrepRW(IORING_OP_WRITE, sqe, fd, unsafe.Pointer(buf), nb, offset)
@@ -74,15 +95,24 @@ func PrepWritev2(sqe *IoUringSqe, fd int,
PrepWritev(sqe, fd, iov, nrVecs, offset)
sqe.SetRwFlags(flags)
}
func PrepWriteFixed(sqe *IoUringSqe, fd int,
buf *byte, nb int,
offset uint64, bufIndex uint16) {
PrepRW(IORING_OP_WRITE_FIXED, sqe, fd, unsafe.Pointer(buf), nb, offset)
sqe.SetBufIndex(bufIndex)
}
func PrepAccept(sqe *IoUringSqe, fd int, rsa *syscall.RawSockaddrAny, rsaSz *uintptr, flags uint) {
// *rsaSz = syscall.SizeofSockaddrAny // leave this out to caller?
func PrepAccept(sqe *IoUringSqe, fd int, rsa *syscall.RawSockaddrAny, rsaSz *uintptr, flags uint32) {
PrepRW(IORING_OP_ACCEPT, sqe, fd, unsafe.Pointer(rsa), 0, uint64(uintptr(unsafe.Pointer(rsaSz))))
sqe.SetAcceptFlags(uint32(flags))
}
func PrepAcceptDirect(sqe *IoUringSqe, fd int, rsa *syscall.RawSockaddrAny, rsaSz *uintptr, flags uint32, fileIndex int) {
PrepAccept(sqe, fd, rsa, rsaSz, flags)
__io_uring_set_target_fixed_file(sqe, uint32(fileIndex))
}
func PrepClose(sqe *IoUringSqe, fd int) {
PrepRW(IORING_OP_CLOSE, sqe, fd, nil, 0, 0)
func PrepConnect(sqe *IoUringSqe, fd int, rsa *syscall.RawSockaddrAny, rsaSz uintptr) {
PrepRW(IORING_OP_CONNECT, sqe, fd, unsafe.Pointer(rsa), 0, uint64(rsaSz))
}
func PrepRecvmsg(sqe *IoUringSqe, fd int, msg *syscall.Msghdr, flags uint) {
@@ -90,14 +120,165 @@ func PrepRecvmsg(sqe *IoUringSqe, fd int, msg *syscall.Msghdr, flags uint) {
sqe.SetMsgFlags(uint32(flags))
}
func PrepSendmsg(sqe *IoUringSqe, fd int, msg *syscall.Msghdr, flags uint) {
func PrepSendmsg(sqe *IoUringSqe, fd int, msg *syscall.Msghdr, flags uint32) {
PrepRW(IORING_OP_SENDMSG, sqe, fd, unsafe.Pointer(msg), 1, 0)
sqe.SetMsgFlags(uint32(flags))
sqe.SetMsgFlags(flags)
}
func PrepSendmsgZc(sqe *IoUringSqe, fd int, msg *syscall.Msghdr, flags uint32) {
PrepSendmsg(sqe, fd, msg, flags)
sqe.Opcode |= IORING_OP_SENDMSG_ZC
}
// ** Multishot
func PrepClose(sqe *IoUringSqe, fd int) {
PrepRW(IORING_OP_CLOSE, sqe, fd, nil, 0, 0)
}
func PrepCloseDirect(sqe *IoUringSqe, fileIndex uint32) {
PrepClose(sqe, 0)
__io_uring_set_target_fixed_file(sqe, fileIndex)
}
func PrepMultishotAccept(sqe *IoUringSqe, fd int, rsa *syscall.RawSockaddrAny, rsaSz *uintptr, flags uint) {
func PrepFilesUpdate(sqe *IoUringSqe, fds []int32, offset int) {
PrepRW(IORING_OP_FILES_UPDATE, sqe, -1, unsafe.Pointer(&fds[0]), len(fds), uint64(offset))
}
// func PrepFallocate(sqe *IoUringSqe, fd int, mode int, offset uint64, length uint64) {
// PrepRW(IORING_OP_FALLOCATE, sqe, fd, )
// }
func PrepOpenat(sqe *IoUringSqe, dfd int, path *byte, flags uint32, mode int) {
PrepRW(IORING_OP_OPENAT, sqe, dfd, unsafe.Pointer(path), mode, 0)
sqe.SetOpenFlags(flags)
}
func PrepOpenatDirect(sqe *IoUringSqe, dfd int, path *byte, flags uint32, mode int, fileIndex uint32) {
PrepOpenat(sqe, dfd, path, flags, mode)
__io_uring_set_target_fixed_file(sqe, fileIndex)
}
func PrepFadvise(sqe *IoUringSqe, fd int, offset uint64, length int, advice uint32) {
PrepRW(IORING_OP_FADVISE, sqe, fd, nil, length, offset)
sqe.SetFadviseAdvice(advice)
}
func PrepMadvise(sqe *IoUringSqe, addr unsafe.Pointer, length int, advice uint32) {
PrepRW(IORING_OP_MADVISE, sqe, -1, addr, length, 0)
sqe.SetFadviseAdvice(advice)
}
func PrepSend(sqe *IoUringSqe, sockfd int, buf *byte, length int, flags uint32) {
PrepRW(IORING_OP_SEND, sqe, sockfd, unsafe.Pointer(buf), length, 0)
sqe.SetMsgFlags(flags)
}
func PrepSendZc(sqe *IoUringSqe, sockfd int, buf *byte, length int, flags uint32, zcFlags uint16) {
PrepRW(IORING_OP_SEND_ZC, sqe, sockfd, unsafe.Pointer(buf), length, 0)
sqe.SetMsgFlags(flags)
sqe.IoPrio = uint16(zcFlags)
}
func PrepSendZcFixed(sqe *IoUringSqe, sockfd int, buf *byte, length int, flags uint32, zcFlags uint16, bufIndex uint16) {
PrepSendZc(sqe, sockfd, buf, length, flags, zcFlags)
sqe.IoPrio |= IORING_RECVSEND_FIXED_BUF
sqe.SetBufIndex(bufIndex)
}
// statx
//send
//recv
//openat2
//openat2Direct
//epollCtl
//provide_buffers
//remove_buffers
//shutdown
//unlinkat
//unlink
//renameat
//rename
//sync_file_range
//mkdirat
//mkdir
//symlinkat
//symlink
//linkat
//link
//msg_ring
//getxattr
//setxattr
//fgetxattr
//fsetxattr
func PrepSocket(sqe *IoUringSqe, domain int, _type int, protocol int, flags uint32) {
PrepRW(IORING_OP_SOCKET, sqe, domain, nil, protocol, uint64(_type))
sqe.SetRwFlags(flags)
}
func PrepSocketDirect(sqe *IoUringSqe, domain int, _type int, protocol int, fileIndex uint32, flags uint32) {
PrepRW(IORING_OP_SOCKET, sqe, domain, nil, protocol, uint64(_type))
sqe.SetRwFlags(flags)
__io_uring_set_target_fixed_file(sqe, fileIndex)
}
func PrepSocketDirectAlloc(sqe *IoUringSqe, domain int, _type int, protocol int, flags uint32) {
PrepRW(IORING_OP_SOCKET, sqe, domain, nil, protocol, uint64(_type))
sqe.SetRwFlags(flags)
__io_uring_set_target_fixed_file(sqe, IORING_FILE_INDEX_ALLOC-1)
}
/*
Poll
*/
func PrepPollAdd(sqe *IoUringSqe, fd int, pollMask uint32) {
PrepRW(IORING_OP_POLL_ADD, sqe, fd, nil, 0, 0)
sqe.SetPoll32Events(pollMask) // TODO: check endianness
}
func PrepPollMultishot(sqe *IoUringSqe, fd int, pollMask uint32) {
PrepPollAdd(sqe, fd, pollMask)
sqe.Len = IORING_POLL_ADD_MULTI
}
func PrepPollRemove(sqe *IoUringSqe, userdata UserData) {
PrepRW(IORING_OP_POLL_REMOVE, sqe, -1, nil, 0, 0)
sqe.SetAddr(userdata.GetUnsafe())
}
func PrepPollUpdate(sqe *IoUringSqe, oldUserdata UserData, newUserdata UserData, pollMask uint32, flags int) {
PrepRW(IORING_OP_POLL_REMOVE, sqe, -1, nil, flags, newUserdata.GetUint64())
sqe.SetAddr(oldUserdata.GetUnsafe())
sqe.SetPoll32Events(pollMask) // TODO: check endianness
}
func PrepFsync(sqe *IoUringSqe, fd int, fsyncFlags uint32) {
PrepRW(IORING_OP_FSYNC, sqe, fd, nil, 0, 0)
sqe.SetFsyncFlags(fsyncFlags)
}
func PrepCancel64(sqe *IoUringSqe, ud UserData, flags uint32) {
PrepRW(IORING_OP_ASYNC_CANCEL, sqe, -1, nil, 0, 0)
sqe.SetAddr(ud.GetUnsafe())
sqe.SetCancelFlags(flags)
}
func PrepCancel(sqe *IoUringSqe, ud UserData, flags uint32) {
PrepCancel64(sqe, UserData(ud.GetUintptr()), flags)
}
func PrepCancelFd(sqe *IoUringSqe, fd int, flags uint32) {
PrepRW(IORING_OP_ASYNC_CANCEL, sqe, fd, nil, 0, 0)
sqe.SetCancelFlags(flags | IORING_ASYNC_CANCEL_FD)
}
func PrepLinkTimeout(sqe *IoUringSqe, ts *syscall.Timespec, flags uint32) {
PrepRW(IORING_OP_LINK_TIMEOUT, sqe, -1, unsafe.Pointer(ts), 1, 0)
sqe.SetTimeoutFlags(flags)
}
/*
Multishot
*/
func PrepMultishotAccept(sqe *IoUringSqe, fd int, rsa *syscall.RawSockaddrAny, rsaSz *uintptr, flags uint32) {
PrepAccept(sqe, fd, rsa, rsaSz, flags)
sqe.IoPrio |= IORING_ACCEPT_MULTISHOT
}
func PrepMultishotAcceptDirect(sqe *IoUringSqe, fd int, rsa *syscall.RawSockaddrAny, rsaSz *uintptr, flags uint32) {
PrepMultishotAccept(sqe, fd, rsa, rsaSz, flags)
__io_uring_set_target_fixed_file(sqe, IORING_FILE_INDEX_ALLOC-1)
}
//go:nosplit
func __io_uring_set_target_fixed_file(sqe *IoUringSqe, fileIndex uint32) {
sqe.SetFileIndex(fileIndex)
}
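A short usage sketch for two of the new prep helpers; how the SQE is obtained from and submitted to the ring is left to the library's queue API and is not shown. Not part of this commit:
// prepareZeroCopySend queues a zero-copy send; the kernel later posts an
// extra CQE flagged IORING_CQE_F_NOTIF once it is done with the buffer.
func prepareZeroCopySend(sqe *IoUringSqe, sockfd int, payload []byte) {
	PrepSendZc(sqe, sockfd, &payload[0], len(payload), 0, 0)
}
// prepareMultishotAccept arms one SQE that keeps posting CQEs
// (IORING_CQE_F_MORE) for each accepted connection until it terminates.
func prepareMultishotAccept(sqe *IoUringSqe, listenFd int) {
	PrepMultishotAccept(sqe, listenFd, nil, nil, 0)
}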


@@ -29,7 +29,7 @@ func (ring *IoUring) io_uring_queue_exit() {
cq := &ring.Cq
sqeSize := SizeofIoUringSqe
if ring.Flags&IORING_SETUP_SQE128 != 0 {
sqeSize += 64
sqeSize += Align128IoUringSqe
}
munmap(unsafe.Pointer(sq.Sqes), sqeSize*uintptr(*sq._RingEntries()))
io_uring_unmap_rings(sq, cq)
@@ -104,7 +104,7 @@ func io_uring_mmap(fd int, p *IoUringParams, sq *IoUringSq, cq *IoUringCq) (err
size = SizeofIoUringSqe
if p.Flags&IORING_SETUP_SQE128 != 0 {
size += 64
size += Align128IoUringSqe
}
var sqeAddr unsafe.Pointer
sqeAddr, err = mmap(nil, size*uintptr(p.SqEntries),