
feat(): no cqe sqe cache

Signed-off-by: MastahSenpai <26342994+ii64@users.noreply.github.com>
MastahSenpai 2021-12-22 00:29:06 +07:00
parent d464bb436c
commit 292c65f6f5
Signed by untrusted user who does not match committer: Xeffy
GPG key ID: E41C08AD390E7C49
2 changed files with 23 additions and 21 deletions


@@ -86,7 +86,6 @@ func setup(r *Ring, entries uint, parmas *IOUringParams) (ringFd int, err error)
 		return
 	}
 	sq.sqes = sqeArray(r.sqesPtr)
-	sq.sqesSz = uintptr(p.SQEntries) // cache
 	//
@@ -95,7 +94,6 @@ func setup(r *Ring, entries uint, parmas *IOUringParams) (ringFd int, err error)
 	cq.ringMask = cqRingPtr + uintptr(p.CQOff.RingMask)
 	cq.ringEntries = cqRingPtr + uintptr(p.CQOff.RingEntries)
 	cq.cqes = cqeArray(cqRingPtr + uintptr(p.CQOff.CQEs))
-	cq.cqesSz = uintptr(p.CQEntries) // cache
 	return
 }
@@ -130,7 +128,7 @@ func register(r *Ring, opcode UringRegisterOpcode, arg uintptr, nrArg uint) (ret
 }
 func enter(r *Ring, toSubmit, minComplete uint, flags UringEnterFlag, sig *Sigset_t) (ret int, err error) {
-	if ret, err = io_uring_enter(r.fd, toSubmit, minComplete, flags, sig); err != nil {
+	if ret, err = io_uring_enter(r.fd, toSubmit, minComplete, uint(flags), sig); err != nil {
 		err = errors.Wrap(err, "io_uring_enter")
 		return
 	}
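A note on the uint(flags) change in the last hunk: the enter wrapper keeps a typed UringEnterFlag in its signature and narrows it to the plain uint expected by the raw io_uring_enter binding only at the call site. A minimal standalone sketch of that pattern follows; the flag values and the rawEnter stub are assumptions for illustration, not the library's code.

package main

import "fmt"

// UringEnterFlag mirrors the typed flag set used in the diff above.
type UringEnterFlag uint

const (
	// Values assumed for illustration only.
	IORING_ENTER_GETEVENTS UringEnterFlag = 1 << 0
	IORING_ENTER_SQ_WAKEUP UringEnterFlag = 1 << 1
)

// rawEnter stands in for the raw syscall wrapper, which takes an untyped uint.
func rawEnter(fd int, toSubmit, minComplete, flags uint) (int, error) {
	fmt.Printf("io_uring_enter(fd=%d, submit=%d, wait=%d, flags=%#x)\n",
		fd, toSubmit, minComplete, flags)
	return int(toSubmit), nil
}

// enter keeps the typed flag in its API and converts exactly once, at the
// syscall boundary, which is the shape the hunk above adopts.
func enter(fd int, toSubmit, minComplete uint, flags UringEnterFlag) (int, error) {
	return rawEnter(fd, toSubmit, minComplete, uint(flags))
}

func main() {
	enter(3, 1, 1, IORING_ENTER_GETEVENTS|IORING_ENTER_SQ_WAKEUP)
}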

ring.go

@@ -1,6 +1,9 @@
 package gouring
-import "unsafe"
+import (
+	"sync/atomic"
+	"unsafe"
+)
 type Ring struct {
 	fd int
@@ -24,13 +27,10 @@ type SQRing struct {
 	flags uintptr
 	array uint32Array
 	sqes  sqeArray
-	// cache
-	sqesSz uintptr
 }
-func (sq SQRing) Get(idx uint32) *SQEvent {
-	if uintptr(idx) >= sq.sqesSz {
+func (sq SQRing) Get(idx uint32) *SQEntry {
+	if uintptr(idx) >= uintptr(*sq.RingEntries()) {
 		return nil
 	}
 	return sq.sqes.Get(uintptr(idx))
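The hunk above is the core of this commit on the SQ side: the cached sqesSz field is dropped and the bound check in Get now reads the ring-entries value through RingEntries() on every lookup. A rough standalone sketch of that bound-checked lookup follows; the types and the slice-backed setup are assumptions, with a Go slice standing in for the mmap'd SQE array.

package main

import (
	"fmt"
	"unsafe"
)

// sketchSQE stands in for an SQ entry; only its size matters here.
type sketchSQE struct {
	opcode uint8
	_      [63]byte
}

// sketchSQ mimics the shape in ring.go: a raw base pointer for the entries
// plus a pointer to the ring-entries value that is read on every lookup
// instead of a size cached at setup time (assumed layout, illustration only).
type sketchSQ struct {
	ringEntries *uint32        // normally points into the mmap'd SQ ring
	sqes        unsafe.Pointer // base address of the SQE array
}

func (sq sketchSQ) Get(idx uint32) *sketchSQE {
	if uintptr(idx) >= uintptr(*sq.ringEntries) { // check against the live value
		return nil
	}
	return (*sketchSQE)(unsafe.Pointer(uintptr(sq.sqes) + uintptr(idx)*unsafe.Sizeof(sketchSQE{})))
}

func main() {
	backing := make([]sketchSQE, 8) // stands in for the mmap'd SQE array
	entries := uint32(len(backing))
	sq := sketchSQ{ringEntries: &entries, sqes: unsafe.Pointer(&backing[0])}
	fmt.Println(sq.Get(3) != nil, sq.Get(8) == nil) // true true
}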
@@ -57,6 +57,13 @@ func (sq SQRing) Event() sqeArray {
 	return sq.sqes
 }
+func (sq SQRing) IsCQOverflow() bool {
+	return atomic.LoadUint32(sq.Flags())&IORING_SQ_CQ_OVERFLOW > 0
+}
+func (sq SQRing) IsNeedWakeup() bool {
+	return atomic.LoadUint32(sq.Flags())&IORING_SQ_NEED_WAKEUP > 0
+}
 //
 type uint32Array uintptr
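The IsCQOverflow and IsNeedWakeup helpers added above read the kernel-updated SQ flags word with an atomic load. In io_uring, IORING_SQ_NEED_WAKEUP is set when the SQPOLL kernel thread has gone idle and the submitter must call io_uring_enter with IORING_ENTER_SQ_WAKEUP, while IORING_SQ_CQ_OVERFLOW signals that completions are being buffered internally and an enter with IORING_ENTER_GETEVENTS flushes them. A hedged sketch of how such helpers typically feed into the enter flags on the submit path (the surrounding plumbing is assumed; the constants use the kernel UAPI values):

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	IORING_SQ_NEED_WAKEUP = 1 << 0 // SQPOLL kernel thread went idle
	IORING_SQ_CQ_OVERFLOW = 1 << 1 // completions are buffered internally

	IORING_ENTER_GETEVENTS = 1 << 0
	IORING_ENTER_SQ_WAKEUP = 1 << 1
)

func isNeedWakeup(flags *uint32) bool { return atomic.LoadUint32(flags)&IORING_SQ_NEED_WAKEUP > 0 }
func isCQOverflow(flags *uint32) bool { return atomic.LoadUint32(flags)&IORING_SQ_CQ_OVERFLOW > 0 }

// submitFlags decides which enter flags this submission needs.
func submitFlags(sqFlags *uint32, waitNr uint) (enterFlags uint) {
	if isNeedWakeup(sqFlags) {
		enterFlags |= IORING_ENTER_SQ_WAKEUP // poke the sleeping SQPOLL thread
	}
	if isCQOverflow(sqFlags) || waitNr > 0 {
		enterFlags |= IORING_ENTER_GETEVENTS // flush overflowed CQEs or wait for new ones
	}
	return
}

func main() {
	var flags uint32 = IORING_SQ_NEED_WAKEUP
	fmt.Printf("enter flags: %#x\n", submitFlags(&flags, 0))
}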
@@ -65,16 +72,16 @@ func (a uint32Array) Get(idx uint32) *uint32 {
 }
 func (a uint32Array) Set(idx uint32, v uint32) {
-	*a.Get(idx) = v
+	atomic.StoreUint32(a.Get(idx), v)
 }
 type sqeArray uintptr
-func (sa sqeArray) Get(idx uintptr) *SQEvent {
-	return (*SQEvent)(unsafe.Pointer(uintptr(sa) + idx*_sz_sqe))
+func (sa sqeArray) Get(idx uintptr) *SQEntry {
+	return (*SQEntry)(unsafe.Pointer(uintptr(sa) + idx*_sz_sqe))
 }
-func (sa sqeArray) Set(idx uintptr, v SQEvent) {
+func (sa sqeArray) Set(idx uintptr, v SQEntry) {
 	*sa.Get(idx) = v
 }
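The uint32Array.Set change above routes writes through atomic.StoreUint32 because the SQ index array is shared with the kernel, so a slot has to be published atomically before the tail is advanced. A rough sketch of that publish-then-bump pattern on the submit side follows; the ring layout here is a standalone stand-in with plain Go memory, not the library's mmap'd ring.

package main

import (
	"fmt"
	"sync/atomic"
)

type sqRing struct {
	tail     *uint32  // shared tail index (the kernel reads it)
	ringMask *uint32  // entries-1, used for wrapping
	array    []uint32 // maps ring slots to SQE indices
}

// push publishes SQE index sqeIdx into the next ring slot.
func (sq *sqRing) push(sqeIdx uint32) {
	tail := atomic.LoadUint32(sq.tail)
	slot := tail & *sq.ringMask
	atomic.StoreUint32(&sq.array[slot], sqeIdx) // publish the index first...
	atomic.StoreUint32(sq.tail, tail+1)         // ...then advance the tail
}

func main() {
	var tail, mask uint32 = 0, 7
	sq := &sqRing{tail: &tail, ringMask: &mask, array: make([]uint32, 8)}
	sq.push(0)
	sq.push(1)
	fmt.Println(tail, sq.array[:2]) // 2 [0 1]
}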
@@ -87,13 +94,10 @@ type CQRing struct {
 	ringMask    uintptr
 	ringEntries uintptr
 	cqes        cqeArray
-	// cache
-	cqesSz uintptr
 }
-func (cq CQRing) Get(idx uint32) *CQEvent {
-	if uintptr(idx) >= cq.cqesSz { // avoid lookup overflow
+func (cq CQRing) Get(idx uint32) *CQEntry {
+	if uintptr(idx) >= uintptr(*cq.RingEntries()) { // avoid lookup overflow
 		return nil
 	}
 	return cq.cqes.Get(uintptr(idx))
@@ -118,10 +122,10 @@ func (cq CQRing) Event() cqeArray {
 type cqeArray uintptr
-func (ca cqeArray) Get(idx uintptr) *CQEvent {
-	return (*CQEvent)(unsafe.Pointer(uintptr(ca) + idx*_sz_cqe))
+func (ca cqeArray) Get(idx uintptr) *CQEntry {
+	return (*CQEntry)(unsafe.Pointer(uintptr(ca) + idx*_sz_cqe))
 }
-func (ca cqeArray) Set(idx uintptr, v CQEvent) {
+func (ca cqeArray) Set(idx uintptr, v CQEntry) {
 	*ca.Get(idx) = v
 }