feat(opcode): new opcodes on Linux 5.16rc

Signed-off-by: MastahSenpai <26342994+ii64@users.noreply.github.com>
This commit is contained in:
MastahSenpai 2022-01-24 15:19:14 +07:00
parent 57604628c2
commit c87f2013c0
Signed by untrusted user who does not match committer: Xeffy
GPG key ID: E41C08AD390E7C49
5 changed files with 125 additions and 24 deletions

View file

@@ -84,6 +84,13 @@ const (
IORING_OP_SYMLINKAT
IORING_OP_LINKAT
// 5.16rc
IORING_OP_GETDENTS
IORING_OP_FSETXATTR
IORING_OP_SETXATTR
IORING_OP_FGETXATTR
IORING_OP_GETXATTR
/* this goes last, obviously */
IORING_OP_LAST
)

View file

@@ -63,7 +63,7 @@ func (q *Queue) _getSQEntry() *gouring.SQEntry {
head := atomic.LoadUint32(q.sq.Head())
next := q.sqeTail + 1
if (next - head) <= atomic.LoadUint32(q.sq.RingEntries()) {
sqe := q.sq.Get(q.sqeTail & atomic.LoadUint32(q.sq.RingMask()))
sqe := q.sq.Get(q.sqeTail & (*q.sq.RingMask()))
q.sqeTail = next
sqe.Reset()
return sqe
@@ -77,7 +77,7 @@ func (q *Queue) GetSQEntry() (sqe *gouring.SQEntry) {
if sqe != nil {
return
}
runtime.Gosched()
// runtime.Gosched()
}
}
@@ -86,23 +86,23 @@ func (q *Queue) sqFallback(d uint32) {
}
func (q *Queue) sqFlush() uint32 {
khead := atomic.LoadUint32(q.sq.Head())
ktail := atomic.LoadUint32(q.sq.Tail())
if q.sqeHead == q.sqeTail {
return atomic.LoadUint32(q.sq.Tail()) - atomic.LoadUint32(q.sq.Head())
return ktail - khead
}
ktail := atomic.LoadUint32(q.sq.Tail())
for toSubmit := q.sqeTail - q.sqeHead; toSubmit > 0; toSubmit-- {
*q.sq.Array().Get(ktail & (*q.sq.RingMask())) = q.sqeHead & (*q.sq.RingMask())
ktail++
q.sqeHead++
}
atomic.StoreUint32(q.sq.Tail(), ktail)
return ktail - *q.sq.Head()
return ktail - khead
}
func (q *Queue) isNeedEnter(flags *uint32) bool {
if (q.ring.Params().Features & gouring.IORING_SETUP_SQPOLL) > 0 {
if (q.ring.Params().Flags & gouring.IORING_SETUP_SQPOLL) == 0 {
return true
}
if q.sq.IsNeedWakeup() {
@@ -139,9 +139,9 @@ func (q *Queue) SubmitAndWait(waitNr uint) (ret int, err error) {
//
func (q *Queue) cqPeek() (cqe *gouring.CQEntry) {
head := atomic.LoadUint32(q.cq.Head())
if head != atomic.LoadUint32(q.cq.Tail()) {
cqe = q.cq.Get(head & atomic.LoadUint32(q.cq.RingMask()))
khead := atomic.LoadUint32(q.cq.Head())
if khead != atomic.LoadUint32(q.cq.Tail()) {
cqe = q.cq.Get(khead & atomic.LoadUint32(q.cq.RingMask()))
}
return
}
@@ -180,7 +180,7 @@ func (q *Queue) GetCQEntryWait(wait bool, waitNr uint) (cqe *gouring.CQEntry, er
}
if q.sq.IsCQOverflow() {
_, err = q.ring.Enter(0, waitNr, gouring.IORING_ENTER_GETEVENTS, nil)
_, err = q.ring.Enter(0, 0, gouring.IORING_ENTER_GETEVENTS, nil)
if err != nil {
return
}

View file

@@ -7,6 +7,7 @@ import (
"syscall"
"testing"
"time"
"unsafe"
"github.com/ii64/gouring"
"github.com/stretchr/testify/assert"
@@ -43,7 +44,13 @@ func TestQueue(t *testing.T) {
// create new queue
q := New(ring)
defer q.Close()
defer func() {
err := q.Close()
assert.NoError(t, err, "close queue")
}()
//
go func() {
for i, b := range btests {
sqe := q.GetSQEntry()
@@ -85,7 +92,12 @@ func TestQueuePolling(t *testing.T) {
}()
q := New(ring)
defer q.Close()
defer func() {
err := q.Close()
assert.NoError(t, err, "close queue")
}()
// test data
var tb = []byte("write me on stdout\n")
var tu uint64 = 0xfafa
@@ -117,3 +129,71 @@ func TestQueuePolling(t *testing.T) {
t.Fail()
}
}
//
var (
	// Entries is the submission-queue depth used for the shared benchmark ring.
	Entries = 512
	// ring and q are created once in init below and reused by every
	// benchmark in this file (benchmarks have no per-suite setup hook).
	ring *gouring.Ring
	q *Queue
)
// init builds the ring and queue shared by the benchmarks in this file.
// A setup failure is unrecoverable for the whole benchmark run, so it panics.
func init() {
	r, err := gouring.New(uint(Entries), nil)
	if err != nil {
		panic(err)
	}
	ring = r
	q = New(r)
}
// BenchmarkQueueBatchingNOP measures submitting Entries NOP SQEs as one batch
// and then draining the same number of CQEs, per benchmark iteration.
//
// The original used testify assertions inside the timed loop: each call boxes
// its arguments and goes through reflection, which skews the measurement, and
// assert (unlike require) keeps running after a failure, so a nil cqe from a
// failed GetCQEntry was dereferenced. Plain checks with b.Fatalf fix both.
func BenchmarkQueueBatchingNOP(b *testing.B) {
	var sqe *gouring.SQEntry
	for j := 0; j < b.N; j++ {
		// Fill the submission queue with NOPs tagged by index.
		for i := 0; i < Entries; i++ {
			sqe = q.GetSQEntry()
			sqe.Opcode = gouring.IORING_OP_NOP
			sqe.UserData = uint64(i)
		}
		n, err := q.SubmitAndWait(uint(Entries))
		if err != nil {
			b.Fatalf("submit: %v", err)
		}
		if n != Entries {
			b.Fatalf("submit result entries: got %d, want %d", n, Entries)
		}
		// Drain completions; NOPs complete in submission order with Res == 0.
		for i := 0; i < Entries; i++ {
			cqe, err := q.GetCQEntry(true)
			if err != nil {
				b.Fatalf("cqe wait error: %v", err)
			}
			if cqe.Res != 0 {
				b.Fatalf("cqe res: got %d, want 0", cqe.Res)
			}
			if cqe.UserData != uint64(i) {
				b.Fatalf("cqe user data: got %d, want %d", cqe.UserData, i)
			}
		}
	}
}
//
var (
	// _sqe is a zero-valued entry used as the assignment source in the
	// pointer-copy benchmarks below.
	_sqe StructTest
	// _sz_sqe is the in-memory size of an SQEntry.
	_sz_sqe = unsafe.Sizeof(_sqe)
	// _sqe_mm is a scratch byte buffer of the same size, the copy source
	// for BenchmarkSetPtrCpy.
	_sqe_mm = make([]byte, _sz_sqe)
	// m is the destination entry overwritten by each benchmark iteration.
	m = &StructTest{}
)
// StructTest aliases gouring.SQEntry for the struct-reset benchmarks.
type StructTest = gouring.SQEntry
// BenchmarkSetPtrVal measures resetting the entry behind m with a
// whole-struct assignment from the prepared zero value _sqe.
func BenchmarkSetPtrVal(b *testing.B) {
	for n := 0; n < b.N; n++ {
		*m = _sqe
	}
}
// BenchmarkSetPtrValEmpty measures resetting the entry behind m by
// assigning a freshly constructed zero value (composite literal).
func BenchmarkSetPtrValEmpty(b *testing.B) {
	for n := 0; n < b.N; n++ {
		*m = StructTest{}
	}
}
// BenchmarkSetPtrCpy measures resetting the entry behind m with a byte-wise
// copy from the prepared scratch buffer _sqe_mm.
//
// The original `*(*[]byte)(unsafe.Pointer(m))` reinterpreted the struct's
// leading bytes as a slice header (ptr/len/cap), so copy wrote through a
// bogus slice instead of over the struct. unsafe.Slice (Go 1.17+) builds a
// valid []byte view of exactly the struct's memory.
func BenchmarkSetPtrCpy(b *testing.B) {
	dst := unsafe.Slice((*byte)(unsafe.Pointer(m)), int(_sz_sqe))
	for i := 0; i < b.N; i++ {
		copy(dst, _sqe_mm)
	}
}

View file

@@ -37,7 +37,9 @@ type SQEntry struct {
splice_fd_in__file_index int32 // union { __s32 splice_fd_in, __u32 file_index }
pad2 [2]uint64
addr3 uint64
pad2 [1]uint64
}
func (sqe *SQEntry) Offset() *uint64 {
@@ -84,10 +86,17 @@ func (sqe *SQEntry) FileIndex() *uint32 {
return (*uint32)(unsafe.Pointer(&sqe.splice_fd_in__file_index))
}
// Addr3 returns a pointer to the entry's addr3 field.
// addr3 is already declared as uint64, so the address can be taken
// directly; no unsafe reinterpretation is needed.
func (sqe *SQEntry) Addr3() *uint64 {
	return &sqe.addr3
}
// SetAddr3 stores the address held by v into the entry's addr3 field.
// NOTE(review): reflect.Value.Pointer panics unless v's kind is pointer,
// unsafe.Pointer, chan, func, map, or slice — callers must pass a
// pointer-like value; confirm this contract with call sites.
func (sqe *SQEntry) SetAddr3(v interface{}) {
	*sqe.Addr3() = (uint64)(reflect.ValueOf(v).Pointer())
}
//
func (sqe *SQEntry) Reset() {
*sqe = _sqe
*sqe = SQEntry{}
}
//-- CQEntry
@@ -99,5 +108,5 @@ type CQEntry struct {
}
func (cqe *CQEntry) Reset() {
*cqe = _cqe
*cqe = CQEntry{}
}

View file

@@ -6,14 +6,14 @@ import (
)
const (
	// uring syscall no.
	// io_uring syscall numbers; these match the asm-generic unified
	// syscall table introduced alongside io_uring in Linux 5.1, so they
	// are the same on all architectures using that table.
	SYS_IO_URING_SETUP = 425
	SYS_IO_URING_ENTER = 426
	SYS_IO_URING_REGISTER = 427
)
//go:linkname errnoErr syscall.errnoErr
func errnoErr(e syscall.Errno) error
// SQOffsets represents submission queue
type SQOffsets struct {
Head uint32
Tail uint32
@@ -26,6 +26,7 @@ type SQOffsets struct {
Resv2 uint64
}
// CQOffsets represents completion queue
type CQOffsets struct {
Head uint32
Tail uint32
@@ -38,6 +39,7 @@ type CQOffsets struct {
Resv2 uint64
}
// IOUringParams io_uring_setup params
type IOUringParams struct {
SQEntries uint32 // sq_entries
CQEntries uint32 // cq_entries
@@ -54,33 +56,36 @@ type IOUringParams struct {
}
//go:inline
func io_uring_setup(entries uint, params *IOUringParams) (fd int, err error) {
func io_uring_setup(entries uint, params *IOUringParams) (ret int, err error) {
r1, _, e1 := syscall.Syscall(SYS_IO_URING_SETUP, uintptr(entries), uintptr(unsafe.Pointer(params)), 0)
fd = int(r1)
ret = int(r1)
if e1 != 0 {
err = errnoErr(e1)
err = syscall.Errno(e1)
}
return
}
//go:inline
// io_uring_enter submits and/or waits for I/O on the ring identified by
// ringFd. It delegates to io_uring_enter2 with the kernel's expected signal
// set size in bytes (NSIG/8), the same default liburing's wrapper passes.
func io_uring_enter(ringFd int, toSubmit uint, minComplete uint, flags uint, sig *Sigset_t) (ret int, err error) {
	return io_uring_enter2(ringFd, toSubmit, minComplete, flags, sig, NSIG/8)
}
//go:inline
func io_uring_enter2(ringFd int, toSubmit uint, minComplete uint, flags uint, sig *Sigset_t, sz int) (ret int, err error) {
r1, _, e1 := syscall.Syscall6(SYS_IO_URING_ENTER, uintptr(ringFd), uintptr(toSubmit), uintptr(minComplete), uintptr(flags), uintptr(unsafe.Pointer(sig)), uintptr(sz))
ret = int(r1)
if e1 != 0 {
err = errnoErr(e1)
err = syscall.Errno(e1)
}
return
}
//go:inline
func io_uring_register(ringFd int, opcode uint /*const*/, arg uintptr, nrArgs uint) (ret int, err error) {
r1, _, e1 := syscall.Syscall6(SYS_IO_URING_REGISTER, uintptr(ringFd), uintptr(opcode), arg, uintptr(nrArgs), 0, 0)
ret = int(r1)
if e1 != 0 {
err = errnoErr(e1)
err = syscall.Errno(e1)
}
return
}