From 7329d9f2fbbd1562aebaa2d0f374f239c4ae34d4 Mon Sep 17 00:00:00 2001
From: Nuno Cruces
Date: Mon, 13 Feb 2023 13:53:32 +0000
Subject: [PATCH] Avoid writer starvation.

---
 tests/parallel_test.go |  5 ---
 vfs_lock.go            | 83 ++++++++++++++++++++++++++++++------------
 vfs_unix.go            | 18 +++------
 vfs_windows.go         | 22 +++++------
 4 files changed, 76 insertions(+), 52 deletions(-)

diff --git a/tests/parallel_test.go b/tests/parallel_test.go
index df6f077..a81b577 100644
--- a/tests/parallel_test.go
+++ b/tests/parallel_test.go
@@ -3,7 +3,6 @@ package tests
 import (
 	"os"
 	"path/filepath"
-	"runtime"
 	"testing"
 
 	"golang.org/x/sync/errgroup"
@@ -13,10 +12,6 @@ import (
 )
 
 func TestParallel(t *testing.T) {
-	if runtime.GOOS == "windows" {
-		t.Skip()
-	}
-
 	dir, err := os.MkdirTemp("", "sqlite3-")
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/vfs_lock.go b/vfs_lock.go
index fa1f75e..da46a67 100644
--- a/vfs_lock.go
+++ b/vfs_lock.go
@@ -64,7 +64,7 @@ type vfsFileLocker struct {
 }
 
 func vfsLock(ctx context.Context, mod api.Module, pFile uint32, eLock vfsLockState) uint32 {
-	// SQLite never explicitly requests a pendig lock.
+	// Argument check. SQLite never explicitly requests a pending lock.
 	if eLock != _SHARED_LOCK && eLock != _RESERVED_LOCK && eLock != _EXCLUSIVE_LOCK {
 		panic(assertErr())
 	}
@@ -72,12 +72,10 @@ func vfsLock(ctx context.Context, mod api.Module, pFile uint32, eLock vfsLockSta
 	ptr := vfsFilePtr{mod, pFile}
 	cLock := ptr.Lock()
 
-	// If we already have an equal or more restrictive lock, do nothing.
-	if cLock >= eLock {
-		return _OK
-	}
-
 	switch {
+	case cLock < _NO_LOCK || cLock > _EXCLUSIVE_LOCK:
+		// Connection state check.
+		panic(assertErr())
 	case cLock == _NO_LOCK && eLock > _SHARED_LOCK:
 		// We never move from unlocked to anything higher than a shared lock.
 		panic(assertErr())
@@ -86,31 +84,51 @@ func vfsLock(ctx context.Context, mod api.Module, pFile uint32, eLock vfsLockSta
 		panic(assertErr())
 	}
 
+	// If we already have an equal or more restrictive lock, do nothing.
+	if cLock >= eLock {
+		return _OK
+	}
+
 	fLock := ptr.Locker()
 	fLock.Lock()
 	defer fLock.Unlock()
 
+	// File state check.
+	switch {
+	case fLock.state < _NO_LOCK || fLock.state > _EXCLUSIVE_LOCK:
+		panic(assertErr())
+	case fLock.state == _NO_LOCK && fLock.shared != 0:
+		panic(assertErr())
+	case fLock.state == _EXCLUSIVE_LOCK && fLock.shared != 1:
+		panic(assertErr())
+	case fLock.state != _NO_LOCK && fLock.shared <= 0:
+		panic(assertErr())
+	case fLock.state < cLock:
+		panic(assertErr())
+	}
+
 	// If some other connection has a lock that precludes the requested lock, return BUSY.
 	if cLock != fLock.state && (eLock > _SHARED_LOCK || fLock.state >= _PENDING_LOCK) {
 		return uint32(BUSY)
 	}
 
-	// If a SHARED lock is requested, and some other connection has a SHARED or RESERVED lock,
-	// then increment the reference count and return OK.
-	if eLock == _SHARED_LOCK && (fLock.state == _SHARED_LOCK || fLock.state == _RESERVED_LOCK) {
-		if cLock != _NO_LOCK || fLock.shared <= 0 {
-			panic(assertErr())
-		}
-		ptr.SetLock(_SHARED_LOCK)
-		fLock.shared++
-		return _OK
-	}
-
-	// If control gets to this point, then actually go ahead and make
-	// operating system calls for the specified lock.
 	switch eLock {
 	case _SHARED_LOCK:
-		if fLock.state != _NO_LOCK || fLock.shared != 0 {
+		// Test the PENDING lock before acquiring a new SHARED lock.
+		if locked, _ := fLock.CheckPending(); locked {
+			return uint32(BUSY)
+		}
+
+		// If some other connection has a SHARED or RESERVED lock,
+		// increment the reference count and return OK.
+		if fLock.state == _SHARED_LOCK || fLock.state == _RESERVED_LOCK {
+			ptr.SetLock(_SHARED_LOCK)
+			fLock.shared++
+			return _OK
+		}
+
+		// Must be unlocked to get SHARED.
+		if fLock.state != _NO_LOCK {
 			panic(assertErr())
 		}
 		if rc := fLock.GetShared(); rc != _OK {
@@ -122,7 +140,8 @@ func vfsLock(ctx context.Context, mod api.Module, pFile uint32, eLock vfsLockSta
 		return _OK
 
 	case _RESERVED_LOCK:
-		if fLock.state != _SHARED_LOCK || fLock.shared <= 0 {
+		// Must be SHARED to get RESERVED.
+		if fLock.state != _SHARED_LOCK {
 			panic(assertErr())
 		}
 		if rc := fLock.GetReserved(); rc != _OK {
@@ -133,7 +152,8 @@ func vfsLock(ctx context.Context, mod api.Module, pFile uint32, eLock vfsLockSta
 		return _OK
 
 	case _EXCLUSIVE_LOCK:
-		if fLock.state <= _NO_LOCK || fLock.state >= _EXCLUSIVE_LOCK || fLock.shared <= 0 {
+		// Must be SHARED, PENDING or RESERVED to get EXCLUSIVE.
+		if fLock.state <= _NO_LOCK || fLock.state >= _EXCLUSIVE_LOCK {
 			panic(assertErr())
 		}
 
@@ -164,6 +184,7 @@ func vfsLock(ctx context.Context, mod api.Module, pFile uint32, eLock vfsLockSta
 }
 
 func vfsUnlock(ctx context.Context, mod api.Module, pFile uint32, eLock vfsLockState) uint32 {
+	// Argument check.
 	if eLock != _NO_LOCK && eLock != _SHARED_LOCK {
 		panic(assertErr())
 	}
@@ -171,6 +192,11 @@ func vfsUnlock(ctx context.Context, mod api.Module, pFile uint32, eLock vfsLockS
 	ptr := vfsFilePtr{mod, pFile}
 	cLock := ptr.Lock()
 
+	// Connection state check.
+	if cLock < _NO_LOCK || cLock > _EXCLUSIVE_LOCK {
+		panic(assertErr())
+	}
+
 	// If we don't have a more restrictive lock, do nothing.
 	if cLock <= eLock {
 		return _OK
@@ -180,10 +206,20 @@ func vfsUnlock(ctx context.Context, mod api.Module, pFile uint32, eLock vfsLockS
 	fLock.Lock()
 	defer fLock.Unlock()
 
-	if fLock.shared <= 0 {
+	// File state check.
+	switch {
+	case fLock.state <= _NO_LOCK || fLock.state > _EXCLUSIVE_LOCK:
+		panic(assertErr())
+	case fLock.state == _EXCLUSIVE_LOCK && fLock.shared != 1:
+		panic(assertErr())
+	case fLock.shared <= 0:
+		panic(assertErr())
+	case fLock.state < cLock:
 		panic(assertErr())
 	}
 
+	if cLock > _SHARED_LOCK {
+		// The connection must own the lock to release it.
 		if cLock != fLock.state {
 			panic(assertErr())
 		}
@@ -197,6 +233,7 @@ func vfsUnlock(ctx context.Context, mod api.Module, pFile uint32, eLock vfsLockS
 		}
 	}
 
+	// If we get here, make sure we're dropping all locks.
 	if eLock != _NO_LOCK {
 		panic(assertErr())
 	}
diff --git a/vfs_unix.go b/vfs_unix.go
index 3c24ee0..8a0ef8b 100644
--- a/vfs_unix.go
+++ b/vfs_unix.go
@@ -13,19 +13,8 @@ func deleteOnClose(f *os.File) {
 }
 
 func (l *vfsFileLocker) GetShared() xErrorCode {
-	// A PENDING lock is needed before acquiring a SHARED lock.
-	if rc := l.readLock(_PENDING_BYTE, 1); rc != _OK {
-		return rc
-	}
-
 	// Acquire the SHARED lock.
-	rc := l.readLock(_SHARED_FIRST, _SHARED_SIZE)
-
-	// Drop the temporary PENDING lock.
-	if rc2 := l.unlock(_PENDING_BYTE, 1); rc == _OK {
-		return rc2
-	}
-	return rc
+	return l.readLock(_SHARED_FIRST, _SHARED_SIZE)
 }
 
 func (l *vfsFileLocker) GetReserved() xErrorCode {
@@ -68,6 +57,11 @@ func (l *vfsFileLocker) CheckReserved() (bool, xErrorCode) {
 	return l.checkLock(_RESERVED_BYTE, 1)
 }
 
+func (l *vfsFileLocker) CheckPending() (bool, xErrorCode) {
+	// Test the PENDING lock.
+ return l.checkLock(_PENDING_BYTE, 1) +} + func (l *vfsFileLocker) unlock(start, len int64) xErrorCode { err := l.fcntlSetLock(&syscall.Flock_t{ Type: syscall.F_UNLCK, diff --git a/vfs_windows.go b/vfs_windows.go index a48910d..e2b72bf 100644 --- a/vfs_windows.go +++ b/vfs_windows.go @@ -10,19 +10,8 @@ import ( func deleteOnClose(f *os.File) {} func (l *vfsFileLocker) GetShared() xErrorCode { - // A PENDING lock is needed before acquiring a SHARED lock. - if rc := l.readLock(_PENDING_BYTE, 1); rc != _OK { - return rc - } - // Acquire the SHARED lock. - rc := l.readLock(_SHARED_FIRST, _SHARED_SIZE) - - // Drop the temporary PENDING lock. - if rc2 := l.unlock(_PENDING_BYTE, 1); rc == _OK { - return rc2 - } - return rc + return l.readLock(_SHARED_FIRST, _SHARED_SIZE) } func (l *vfsFileLocker) GetReserved() xErrorCode { @@ -83,6 +72,15 @@ func (l *vfsFileLocker) CheckReserved() (bool, xErrorCode) { return rc != _OK, _OK } +func (l *vfsFileLocker) CheckPending() (bool, xErrorCode) { + // Test the PENDING lock. + rc := l.readLock(_PENDING_BYTE, 1) + if rc == _OK { + l.unlock(_PENDING_BYTE, 1) + } + return rc != _OK, _OK +} + func (l *vfsFileLocker) unlock(start, len uint32) xErrorCode { err := windows.UnlockFileEx(windows.Handle(l.file.Fd()), 0, len, 0, &windows.Overlapped{Offset: start})
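
Note on the approach: with this patch, GetShared() no longer takes a transient PENDING lock. Instead, vfsLock() probes the PENDING byte (the new CheckPending helper) before acquiring a new SHARED lock or joining an existing one through the reference count, so a writer parked on PENDING is no longer starved by a steady stream of incoming readers. The sketch below illustrates that pattern with plain syscall.FcntlFlock calls on Unix; the package name, helper names (checkPending, getShared) and hard-coded byte offsets are illustrative assumptions, not this module's actual API, which returns xErrorCode and goes through its own fcntlSetLock/readLock helpers.

//go:build unix

// Package locksketch is an illustrative sketch of the "check PENDING before
// SHARED" pattern introduced by this patch; it is not the module's real code.
package locksketch

import (
	"os"
	"syscall"
)

// SQLite's standard lock bytes (referred to in the patch as _PENDING_BYTE,
// _SHARED_FIRST and _SHARED_SIZE).
const (
	pendingByte = 0x40000000
	sharedFirst = pendingByte + 2
	sharedSize  = 510
)

// checkPending probes the PENDING byte with a non-blocking read lock and
// releases it immediately, reporting whether some other process holds it.
// For simplicity, any failure to take the probe lock counts as "held".
func checkPending(f *os.File) (bool, error) {
	lk := syscall.Flock_t{Type: syscall.F_RDLCK, Start: pendingByte, Len: 1}
	if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lk); err != nil {
		return true, nil
	}
	lk.Type = syscall.F_UNLCK
	return false, syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lk)
}

// getShared takes the SHARED byte range only once PENDING is clear, so a
// writer waiting on PENDING is not starved by a stream of new readers.
func getShared(f *os.File) error {
	if held, err := checkPending(f); err != nil {
		return err
	} else if held {
		return syscall.EBUSY // back off and retry, as vfsLock returns BUSY
	}
	lk := syscall.Flock_t{Type: syscall.F_RDLCK, Start: sharedFirst, Len: sharedSize}
	return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lk)
}

The key ordering is that the PENDING probe happens before either shared path; probing it only when acquiring a brand-new SHARED range (as the old GetShared did) lets same-process readers keep piling onto the reference count while a writer waits.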