Refactor.

Nuno Cruces
2024-11-07 12:18:42 +00:00
parent 38da27b5d1
commit eba73a87d9
6 changed files with 134 additions and 156 deletions

View File

@@ -15,9 +15,15 @@ func osGetSharedLock(file *os.File) _ErrorCode {
func osGetReservedLock(file *os.File) _ErrorCode {
rc := osLock(file, unix.LOCK_EX|unix.LOCK_NB, _IOERR_LOCK)
if rc == _BUSY {
// The documentation states the lock is upgraded by releasing the previous lock,
// then acquiring the new lock.
// This is a race, so return BUSY_SNAPSHOT to ensure the transaction is aborted.
// The documentation states that a lock is upgraded by
// releasing the previous lock, then acquiring the new lock.
// Going over the source code of various BSDs, though,
// with LOCK_NB, the lock is not released,
// and EAGAIN is returned holding the shared lock.
// Still, if we're already in a transaction, we want to abort it,
// so return BUSY_SNAPSHOT here. If there's no transaction active,
// SQLite will change this back to SQLITE_BUSY,
// and invoke the busy handler if appropriate.
return _BUSY_SNAPSHOT
}
return rc
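A quick way to see the BSD behavior described above is to take two shared flock(2) locks on the same file through separate descriptors and attempt a non-blocking upgrade. A minimal, self-contained sketch (file path and output are illustrative; on Linux the lock conversion is documented as non-atomic, so the retention shown below is only expected on the BSDs):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Two open file descriptions for the same file:
	// flock(2) locks attach to the description, so they can conflict.
	f1, err := os.Create("/tmp/flock-demo")
	if err != nil {
		panic(err)
	}
	defer f1.Close()
	f2, err := os.Open("/tmp/flock-demo")
	if err != nil {
		panic(err)
	}
	defer f2.Close()

	unix.Flock(int(f1.Fd()), unix.LOCK_SH) // shared lock via f1
	unix.Flock(int(f2.Fd()), unix.LOCK_SH) // shared lock via f2

	// The non-blocking upgrade of f1 fails with EAGAIN...
	fmt.Println(unix.Flock(int(f1.Fd()), unix.LOCK_EX|unix.LOCK_NB))

	// ...yet on the BSDs f1 keeps its shared lock, so an exclusive
	// lock via f2 is refused too, instead of succeeding.
	fmt.Println(unix.Flock(int(f2.Fd()), unix.LOCK_EX|unix.LOCK_NB))
}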
@@ -33,9 +39,11 @@ func osGetExclusiveLock(file *os.File, state *LockLevel) _ErrorCode {
func osDowngradeLock(file *os.File, _ LockLevel) _ErrorCode {
rc := osLock(file, unix.LOCK_SH|unix.LOCK_NB, _IOERR_RDLOCK)
if rc == _BUSY {
// The documentation states the lock is upgraded by releasing the previous lock,
// then acquiring the new lock.
// This is a race, so return IOERR_RDLOCK to ensure the transaction is aborted.
// The documentation states that a lock is downgraded by
// releasing the previous lock, then acquiring the new lock.
// Going over the source code of various BSDs, though,
// with LOCK_SH|LOCK_NB this should never happen.
// Return IOERR_RDLOCK, as BUSY would cause an assert to fail.
return _IOERR_RDLOCK
}
return _OK
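The downgrade claim is the mirror image: LOCK_SH|LOCK_NB cannot be refused while the caller already holds the exclusive lock, because no other description can hold a conflicting lock. A hedged sketch of that invariant (the helper name is hypothetical):

import (
	"os"

	"golang.org/x/sys/unix"
)

// downgrade sketches the EX to SH transition: the non-blocking request
// is expected to always succeed on the BSDs, since the caller already
// holds the strictly stronger exclusive lock.
func downgrade(f *os.File) error {
	if err := unix.Flock(int(f.Fd()), unix.LOCK_EX); err != nil {
		return err
	}
	return unix.Flock(int(f.Fd()), unix.LOCK_SH|unix.LOCK_NB)
}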
@@ -50,7 +58,10 @@ func osReleaseLock(file *os.File, _ LockLevel) _ErrorCode {
}
func osCheckReservedLock(file *os.File) (bool, _ErrorCode) {
// Test the RESERVED lock.
// Test the RESERVED lock with fcntl(F_GETLK).
// This only works on systems where fcntl and flock are compatible.
// However, SQLite only calls this while holding a shared lock,
// so the difference is immaterial.
lock, rc := osTestLock(file, _RESERVED_BYTE, 1)
return lock == unix.F_WRLCK, rc
}
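The osTestLock helper is not shown in this hunk. A plausible shape for it, using fcntl(F_GETLK) from golang.org/x/sys/unix; the exact signature and the _IOERR_CHECKRESERVEDLOCK fallback are assumptions:

import (
	"io"
	"os"

	"golang.org/x/sys/unix"
)

// osTestLock asks the kernel which lock type would block a write lock
// on [start, start+n): F_UNLCK if none, else F_RDLCK or F_WRLCK.
func osTestLock(file *os.File, start, n int64) (int16, _ErrorCode) {
	lock := unix.Flock_t{
		Type:   unix.F_WRLCK,
		Whence: io.SeekStart,
		Start:  start,
		Len:    n,
	}
	if unix.FcntlFlock(file.Fd(), unix.F_GETLK, &lock) != nil {
		return 0, _IOERR_CHECKRESERVEDLOCK
	}
	return lock.Type, _OK
}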

View File

@@ -50,14 +50,17 @@ func osGetExclusiveLock(file *os.File, state *LockLevel) _ErrorCode {
if rc != _OK {
// Reacquire the SHARED lock.
osReadLock(file, _SHARED_FIRST, _SHARED_SIZE, 0)
if rc := osReadLock(file, _SHARED_FIRST, _SHARED_SIZE, 0); rc != _OK {
// notest // this should never happen
return _IOERR_RDLOCK
}
}
return rc
}
func osDowngradeLock(file *os.File, state LockLevel) _ErrorCode {
if state >= LOCK_EXCLUSIVE {
// Release the EXCLUSIVE lock.
// Release the EXCLUSIVE lock while holding the PENDING lock.
osUnlock(file, _SHARED_FIRST, _SHARED_SIZE)
// Reacquire the SHARED lock.
@@ -78,7 +81,7 @@ func osDowngradeLock(file *os.File, state LockLevel) _ErrorCode {
}
func osReleaseLock(file *os.File, state LockLevel) _ErrorCode {
// Release all locks.
// Release all locks, PENDING must be last.
if state >= LOCK_RESERVED {
osUnlock(file, _RESERVED_BYTE, 1)
}
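Pieced together, the release sequence might look like the sketch below. Only the RESERVED branch appears in the diff; the SHARED and PENDING unlocks, and their level checks, are assumptions modeled on SQLite's byte-range locking protocol:

func osReleaseLock(file *os.File, state LockLevel) _ErrorCode {
	// Release all locks, PENDING must be last: while PENDING is held,
	// no new SHARED locks are granted, so dropping it last keeps
	// readers out until every other lock is gone.
	if state >= LOCK_RESERVED {
		osUnlock(file, _RESERVED_BYTE, 1)
	}
	if state >= LOCK_SHARED {
		osUnlock(file, _SHARED_FIRST, _SHARED_SIZE)
	}
	if state >= LOCK_PENDING {
		osUnlock(file, _PENDING_BYTE, 1)
	}
	return _OK
}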

View File

@@ -14,52 +14,52 @@ import (
"github.com/ncruces/go-sqlite3/internal/util"
)
type vfsShmFile struct {
type vfsShmParent struct {
*os.File
info os.FileInfo
refs int // +checklocks:vfsShmFilesMtx
refs int // +checklocks:vfsShmListMtx
lock [_SHM_NLOCK]int16 // +checklocks:Mutex
sync.Mutex
}
var (
// +checklocks:vfsShmFilesMtx
vfsShmFiles []*vfsShmFile
vfsShmFilesMtx sync.Mutex
// +checklocks:vfsShmListMtx
vfsShmList []*vfsShmParent
vfsShmListMtx sync.Mutex
)
type vfsShm struct {
*vfsShmFile
*vfsShmParent
path string
lock [_SHM_NLOCK]bool
regions []*util.MappedRegion
}
func (s *vfsShm) Close() error {
if s.vfsShmFile == nil {
if s.vfsShmParent == nil {
return nil
}
vfsShmFilesMtx.Lock()
defer vfsShmFilesMtx.Unlock()
vfsShmListMtx.Lock()
defer vfsShmListMtx.Unlock()
// Unlock everything.
s.shmLock(0, _SHM_NLOCK, _SHM_UNLOCK)
// Decrease reference count.
if s.vfsShmFile.refs > 0 {
s.vfsShmFile.refs--
s.vfsShmFile = nil
if s.vfsShmParent.refs > 0 {
s.vfsShmParent.refs--
s.vfsShmParent = nil
return nil
}
err := s.File.Close()
for i, g := range vfsShmFiles {
if g == s.vfsShmFile {
vfsShmFiles[i] = nil
s.vfsShmFile = nil
for i, g := range vfsShmList {
if g == s.vfsShmParent {
vfsShmList[i] = nil
s.vfsShmParent = nil
return err
}
}
@@ -67,7 +67,7 @@ func (s *vfsShm) Close() error {
}
func (s *vfsShm) shmOpen() _ErrorCode {
if s.vfsShmFile != nil {
if s.vfsShmParent != nil {
return _OK
}
@@ -85,13 +85,13 @@ func (s *vfsShm) shmOpen() _ErrorCode {
return _IOERR_FSTAT
}
vfsShmFilesMtx.Lock()
defer vfsShmFilesMtx.Unlock()
vfsShmListMtx.Lock()
defer vfsShmListMtx.Unlock()
// Find a shared file, increase the reference count.
for _, g := range vfsShmFiles {
for _, g := range vfsShmList {
if g != nil && os.SameFile(fi, g.info) {
s.vfsShmFile = g
s.vfsShmParent = g
g.refs++
return _OK
}
@@ -107,18 +107,18 @@ func (s *vfsShm) shmOpen() _ErrorCode {
}
// Add the new shared file.
s.vfsShmFile = &vfsShmFile{
s.vfsShmParent = &vfsShmParent{
File: f,
info: fi,
}
f = nil // Don't close the file.
for i, g := range vfsShmFiles {
for i, g := range vfsShmList {
if g == nil {
vfsShmFiles[i] = s.vfsShmFile
vfsShmList[i] = s.vfsShmParent
return _OK
}
}
vfsShmFiles = append(vfsShmFiles, s.vfsShmFile)
vfsShmList = append(vfsShmList, s.vfsShmParent)
return _OK
}
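The lookup above keys shared memory on file identity rather than path, so two connections that reach the same inode through different paths still share one vfsShmParent. Condensed into a helper (a restatement of the loop above, not code from the commit; the caller must hold vfsShmListMtx):

// findShared returns the already-open parent for this inode, if any,
// bumping its reference count.
func findShared(fi os.FileInfo) *vfsShmParent {
	for _, g := range vfsShmList {
		if g != nil && os.SameFile(fi, g.info) {
			g.refs++
			return g
		}
	}
	return nil
}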
@@ -157,57 +157,11 @@ func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, ext
func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
s.Lock()
defer s.Unlock()
switch {
case flags&_SHM_UNLOCK != 0:
for i := offset; i < offset+n; i++ {
if s.lock[i] {
if s.vfsShmFile.lock[i] == 0 {
panic(util.AssertErr())
}
if s.vfsShmFile.lock[i] <= 0 {
s.vfsShmFile.lock[i] = 0
} else {
s.vfsShmFile.lock[i]--
}
s.lock[i] = false
}
}
case flags&_SHM_SHARED != 0:
for i := offset; i < offset+n; i++ {
if s.lock[i] {
panic(util.AssertErr())
}
if s.vfsShmFile.lock[i]+1 <= 0 {
return _BUSY
}
}
for i := offset; i < offset+n; i++ {
s.vfsShmFile.lock[i]++
s.lock[i] = true
}
case flags&_SHM_EXCLUSIVE != 0:
for i := offset; i < offset+n; i++ {
if s.lock[i] {
panic(util.AssertErr())
}
if s.vfsShmFile.lock[i] != 0 {
return _BUSY
}
}
for i := offset; i < offset+n; i++ {
s.vfsShmFile.lock[i] = -1
s.lock[i] = true
}
default:
panic(util.AssertErr())
}
return _OK
return s.shmMemLock(offset, n, flags)
}
func (s *vfsShm) shmUnmap(delete bool) {
if s.vfsShmFile == nil {
if s.vfsShmParent == nil {
return
}

View File

@@ -13,22 +13,22 @@ import (
"github.com/tetratelabs/wazero/api"
)
type vfsShmBuffer struct {
type vfsShmParent struct {
shared [][_WALINDEX_PGSZ]byte
refs int // +checklocks:vfsShmBuffersMtx
refs int // +checklocks:vfsShmListMtx
lock [_SHM_NLOCK]int16 // +checklocks:Mutex
sync.Mutex
}
var (
// +checklocks:vfsShmBuffersMtx
vfsShmBuffers = map[string]*vfsShmBuffer{}
vfsShmBuffersMtx sync.Mutex
// +checklocks:vfsShmListMtx
vfsShmList = map[string]*vfsShmParent{}
vfsShmListMtx sync.Mutex
)
type vfsShm struct {
*vfsShmBuffer
*vfsShmParent
mod api.Module
alloc api.Function
free api.Function
@@ -40,20 +40,20 @@ type vfsShm struct {
}
func (s *vfsShm) Close() error {
if s.vfsShmBuffer == nil {
if s.vfsShmParent == nil {
return nil
}
vfsShmBuffersMtx.Lock()
defer vfsShmBuffersMtx.Unlock()
vfsShmListMtx.Lock()
defer vfsShmListMtx.Unlock()
// Unlock everything.
s.shmLock(0, _SHM_NLOCK, _SHM_UNLOCK)
// Decrease reference count.
if s.vfsShmBuffer.refs > 0 {
s.vfsShmBuffer.refs--
s.vfsShmBuffer = nil
if s.vfsShmParent.refs > 0 {
s.vfsShmParent.refs--
s.vfsShmParent = nil
return nil
}
@@ -61,22 +61,22 @@ func (s *vfsShm) Close() error {
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return _IOERR_UNLOCK
}
delete(vfsShmBuffers, s.path)
s.vfsShmBuffer = nil
delete(vfsShmList, s.path)
s.vfsShmParent = nil
return nil
}
func (s *vfsShm) shmOpen() _ErrorCode {
if s.vfsShmBuffer != nil {
if s.vfsShmParent != nil {
return _OK
}
vfsShmBuffersMtx.Lock()
defer vfsShmBuffersMtx.Unlock()
vfsShmListMtx.Lock()
defer vfsShmListMtx.Unlock()
// Find a shared buffer, increase the reference count.
if g, ok := vfsShmBuffers[s.path]; ok {
s.vfsShmBuffer = g
if g, ok := vfsShmList[s.path]; ok {
s.vfsShmParent = g
g.refs++
return _OK
}
@@ -92,8 +92,8 @@ func (s *vfsShm) shmOpen() _ErrorCode {
}
// Add the new shared buffer.
s.vfsShmBuffer = &vfsShmBuffer{}
vfsShmBuffers[s.path] = s.vfsShmBuffer
s.vfsShmParent = &vfsShmParent{}
vfsShmList[s.path] = s.vfsShmParent
return _OK
}
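This variant has no real file whose inode could be compared, so the registry is a map keyed by path. A hypothetical lookup helper in the same spirit (again, the caller must hold vfsShmListMtx):

// findShared returns the parent registered under this path, if any,
// bumping its reference count.
func findShared(path string) *vfsShmParent {
	if g, ok := vfsShmList[path]; ok {
		g.refs++
		return g
	}
	return nil
}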
@@ -155,56 +155,11 @@ func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
s.shmRelease()
}
switch {
case flags&_SHM_UNLOCK != 0:
for i := offset; i < offset+n; i++ {
if s.lock[i] {
if s.vfsShmBuffer.lock[i] == 0 {
panic(util.AssertErr())
}
if s.vfsShmBuffer.lock[i] <= 0 {
s.vfsShmBuffer.lock[i] = 0
} else {
s.vfsShmBuffer.lock[i]--
}
s.lock[i] = false
}
}
case flags&_SHM_SHARED != 0:
for i := offset; i < offset+n; i++ {
if s.lock[i] {
panic(util.AssertErr())
}
if s.vfsShmBuffer.lock[i]+1 <= 0 {
return _BUSY
}
}
for i := offset; i < offset+n; i++ {
s.vfsShmBuffer.lock[i]++
s.lock[i] = true
}
case flags&_SHM_EXCLUSIVE != 0:
for i := offset; i < offset+n; i++ {
if s.lock[i] {
panic(util.AssertErr())
}
if s.vfsShmBuffer.lock[i] != 0 {
return _BUSY
}
}
for i := offset; i < offset+n; i++ {
s.vfsShmBuffer.lock[i] = -1
s.lock[i] = true
}
default:
panic(util.AssertErr())
}
return _OK
return s.shmMemLock(offset, n, flags)
}
func (s *vfsShm) shmUnmap(delete bool) {
if s.vfsShmBuffer == nil {
if s.vfsShmParent == nil {
return
}
defer s.Close()

vfs/shm_memlk.go (new file, 55 additions)
View File

@@ -0,0 +1,55 @@
//go:build ((freebsd || openbsd || netbsd || dragonfly || illumos) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !sqlite3_nosys) || sqlite3_flock || sqlite3_dotlk

package vfs

import "github.com/ncruces/go-sqlite3/internal/util"

// +checklocks:s.Mutex
func (s *vfsShm) shmMemLock(offset, n int32, flags _ShmFlag) _ErrorCode {
	switch {
	case flags&_SHM_UNLOCK != 0:
		// Drop only this connection's holds;
		// a count of -1 means we held the slot exclusively.
		for i := offset; i < offset+n; i++ {
			if s.lock[i] {
				if s.vfsShmParent.lock[i] == 0 {
					panic(util.AssertErr())
				}
				if s.vfsShmParent.lock[i] <= 0 {
					s.vfsShmParent.lock[i] = 0
				} else {
					s.vfsShmParent.lock[i]--
				}
				s.lock[i] = false
			}
		}
	case flags&_SHM_SHARED != 0:
		// Check every slot before taking any,
		// so a refused lock leaves no partial state.
		for i := offset; i < offset+n; i++ {
			if s.lock[i] {
				panic(util.AssertErr())
			}
			if s.vfsShmParent.lock[i]+1 <= 0 {
				return _BUSY
			}
		}
		for i := offset; i < offset+n; i++ {
			s.vfsShmParent.lock[i]++
			s.lock[i] = true
		}
	case flags&_SHM_EXCLUSIVE != 0:
		// Exclusive slots must be entirely unheld; mark them -1.
		for i := offset; i < offset+n; i++ {
			if s.lock[i] {
				panic(util.AssertErr())
			}
			if s.vfsShmParent.lock[i] != 0 {
				return _BUSY
			}
		}
		for i := offset; i < offset+n; i++ {
			s.vfsShmParent.lock[i] = -1
			s.lock[i] = true
		}
	default:
		panic(util.AssertErr())
	}
	return _OK
}
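The counters encode the WAL-index lock semantics: a positive value counts shared holders, -1 marks a single exclusive holder, and the per-connection lock bitmap catches double-locking (the panics above). A hypothetical single-goroutine test sketch of those rules, skipping the s.Mutex discipline since there is no concurrency:

package vfs

import "testing"

func TestShmMemLock(t *testing.T) {
	p := &vfsShmParent{}
	a := &vfsShm{vfsShmParent: p}
	b := &vfsShm{vfsShmParent: p}
	c := &vfsShm{vfsShmParent: p}

	// Shared locks on the same slot stack: the counter reaches 2.
	if rc := a.shmMemLock(0, 1, _SHM_SHARED); rc != _OK {
		t.Fatal("a: shared lock:", rc)
	}
	if rc := b.shmMemLock(0, 1, _SHM_SHARED); rc != _OK {
		t.Fatal("b: shared lock:", rc)
	}
	// An exclusive lock on a busy slot is refused...
	if rc := c.shmMemLock(0, 1, _SHM_EXCLUSIVE); rc != _BUSY {
		t.Fatal("c: exclusive on busy slot:", rc)
	}
	// ...but succeeds on a free slot, marking it -1.
	if rc := c.shmMemLock(1, 1, _SHM_EXCLUSIVE); rc != _OK {
		t.Fatal("c: exclusive on free slot:", rc)
	}
	// Shared locks are refused while a slot is exclusively held.
	if rc := a.shmMemLock(1, 1, _SHM_SHARED); rc != _BUSY {
		t.Fatal("a: shared on exclusive slot:", rc)
	}
	// Unlock drops only this connection's holds: b still holds slot 0.
	if rc := a.shmMemLock(0, 1, _SHM_UNLOCK); rc != _OK {
		t.Fatal("a: unlock:", rc)
	}
}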

View File

@@ -127,6 +127,11 @@ func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, ext
}
func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
var timeout time.Duration
if s.blocking {
timeout = time.Millisecond
}
switch {
case flags&_SHM_LOCK != 0:
defer s.shmAcquire()
@@ -134,11 +139,6 @@ func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
s.shmRelease()
}
var timeout time.Duration
if s.blocking {
timeout = time.Millisecond
}
switch {
case flags&_SHM_UNLOCK != 0:
return osUnlock(s.File, _SHM_BASE+uint32(offset), uint32(n))
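With the timeout hoisted above the switch, it now also applies to the acquisition cases that follow this hunk. As a hedged sketch of the general pattern (the helper and its name are hypothetical; the real osLock helpers may instead block in the kernel with a deadline):

import "time"

// lockWithTimeout retries a non-blocking lock attempt until the
// timeout elapses; a zero timeout means a single attempt.
// tryLock is any helper that returns _BUSY on contention.
func lockWithTimeout(tryLock func() _ErrorCode, timeout time.Duration) _ErrorCode {
	deadline := time.Now().Add(timeout)
	for {
		rc := tryLock()
		if rc != _BUSY || time.Now().After(deadline) {
			return rc
		}
		time.Sleep(timeout / 16) // brief backoff before retrying
	}
}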