Compare commits

...

11 Commits

Author SHA1 Message Date
Nuno Cruces
be2f3036b4 SQLite 3.50.2. 2025-06-30 12:29:54 +01:00
Nuno Cruces
784f82f42f Avoid UB. 2025-06-25 15:27:11 +01:00
Nuno Cruces
cd6ba43e77 Less SIMD. 2025-06-24 02:23:54 +01:00
Nuno Cruces
d7aef63844 Naming, volatile. 2025-06-20 12:45:42 +01:00
Nuno Cruces
64e5046f10 Improved byteset search. 2025-06-17 11:36:53 +01:00
Nuno Cruces
0bdce8aa68 Avoid overflow. 2025-06-12 15:12:20 +01:00
Nuno Cruces
69a2881a10 SQLite 3.50.1. 2025-06-08 00:38:01 +01:00
dependabot[bot]
24ad4445f1 Bump golang.org/x/crypto from 0.38.0 to 0.39.0 (#285)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.38.0 to 0.39.0.
- [Commits](https://github.com/golang/crypto/compare/v0.38.0...v0.39.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-version: 0.39.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-05 23:49:59 +01:00
Nuno Cruces
c159bbd88f Docs, tweaks. 2025-06-04 12:19:01 +01:00
Nuno Cruces
c90f8205f7 Remove. 2025-06-03 12:54:56 +01:00
Nuno Cruces
b64b9b0415 Better strcasestr. 2025-06-02 10:25:10 +01:00
33 changed files with 768 additions and 4075 deletions

View File

@@ -1,6 +1,6 @@
# Embeddable Wasm build of SQLite
This folder includes an embeddable Wasm build of SQLite 3.50.0 for use with
This folder includes an embeddable Wasm build of SQLite 3.50.2 for use with
[`github.com/ncruces/go-sqlite3`](https://pkg.go.dev/github.com/ncruces/go-sqlite3).
The following optional features are compiled in:

Binary file not shown.

View File

@@ -53,7 +53,7 @@ func Test_bcw2(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if version != "3.50.0" {
if version != "3.51.0" {
t.Error(version)
}
}

View File

@@ -13,8 +13,8 @@ mkdir -p build/ext/
cp "$ROOT"/sqlite3/*.[ch] build/
cp "$ROOT"/sqlite3/*.patch build/
# https://sqlite.org/src/info/54b8888080d99a87
curl -# https://sqlite.org/src/tarball/sqlite.tar.gz?r=54b8888080 | tar xz
# https://sqlite.org/src/info/a6f6fbe6173de8a2
curl -# https://sqlite.org/src/tarball/sqlite.tar.gz?r=a6f6fbe617 | tar xz
cd sqlite
if [[ "$OSTYPE" == "msys" || "$OSTYPE" == "cygwin" ]]; then

View File

@@ -4,11 +4,11 @@ go 1.23.0
toolchain go1.24.0
require github.com/ncruces/go-sqlite3 v0.25.0
require github.com/ncruces/go-sqlite3 v0.26.2
require (
github.com/ncruces/julianday v1.0.0 // indirect
github.com/ncruces/sort v0.1.5 // indirect
github.com/tetratelabs/wazero v1.9.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/sys v0.33.0 // indirect
)

View File

@@ -1,12 +1,12 @@
github.com/ncruces/go-sqlite3 v0.25.0 h1:trugKUs98Zwy9KwRr/EUxZHL92LYt7UqcKqAfpGpK+I=
github.com/ncruces/go-sqlite3 v0.25.0/go.mod h1:n6Z7036yFilJx04yV0mi5JWaF66rUmXn1It9Ux8dx68=
github.com/ncruces/go-sqlite3 v0.26.2 h1:5UkIBwdfMN2irpVI1dgi9TjTUlxNI06Rti1C8O7ZKVg=
github.com/ncruces/go-sqlite3 v0.26.2/go.mod h1:XFTPtFIo1DmGCh+XVP8KGn9b/o2f+z0WZuT09x2N6eo=
github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M=
github.com/ncruces/julianday v1.0.0/go.mod h1:Dusn2KvZrrovOMJuOt0TNXL6tB7U2E8kvza5fFc9G7g=
github.com/ncruces/sort v0.1.5 h1:fiFWXXAqKI8QckPf/6hu/bGFwcEPrirIOFaJqWujs4k=
github.com/ncruces/sort v0.1.5/go.mod h1:obJToO4rYr6VWP0Uw5FYymgYGt3Br4RXcs/JdKaXAPk=
github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I=
github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=

View File

@@ -19,7 +19,7 @@ func Test_init(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if version != "3.50.0" {
if version != "3.50.2" {
t.Error(version)
}
}

Binary file not shown.

View File

@@ -30,7 +30,7 @@ you can load into your database connections.
- [`github.com/ncruces/go-sqlite3/ext/statement`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/ext/statement)
creates [parameterized views](https://github.com/0x09/sqlite-statement-vtab).
- [`github.com/ncruces/go-sqlite3/ext/stats`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/ext/stats)
provides [statistics](https://www.oreilly.com/library/view/sql-in-a/9780596155322/ch04s02.html) functions.
provides [statistics](https://oreilly.com/library/view/sql-in-a/9780596155322/ch04s02.html) functions.
- [`github.com/ncruces/go-sqlite3/ext/unicode`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/ext/unicode)
provides [Unicode aware](https://sqlite.org/src/dir/ext/icu) functions.
- [`github.com/ncruces/go-sqlite3/ext/uuid`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/ext/uuid)

View File

@@ -38,7 +38,7 @@ func TestRegister(t *testing.T) {
{`regexp_instr('Hello', '.', 6)`, ""},
{`regexp_substr('Hello', 'el.')`, "ell"},
{`regexp_replace('Hello', 'llo', 'll')`, "Hell"},
// https://www.postgresql.org/docs/current/functions-matching.html
// https://postgresql.org/docs/current/functions-matching.html
{`regexp_count('ABCABCAXYaxy', 'A.')`, "3"},
{`regexp_count('ABCABCAXYaxy', '(?i)A.', 1)`, "4"},
{`regexp_instr('number of your street, town zip, FR', '[^,]+', 1, 2)`, "23"},

View File

@@ -1,6 +1,6 @@
# ANSI SQL Aggregate Functions
https://www.oreilly.com/library/view/sql-in-a/9780596155322/ch04s02.html
https://oreilly.com/library/view/sql-in-a/9780596155322/ch04s02.html
## Built in aggregates

View File

@@ -47,7 +47,7 @@
//
// [Built-in Aggregate Functions]: https://sqlite.org/lang_aggfunc.html
// [Built-in Window Functions]: https://sqlite.org/windowfunctions.html#builtins
// [ANSI SQL Aggregate Functions]: https://www.oreilly.com/library/view/sql-in-a/9780596155322/ch04s02.html
// [ANSI SQL Aggregate Functions]: https://oreilly.com/library/view/sql-in-a/9780596155322/ch04s02.html
package stats
import (

go.mod (6 changed lines)
View File

@@ -8,7 +8,7 @@ require (
github.com/ncruces/julianday v1.0.0
github.com/ncruces/sort v0.1.5
github.com/tetratelabs/wazero v1.9.0
golang.org/x/crypto v0.38.0
golang.org/x/crypto v0.39.0
golang.org/x/sys v0.33.0
)
@@ -16,8 +16,8 @@ require (
github.com/dchest/siphash v1.2.3 // ext/bloom
github.com/google/uuid v1.6.0 // ext/uuid
github.com/psanford/httpreadat v0.1.0 // example
golang.org/x/sync v0.14.0 // test
golang.org/x/text v0.25.0 // ext/unicode
golang.org/x/sync v0.15.0 // test
golang.org/x/text v0.26.0 // ext/unicode
lukechampine.com/adiantum v1.1.1 // vfs/adiantum
)

go.sum (12 changed lines)
View File

@@ -10,13 +10,13 @@ github.com/psanford/httpreadat v0.1.0 h1:VleW1HS2zO7/4c7c7zNl33fO6oYACSagjJIyMIw
github.com/psanford/httpreadat v0.1.0/go.mod h1:Zg7P+TlBm3bYbyHTKv/EdtSJZn3qwbPwpfZ/I9GKCRE=
github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I=
github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
lukechampine.com/adiantum v1.1.1 h1:4fp6gTxWCqpEbLy40ExiYDDED3oUNWx5cTqBCtPdZqA=
lukechampine.com/adiantum v1.1.1/go.mod h1:LrAYVnTYLnUtE/yMp5bQr0HstAf060YUF8nM0B6+rUw=

View File

@@ -5,8 +5,8 @@ go 1.23.0
toolchain go1.24.0
require (
github.com/ncruces/go-sqlite3 v0.25.0
gorm.io/gorm v1.25.12
github.com/ncruces/go-sqlite3 v0.26.2
gorm.io/gorm v1.30.0
)
require (
@@ -14,6 +14,6 @@ require (
github.com/jinzhu/now v1.1.5 // indirect
github.com/ncruces/julianday v1.0.0 // indirect
github.com/tetratelabs/wazero v1.9.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/text v0.24.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/text v0.26.0 // indirect
)

View File

@@ -2,15 +2,15 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/ncruces/go-sqlite3 v0.25.0 h1:trugKUs98Zwy9KwRr/EUxZHL92LYt7UqcKqAfpGpK+I=
github.com/ncruces/go-sqlite3 v0.25.0/go.mod h1:n6Z7036yFilJx04yV0mi5JWaF66rUmXn1It9Ux8dx68=
github.com/ncruces/go-sqlite3 v0.26.2 h1:5UkIBwdfMN2irpVI1dgi9TjTUlxNI06Rti1C8O7ZKVg=
github.com/ncruces/go-sqlite3 v0.26.2/go.mod h1:XFTPtFIo1DmGCh+XVP8KGn9b/o2f+z0WZuT09x2N6eo=
github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M=
github.com/ncruces/julianday v1.0.0/go.mod h1:Dusn2KvZrrovOMJuOt0TNXL6tB7U2E8kvza5fFc9G7g=
github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I=
github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8=
gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs=
gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=

View File

@@ -1,4 +1,3 @@
// Package gormlite provides a GORM driver for SQLite.
package gormlite
import (
@@ -52,7 +51,9 @@ func (dialector _Dialector) Initialize(db *gorm.DB) (err error) {
})
for k, v := range dialector.ClauseBuilders() {
db.ClauseBuilders[k] = v
if _, ok := db.ClauseBuilders[k]; !ok {
db.ClauseBuilders[k] = v
}
}
return
}
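
The new guard above changes Initialize to register a dialector clause builder only when no builder is already present under that key on the *gorm.DB, instead of overwriting it. A minimal sketch of that behavior, using a plain map to stand in for db.ClauseBuilders (names and values below are illustrative, not gorm API):

```go
package main

import "fmt"

func main() {
	// Stand-in for db.ClauseBuilders with one entry already registered.
	builders := map[string]string{
		"LIMIT": "custom LIMIT builder",
	}
	// Stand-in for dialector.ClauseBuilders().
	defaults := map[string]string{
		"INSERT": "dialector INSERT builder",
		"LIMIT":  "dialector LIMIT builder",
	}
	for k, v := range defaults {
		if _, ok := builders[k]; !ok { // keep whatever was registered first
			builders[k] = v
		}
	}
	fmt.Println(builders["LIMIT"])  // custom LIMIT builder (preserved)
	fmt.Println(builders["INSERT"]) // dialector INSERT builder (filled in)
}
```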

View File

@@ -7,7 +7,7 @@ rm -rf gorm/ tests/
go work use -r .
go test
git clone --branch v1.25.12 --filter=blob:none https://github.com/go-gorm/gorm.git
git clone --branch v1.30.0 --filter=blob:none https://github.com/go-gorm/gorm.git
mv gorm/tests tests
rm -rf gorm/

View File

@@ -3,12 +3,12 @@ set -euo pipefail
cd -P -- "$(dirname -- "$0")"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.5.7/ddlmod.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.5.7/ddlmod_test.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.5.7/ddlmod_parse_all_columns.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.5.7/ddlmod_parse_all_columns_test.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.5.7/error_translator.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.5.7/migrator.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.5.7/sqlite.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.5.7/sqlite_test.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.6.0/ddlmod.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.6.0/ddlmod_test.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.6.0/ddlmod_parse_all_columns.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.6.0/ddlmod_parse_all_columns_test.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.6.0/error_translator.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.6.0/migrator.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.6.0/sqlite.go"
curl -#OL "https://github.com/go-gorm/sqlite/raw/v1.6.0/sqlite_test.go"
curl -#L "https://github.com/glebarez/sqlite/raw/v1.11.0/sqlite_error_translator_test.go" > error_translator_test.go

View File

@@ -2,7 +2,7 @@
# handle, and interrupt, sqlite3_busy_timeout.
--- sqlite3.c.orig
+++ sqlite3.c
@@ -183364,7 +183364,7 @@
@@ -184433,7 +184433,7 @@
if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
#endif
if( ms>0 ){
@@ -10,4 +10,4 @@
+ sqlite3_busy_handler(db, (int(*)(void*,int))sqliteBusyCallback,
(void*)db);
db->busyTimeout = ms;
}else{
#ifdef SQLITE_ENABLE_SETLK_TIMEOUT

View File

@@ -3,7 +3,7 @@ set -euo pipefail
cd -P -- "$(dirname -- "$0")"
curl -#OL "https://sqlite.org/2025/sqlite-amalgamation-3500000.zip"
curl -#OL "https://sqlite.org/2025/sqlite-amalgamation-3500200.zip"
unzip -d . sqlite-amalgamation-*.zip
mv sqlite-amalgamation-*/sqlite3.c .
mv sqlite-amalgamation-*/sqlite3.h .
@@ -19,30 +19,30 @@ rm -rf sqlite-amalgamation-*
mkdir -p ext/
cd ext/
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/ext/misc/anycollseq.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/ext/misc/base64.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/ext/misc/decimal.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/ext/misc/ieee754.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/ext/misc/regexp.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/ext/misc/series.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/ext/misc/spellfix.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/ext/misc/uint.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/ext/misc/anycollseq.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/ext/misc/base64.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/ext/misc/decimal.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/ext/misc/ieee754.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/ext/misc/regexp.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/ext/misc/series.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/ext/misc/spellfix.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/ext/misc/uint.c"
cd ~-
cd ../vfs/tests/mptest/testdata/
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/mptest/config01.test"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/mptest/config02.test"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/mptest/crash01.test"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/mptest/crash02.subtest"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/mptest/multiwrite01.test"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/mptest/config01.test"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/mptest/config02.test"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/mptest/crash01.test"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/mptest/crash02.subtest"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/mptest/multiwrite01.test"
cd ~-
cd ../vfs/tests/mptest/wasm/
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/mptest/mptest.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/mptest/mptest.c"
cd ~-
cd ../vfs/tests/speedtest1/wasm/
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.0/test/speedtest1.c"
curl -#OL "https://github.com/sqlite/sqlite/raw/version-3.50.2/test/speedtest1.c"
cd ~-
cat *.patch | patch -p0 --no-backup-if-mismatch

View File

@@ -28,31 +28,18 @@ EOF
-Wl,--stack-first \
-Wl,--import-undefined \
-Wl,--initial-memory=16777216 \
-Wl,--export=memccpy \
-Wl,--export=memchr \
-Wl,--export=memcmp \
-Wl,--export=memcpy \
-Wl,--export=memmem \
-Wl,--export=memmove \
-Wl,--export=memrchr \
-Wl,--export=memset \
-Wl,--export=stpcpy \
-Wl,--export=stpncpy \
-Wl,--export=strcasecmp \
-Wl,--export=strcasestr \
-Wl,--export=strchr \
-Wl,--export=strchrnul \
-Wl,--export=strcmp \
-Wl,--export=strcpy \
-Wl,--export=strcspn \
-Wl,--export=strlen \
-Wl,--export=strncasecmp \
-Wl,--export=strncat \
-Wl,--export=strncmp \
-Wl,--export=strncpy \
-Wl,--export=strrchr \
-Wl,--export=strspn \
-Wl,--export=strstr \
-Wl,--export=qsort
"$BINARYEN/wasm-ctor-eval" -g -c _initialize libc.wasm -o libc.tmp

Binary file not shown.

File diff suppressed because it is too large.

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
_ "embed"
"math"
"os"
"strings"
"testing"
@@ -24,25 +25,18 @@ const (
)
var (
memory []byte
module api.Module
memset api.Function
memcpy api.Function
memchr api.Function
memcmp api.Function
memmem api.Function
strlen api.Function
strchr api.Function
strcmp api.Function
strstr api.Function
strspn api.Function
strrchr api.Function
strncmp api.Function
strcspn api.Function
strcasecmp api.Function
strcasestr api.Function
strncasecmp api.Function
stack [8]uint64
memory []byte
module api.Module
memset api.Function
memcpy api.Function
memchr api.Function
memcmp api.Function
strlen api.Function
strchr api.Function
strspn api.Function
strrchr api.Function
strcspn api.Function
stack [8]uint64
)
func call(fn api.Function, arg ...uint64) uint64 {
@@ -68,18 +62,11 @@ func TestMain(m *testing.M) {
memcpy = mod.ExportedFunction("memcpy")
memchr = mod.ExportedFunction("memchr")
memcmp = mod.ExportedFunction("memcmp")
memmem = mod.ExportedFunction("memmem")
strlen = mod.ExportedFunction("strlen")
strchr = mod.ExportedFunction("strchr")
strcmp = mod.ExportedFunction("strcmp")
strstr = mod.ExportedFunction("strstr")
strspn = mod.ExportedFunction("strspn")
strrchr = mod.ExportedFunction("strrchr")
strncmp = mod.ExportedFunction("strncmp")
strcspn = mod.ExportedFunction("strcspn")
strcasecmp = mod.ExportedFunction("strcasecmp")
strcasestr = mod.ExportedFunction("strcasestr")
strncasecmp = mod.ExportedFunction("strncasecmp")
memory, _ = mod.Memory().Read(0, mod.Memory().Size())
os.Exit(m.Run())
@@ -166,58 +153,6 @@ func Benchmark_memcmp(b *testing.B) {
}
}
func Benchmark_strcmp(b *testing.B) {
clear(memory)
fill(memory[ptr1:ptr1+size-1], 7)
fill(memory[ptr2:ptr2+size/2], 7)
fill(memory[ptr2+size/2:ptr2+size-1], 5)
b.SetBytes(size/2 + 1)
b.ResetTimer()
for range b.N {
call(strcmp, ptr1, ptr2, size)
}
}
func Benchmark_strncmp(b *testing.B) {
clear(memory)
fill(memory[ptr1:ptr1+size-1], 7)
fill(memory[ptr2:ptr2+size/2], 7)
fill(memory[ptr2+size/2:ptr2+size-1], 5)
b.SetBytes(size/2 + 1)
b.ResetTimer()
for range b.N {
call(strncmp, ptr1, ptr2, size-1)
}
}
func Benchmark_strcasecmp(b *testing.B) {
clear(memory)
fill(memory[ptr1:ptr1+size-1], 7)
fill(memory[ptr2:ptr2+size/2], 7)
fill(memory[ptr2+size/2:ptr2+size-1], 5)
b.SetBytes(size/2 + 1)
b.ResetTimer()
for range b.N {
call(strcasecmp, ptr1, ptr2, size)
}
}
func Benchmark_strncasecmp(b *testing.B) {
clear(memory)
fill(memory[ptr1:ptr1+size-1], 7)
fill(memory[ptr2:ptr2+size/2], 7)
fill(memory[ptr2+size/2:ptr2+size-1], 5)
b.SetBytes(size/2 + 1)
b.ResetTimer()
for range b.N {
call(strncasecmp, ptr1, ptr2, size-1)
}
}
func Benchmark_strspn(b *testing.B) {
clear(memory)
fill(memory[ptr1:ptr1+size/2], 7)
@@ -248,51 +183,6 @@ func Benchmark_strcspn(b *testing.B) {
}
}
//go:embed string.h
var source string
func Benchmark_memmem(b *testing.B) {
needle := "memcpy(dest, src, slen)"
clear(memory)
copy(memory[ptr1:], source)
copy(memory[ptr2:], needle)
b.SetBytes(int64(len(source)))
b.ResetTimer()
for range b.N {
call(memmem, ptr1, uint64(len(source)), ptr2, uint64(len(needle)))
}
}
func Benchmark_strstr(b *testing.B) {
needle := "memcpy(dest, src, slen)"
clear(memory)
copy(memory[ptr1:], source)
copy(memory[ptr2:], needle)
b.SetBytes(int64(len(source)))
b.ResetTimer()
for range b.N {
call(strstr, ptr1, ptr2)
}
}
func Benchmark_strcasestr(b *testing.B) {
needle := "MEMCPY(dest, src, slen)"
clear(memory)
copy(memory[ptr1:], source)
copy(memory[ptr2:], needle)
b.SetBytes(int64(len(source)))
b.ResetTimer()
for range b.N {
call(strcasestr, ptr1, ptr2)
}
}
func Test_strlen(t *testing.T) {
for length := range 64 {
for alignment := range 24 {
@@ -341,6 +231,10 @@ func Test_memchr(t *testing.T) {
fill(memory[ptr:ptr+max(pos, length)], 5)
memory[ptr+pos] = 7
if pos >= 0 {
memory[ptr+pos+2] = 7
}
got := call(memchr, uint64(ptr), 7, uint64(length))
if uint32(got) != uint32(want) {
t.Errorf("memchr(%d, %d, %d) = %d, want %d",
@@ -354,9 +248,14 @@ func Test_memchr(t *testing.T) {
fill(memory[ptr:ptr+length], 5)
memory[len(memory)-1] = 7
want := len(memory) - 1
if length == 0 {
want = 0
var want int
if length != 0 {
want = len(memory) - 1
got := call(memchr, uint64(ptr), 7, math.MaxUint32)
if uint32(got) != uint32(want) {
t.Errorf("memchr(%d, %d, %d) = %d, want %d",
ptr, 7, uint32(math.MaxUint32), uint32(got), uint32(want))
}
}
got := call(memchr, uint64(ptr), 7, uint64(length))
@@ -498,48 +397,6 @@ func Test_memcmp(t *testing.T) {
}
}
func Test_strcmp(t *testing.T) {
const s1 = compareTest1
const s2 = compareTest2
ptr2 := len(memory) - len(s2) - 1
clear(memory)
copy(memory[ptr1:], s1)
copy(memory[ptr2:], s2)
for i := range len(s1) + 1 {
want := strings.Compare(term(s1[i:]), term(s2[i:]))
got := call(strcmp, uint64(ptr1+i), uint64(ptr2+i))
if sign(int32(got)) != want {
t.Errorf("strcmp(%d, %d) = %d, want %d",
ptr1+i, ptr2+i, int32(got), want)
}
}
}
func Test_strncmp(t *testing.T) {
const s1 = compareTest1
const s2 = compareTest2
ptr2 := len(memory) - len(s2) - 1
clear(memory)
copy(memory[ptr1:], s1)
copy(memory[ptr2:], s2)
for i := range len(s1) + 1 {
for j := range len(s1) - i + 1 {
want := strings.Compare(term(s1[i:i+j]), term(s2[i:i+j]))
got := call(strncmp, uint64(ptr1+i), uint64(ptr2+i), uint64(j))
if sign(int32(got)) != want {
t.Errorf("strncmp(%d, %d, %d) = %d, want %d",
ptr1+i, ptr2+i, j, int32(got), want)
}
}
}
}
func Test_strspn(t *testing.T) {
for length := range 64 {
for pos := range length + 2 {
@@ -782,102 +639,6 @@ var searchTests = []searchTest{
{"000000000000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000001", 5},
}
func Test_memmem(t *testing.T) {
tt := append(searchTests,
searchTest{"abcABCabc", "A", 3},
searchTest{"fofofofofofo\x00foffofoobar", "foffof", 13},
searchTest{"0000000000000000\x000123456789012345678901234567890", "0123456789012345", 17},
)
for i := range tt {
ptr1 := uint64(len(memory) - len(tt[i].haystk))
clear(memory)
copy(memory[ptr1:], tt[i].haystk)
copy(memory[ptr2:], tt[i].needle)
var want uint64
if tt[i].out >= 0 {
want = ptr1 + uint64(tt[i].out)
}
got := call(memmem,
uint64(ptr1), uint64(len(tt[i].haystk)),
uint64(ptr2), uint64(len(tt[i].needle)))
if got != want {
t.Errorf("memmem(%q, %q) = %d, want %d",
tt[i].haystk, tt[i].needle,
uint32(got), uint32(want))
}
}
}
func Test_strstr(t *testing.T) {
tt := append(searchTests,
searchTest{"abcABCabc", "A", 3},
searchTest{"fofofofofofo\x00foffofoobar", "foffof", -1},
searchTest{"0000000000000000\x000123456789012345678901234567890", "0123456789012345", -1},
)
for i := range tt {
ptr1 := uint64(len(memory) - len(tt[i].haystk) - 1)
clear(memory)
copy(memory[ptr1:], tt[i].haystk)
copy(memory[ptr2:], tt[i].needle)
var want uint64
if tt[i].out >= 0 {
want = ptr1 + uint64(tt[i].out)
}
got := call(strstr, uint64(ptr1), uint64(ptr2))
if got != want {
t.Errorf("strstr(%q, %q) = %d, want %d",
tt[i].haystk, tt[i].needle,
uint32(got), uint32(want))
}
}
}
func Test_strcasestr(t *testing.T) {
tt := append(searchTests[1:],
searchTest{"A", "a", 0},
searchTest{"a", "A", 0},
searchTest{"Z", "z", 0},
searchTest{"z", "Z", 0},
searchTest{"@", "`", -1},
searchTest{"`", "@", -1},
searchTest{"[", "{", -1},
searchTest{"{", "[", -1},
searchTest{"abcABCabc", "A", 0},
searchTest{"fofofofofofofoffofoobarfoo", "FoFFoF", 12},
searchTest{"fofofofofofofOffOfoobarfoo", "FoFFoF", 12},
searchTest{"fofofofofofo\x00foffofoobar", "foffof", -1},
searchTest{"0000000000000000\x000123456789012345678901234567890", "0123456789012345", -1},
)
for i := range tt {
ptr1 := uint64(len(memory) - len(tt[i].haystk) - 1)
clear(memory)
copy(memory[ptr1:], tt[i].haystk)
copy(memory[ptr2:], tt[i].needle)
var want uint64
if tt[i].out >= 0 {
want = ptr1 + uint64(tt[i].out)
}
got := call(strcasestr, uint64(ptr1), uint64(ptr2))
if got != want {
t.Errorf("strcasestr(%q, %q) = %d, want %d",
tt[i].haystk, tt[i].needle,
uint32(got), uint32(want))
}
}
}
func Fuzz_memchr(f *testing.F) {
f.Fuzz(func(t *testing.T, s string, c, i byte) {
if len(s) > 128 || int(i) > len(s) {
@@ -971,120 +732,6 @@ func Fuzz_memcmp(f *testing.F) {
})
}
func Fuzz_strcmp(f *testing.F) {
const s1 = compareTest1
const s2 = compareTest2
for i := range len(compareTest1) + 1 {
f.Add(term(s1[i:]), term(s2[i:]))
}
f.Fuzz(func(t *testing.T, s1, s2 string) {
if len(s1) > 128 || len(s2) > 128 {
t.SkipNow()
}
copy(memory[ptr1:], s1)
copy(memory[ptr2:], s2)
memory[ptr1+len(s1)] = 0
memory[ptr2+len(s2)] = 0
got := call(strcmp, uint64(ptr1), uint64(ptr2))
want := strings.Compare(term(s1), term(s2))
if sign(int32(got)) != want {
t.Errorf("strcmp(%q, %q) = %d, want %d",
s1, s2, uint32(got), uint32(want))
}
})
}
func Fuzz_strncmp(f *testing.F) {
const s1 = compareTest1
const s2 = compareTest2
for i := range len(compareTest1) + 1 {
f.Add(term(s1[i:]), term(s2[i:]), byte(len(s1)))
}
f.Fuzz(func(t *testing.T, s1, s2 string, n byte) {
if len(s1) > 128 || len(s2) > 128 {
t.SkipNow()
}
copy(memory[ptr1:], s1)
copy(memory[ptr2:], s2)
memory[ptr1+len(s1)] = 0
memory[ptr2+len(s2)] = 0
got := call(strncmp, uint64(ptr1), uint64(ptr2), uint64(n))
want := bytes.Compare(
term(memory[ptr1:][:n]),
term(memory[ptr2:][:n]))
if sign(int32(got)) != want {
t.Errorf("strncmp(%q, %q, %d) = %d, want %d",
s1, s2, n, uint32(got), uint32(want))
}
})
}
func Fuzz_strcasecmp(f *testing.F) {
const s1 = compareTest1
const s2 = compareTest2
for i := range len(compareTest1) + 1 {
f.Add(term(s1[i:]), term(s2[i:]))
}
f.Fuzz(func(t *testing.T, s1, s2 string) {
if len(s1) > 128 || len(s2) > 128 {
t.SkipNow()
}
copy(memory[ptr1:], s1)
copy(memory[ptr2:], s2)
memory[ptr1+len(s1)] = 0
memory[ptr2+len(s2)] = 0
got := call(strcasecmp, uint64(ptr1), uint64(ptr2))
want := bytes.Compare(
lower(term(memory[ptr1:])),
lower(term(memory[ptr2:])))
if sign(int32(got)) != want {
t.Errorf("strcasecmp(%q, %q) = %d, want %d",
s1, s2, uint32(got), uint32(want))
}
})
}
func Fuzz_strncasecmp(f *testing.F) {
const s1 = compareTest1
const s2 = compareTest2
for i := range len(compareTest1) + 1 {
f.Add(term(s1[i:]), term(s2[i:]), byte(len(s1)))
}
f.Fuzz(func(t *testing.T, s1, s2 string, n byte) {
if len(s1) > 128 || len(s2) > 128 {
t.SkipNow()
}
copy(memory[ptr1:], s1)
copy(memory[ptr2:], s2)
memory[ptr1+len(s1)] = 0
memory[ptr2+len(s2)] = 0
got := call(strncasecmp, uint64(ptr1), uint64(ptr2), uint64(n))
want := bytes.Compare(
lower(term(memory[ptr1:][:n])),
lower(term(memory[ptr2:][:n])))
if sign(int32(got)) != want {
t.Errorf("strncasecmp(%q, %q, %d) = %d, want %d",
s1, s2, n, uint32(got), uint32(want))
}
})
}
func Fuzz_strspn(f *testing.F) {
for _, t := range searchTests {
f.Add(t.haystk, t.needle)
@@ -1155,129 +802,6 @@ func Fuzz_strcspn(f *testing.F) {
})
}
func Fuzz_memmem(f *testing.F) {
tt := append(searchTests,
searchTest{"abcABCabc", "A", 3},
searchTest{"fofofofofofo\x00foffofoobar", "foffof", 13},
searchTest{"0000000000000000\x000123456789012345678901234567890", "0123456789012345", 17},
)
for _, t := range tt {
f.Add(t.haystk, t.needle)
}
f.Fuzz(func(t *testing.T, haystk, needle string) {
if len(haystk) > 128 || len(needle) > 128 {
t.SkipNow()
}
copy(memory[ptr1:], haystk)
copy(memory[ptr2:], needle)
got := call(memmem,
uint64(ptr1), uint64(len(haystk)),
uint64(ptr2), uint64(len(needle)))
want := strings.Index(haystk, needle)
if want >= 0 {
want = ptr1 + want
} else {
want = 0
}
if uint32(got) != uint32(want) {
t.Errorf("memmem(%q, %q) = %d, want %d",
haystk, needle, uint32(got), uint32(want))
}
})
}
func Fuzz_strstr(f *testing.F) {
tt := append(searchTests,
searchTest{"abcABCabc", "A", 3},
searchTest{"fofofofofofo\x00foffofoobar", "foffof", -1},
searchTest{"0000000000000000\x000123456789012345678901234567890", "0123456789012345", -1},
)
for _, t := range tt {
f.Add(t.haystk, t.needle)
}
f.Fuzz(func(t *testing.T, haystk, needle string) {
if len(haystk) > 128 || len(needle) > 128 {
t.SkipNow()
}
copy(memory[ptr1:], haystk)
copy(memory[ptr2:], needle)
memory[ptr1+len(haystk)] = 0
memory[ptr2+len(needle)] = 0
got := call(strstr, uint64(ptr1), uint64(ptr2))
want := strings.Index(term(haystk), term(needle))
if want >= 0 {
want = ptr1 + want
} else {
want = 0
}
if uint32(got) != uint32(want) {
t.Errorf("strstr(%q, %q) = %d, want %d",
haystk, needle, uint32(got), uint32(want))
}
})
}
func Fuzz_strcasestr(f *testing.F) {
tt := append(searchTests,
searchTest{"A", "a", 0},
searchTest{"a", "A", 0},
searchTest{"Z", "z", 0},
searchTest{"z", "Z", 0},
searchTest{"@", "`", -1},
searchTest{"`", "@", -1},
searchTest{"[", "{", -1},
searchTest{"{", "[", -1},
searchTest{"abcABCabc", "A", 0},
searchTest{"fofofofofofofoffofoobarfoo", "FoFFoF", 12},
searchTest{"fofofofofofofOffOfoobarfoo", "FoFFoF", 12},
searchTest{"fofofofofofo\x00foffofoobar", "foffof", -1},
searchTest{"0000000000000000\x000123456789012345678901234567890", "0123456789012345", -1},
)
for _, t := range tt {
f.Add(t.haystk, t.needle)
}
f.Fuzz(func(t *testing.T, haystk, needle string) {
if len(haystk) > 128 || len(needle) > 128 {
t.SkipNow()
}
if len(needle) == 0 {
t.Skip("musl bug")
}
copy(memory[ptr1:], haystk)
copy(memory[ptr2:], needle)
memory[ptr1+len(haystk)] = 0
memory[ptr2+len(needle)] = 0
got := call(strcasestr, uint64(ptr1), uint64(ptr2))
want := bytes.Index(
lower(term(memory[ptr1:])),
lower(term(memory[ptr2:])))
if want >= 0 {
want = ptr1 + want
} else {
want = 0
}
if uint32(got) != uint32(want) {
t.Errorf("strcasestr(%q, %q) = %d, want %d",
haystk, needle, uint32(got), uint32(want))
}
})
}
func sign(x int32) int {
switch {
case x > 0:
@@ -1295,15 +819,6 @@ func fill(s []byte, v byte) {
}
}
func lower(s []byte) []byte {
for i, c := range s {
if 'A' <= c && c <= 'Z' {
s[i] = c - 'A' + 'a'
}
}
return s
}
func term[T interface{ []byte | string }](s T) T {
for i, c := range []byte(s) {
if c == 0 {

View File

@@ -17,19 +17,18 @@ extern "C" {
// Use the builtins if compiled with bulk memory operations.
// Clang will intrinsify using SIMD for small, constant N.
// For everything else, this helps inlining.
__attribute__((weak))
__attribute__((weak, always_inline))
void *memset(void *dest, int c, size_t n) {
return __builtin_memset(dest, c, n);
}
__attribute__((weak))
__attribute__((weak, always_inline))
void *memcpy(void *__restrict dest, const void *__restrict src, size_t n) {
return __builtin_memcpy(dest, src, n);
}
__attribute__((weak))
__attribute__((weak, always_inline))
void *memmove(void *dest, const void *src, size_t n) {
return __builtin_memmove(dest, src, n);
}
@@ -39,11 +38,11 @@ void *memmove(void *dest, const void *src, size_t n) {
#ifdef __wasm_simd128__
__attribute__((weak))
int memcmp(const void *v1, const void *v2, size_t n) {
int memcmp(const void *vl, const void *vr, size_t n) {
// Scalar algorithm.
if (n < sizeof(v128_t)) {
const unsigned char *u1 = (unsigned char *)v1;
const unsigned char *u2 = (unsigned char *)v2;
const unsigned char *u1 = (unsigned char *)vl;
const unsigned char *u2 = (unsigned char *)vr;
while (n--) {
if (*u1 != *u2) return *u1 - *u2;
u1++;
@@ -56,16 +55,16 @@ int memcmp(const void *v1, const void *v2, size_t n) {
// Find the first different character in the objects.
// Unaligned loads handle the case where the objects
// have mismatching alignments.
const v128_t *w1 = (v128_t *)v1;
const v128_t *w2 = (v128_t *)v2;
const v128_t *v1 = (v128_t *)vl;
const v128_t *v2 = (v128_t *)vr;
while (n) {
const v128_t cmp = wasm_i8x16_eq(wasm_v128_load(w1), wasm_v128_load(w2));
const v128_t cmp = wasm_i8x16_eq(wasm_v128_load(v1), wasm_v128_load(v2));
// Bitmask is slow on AArch64, all_true is much faster.
if (!wasm_i8x16_all_true(cmp)) {
// Find the offset of the first zero bit (little-endian).
size_t ctz = __builtin_ctz(~wasm_i8x16_bitmask(cmp));
const unsigned char *u1 = (unsigned char *)w1 + ctz;
const unsigned char *u2 = (unsigned char *)w2 + ctz;
const unsigned char *u1 = (unsigned char *)v1 + ctz;
const unsigned char *u2 = (unsigned char *)v2 + ctz;
// This may help the compiler if the function is inlined.
__builtin_assume(*u1 - *u2 != 0);
return *u1 - *u2;
@@ -73,15 +72,15 @@ int memcmp(const void *v1, const void *v2, size_t n) {
// This makes n a multiple of sizeof(v128_t)
// for every iteration except the first.
size_t align = (n - 1) % sizeof(v128_t) + 1;
w1 = (v128_t *)((char *)w1 + align);
w2 = (v128_t *)((char *)w2 + align);
v1 = (v128_t *)((char *)v1 + align);
v2 = (v128_t *)((char *)v2 + align);
n -= align;
}
return 0;
}
__attribute__((weak))
void *memchr(const void *v, int c, size_t n) {
void *memchr(const void *s, int c, size_t n) {
// When n is zero, a function that locates a character finds no occurrence.
// Otherwise, decrement n to ensure sub_overflow overflows
// when n would go equal-to-or-below zero.
@@ -92,28 +91,30 @@ void *memchr(const void *v, int c, size_t n) {
// memchr must behave as if it reads characters sequentially
// and stops as soon as a match is found.
// Aligning ensures loads beyond the first match are safe.
uintptr_t align = (uintptr_t)v % sizeof(v128_t);
const v128_t *w = (v128_t *)((char *)v - align);
const v128_t wc = wasm_i8x16_splat(c);
// Casting through uintptr_t makes this implementation-defined,
// rather than undefined behavior.
uintptr_t align = (uintptr_t)s % sizeof(v128_t);
const v128_t *v = (v128_t *)((uintptr_t)s - align);
const v128_t vc = wasm_i8x16_splat(c);
for (;;) {
const v128_t cmp = wasm_i8x16_eq(*w, wc);
const v128_t cmp = wasm_i8x16_eq(*v, vc);
// Bitmask is slow on AArch64, any_true is much faster.
if (wasm_v128_any_true(cmp)) {
// Clear the bits corresponding to alignment (little-endian)
// Clear the bits corresponding to align (little-endian)
// so we can count trailing zeros.
int mask = wasm_i8x16_bitmask(cmp) >> align << align;
// At least one bit will be set, unless we cleared them.
// Knowing this helps the compiler.
// At least one bit will be set, unless align cleared them.
// Knowing this helps the compiler if it unrolls the loop.
__builtin_assume(mask || align);
// If the mask is zero because of alignment,
// If the mask became zero because of align,
// it's as if we didn't find anything.
if (mask) {
// Find the offset of the first one bit (little-endian).
// That's a match, unless it is beyond the end of the object.
// Recall that we decremented n, so less-than-or-equal-to is correct.
size_t ctz = __builtin_ctz(mask);
return ctz <= n + align ? (char *)w + ctz : NULL;
return ctz - align <= n ? (char *)v + ctz : NULL;
}
}
// Decrement n; if it overflows we're done.
@@ -121,28 +122,30 @@ void *memchr(const void *v, int c, size_t n) {
return NULL;
}
align = 0;
w++;
v++;
}
}
__attribute__((weak))
void *memrchr(const void *v, int c, size_t n) {
void *memrchr(const void *s, int c, size_t n) {
// memrchr is allowed to read up to n bytes from the object.
// Search backward for the last matching character.
const v128_t *w = (v128_t *)((char *)v + n);
const v128_t wc = wasm_i8x16_splat(c);
const v128_t *v = (v128_t *)((char *)s + n);
const v128_t vc = wasm_i8x16_splat(c);
for (; n >= sizeof(v128_t); n -= sizeof(v128_t)) {
const v128_t cmp = wasm_i8x16_eq(wasm_v128_load(--w), wc);
const v128_t cmp = wasm_i8x16_eq(wasm_v128_load(--v), vc);
// Bitmask is slow on AArch64, any_true is much faster.
if (wasm_v128_any_true(cmp)) {
// Find the offset of the last one bit (little-endian).
size_t clz = __builtin_clz(wasm_i8x16_bitmask(cmp)) - 15;
return (char *)(w + 1) - clz;
// The leading 16 bits of the bitmask are always zero,
// and to be ignored.
size_t clz = __builtin_clz(wasm_i8x16_bitmask(cmp)) - 16;
return (char *)(v + 1) - (clz + 1);
}
}
// Scalar algorithm.
const char *a = (char *)w;
const char *a = (char *)v;
while (n--) {
if (*(--a) == (char)c) return (char *)a;
}
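
The memcmp hunk above leans on a chunking trick ("This makes n a multiple of sizeof(v128_t) for every iteration except the first"): since the function may read up to n bytes, the pointers first advance by the ragged remainder (n-1)%16+1, after which every remaining advance is a full 16 bytes; the first two 16-byte loads simply overlap when n is not a multiple of 16. A small Go sketch of the advance amounts that walk produces, as an illustration only:

```go
package main

import "fmt"

// steps lists the pointer advances a memcmp-style loop makes for n bytes:
// the first advance is the ragged remainder (n-1)%16+1, every later one is a
// full 16. Each SIMD load is still 16 bytes wide, so the first and second
// loads overlap whenever n is not a multiple of 16; this is harmless because
// re-comparing bytes that were already equal changes nothing.
func steps(n int) []int {
	var out []int
	for n > 0 {
		step := (n-1)%16 + 1
		out = append(out, step)
		n -= step
	}
	return out
}

func main() {
	fmt.Println(steps(16)) // [16]
	fmt.Println(steps(37)) // [5 16 16]
	fmt.Println(steps(48)) // [16 16 16]
}
```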
@@ -153,140 +156,61 @@ __attribute__((weak))
size_t strlen(const char *s) {
// strlen must stop as soon as it finds the terminator.
// Aligning ensures loads beyond the terminator are safe.
// Casting through uintptr_t makes this implementation-defined,
// rather than undefined behavior.
uintptr_t align = (uintptr_t)s % sizeof(v128_t);
const v128_t *w = (v128_t *)(s - align);
const v128_t *v = (v128_t *)((uintptr_t)s - align);
for (;;) {
// Bitmask is slow on AArch64, all_true is much faster.
if (!wasm_i8x16_all_true(*w)) {
const v128_t cmp = wasm_i8x16_eq(*w, (v128_t){});
// Clear the bits corresponding to alignment (little-endian)
if (!wasm_i8x16_all_true(*v)) {
const v128_t cmp = wasm_i8x16_eq(*v, (v128_t){});
// Clear the bits corresponding to align (little-endian)
// so we can count trailing zeros.
int mask = wasm_i8x16_bitmask(cmp) >> align << align;
// At least one bit will be set, unless we cleared them.
// Knowing this helps the compiler.
// At least one bit will be set, unless align cleared them.
// Knowing this helps the compiler if it unrolls the loop.
__builtin_assume(mask || align);
// If the mask became zero because of align,
// it's as if we didn't find anything.
if (mask) {
// Find the offset of the first one bit (little-endian).
return (char *)w - s + __builtin_ctz(mask);
return (char *)v - s + __builtin_ctz(mask);
}
}
align = 0;
w++;
v++;
}
}
static int __strcmp_s(const char *s1, const char *s2) {
// Scalar algorithm.
const unsigned char *u1 = (unsigned char *)s1;
const unsigned char *u2 = (unsigned char *)s2;
for (;;) {
if (*u1 != *u2) return *u1 - *u2;
if (*u1 == 0) break;
u1++;
u2++;
}
return 0;
}
static int __strcmp(const char *s1, const char *s2) {
// How many bytes can be read before pointers go out of bounds.
size_t N = __builtin_wasm_memory_size(0) * PAGESIZE - //
(size_t)(s1 > s2 ? s1 : s2);
// Unaligned loads handle the case where the strings
// have mismatching alignments.
const v128_t *w1 = (v128_t *)s1;
const v128_t *w2 = (v128_t *)s2;
for (; N >= sizeof(v128_t); N -= sizeof(v128_t)) {
// Find any single bit difference.
if (wasm_v128_any_true(wasm_v128_load(w1) ^ wasm_v128_load(w2))) {
// The terminator may come before the difference.
break;
}
// We know all characters are equal.
// If any is a terminator the strings are equal.
if (!wasm_i8x16_all_true(wasm_v128_load(w1))) {
return 0;
}
w1++;
w2++;
}
return __strcmp_s((char *)w1, (char *)w2);
}
__attribute__((weak, always_inline))
int strcmp(const char *s1, const char *s2) {
// Skip the vector search when comparing against small literal strings.
if (__builtin_constant_p(strlen(s2)) && strlen(s2) < sizeof(v128_t)) {
return __strcmp_s(s1, s2);
}
return __strcmp(s1, s2);
}
__attribute__((weak))
int strncmp(const char *s1, const char *s2, size_t n) {
// How many bytes can be read before pointers go out of bounds.
size_t N = __builtin_wasm_memory_size(0) * PAGESIZE - //
(size_t)(s1 > s2 ? s1 : s2);
if (n > N) n = N;
// Unaligned loads handle the case where the strings
// have mismatching alignments.
const v128_t *w1 = (v128_t *)s1;
const v128_t *w2 = (v128_t *)s2;
for (; n >= sizeof(v128_t); n -= sizeof(v128_t)) {
// Find any single bit difference.
if (wasm_v128_any_true(wasm_v128_load(w1) ^ wasm_v128_load(w2))) {
// The terminator may come before the difference.
break;
}
// We know all characters are equal.
// If any is a terminator the strings are equal.
if (!wasm_i8x16_all_true(wasm_v128_load(w1))) {
return 0;
}
w1++;
w2++;
}
// Scalar algorithm.
const unsigned char *u1 = (unsigned char *)w1;
const unsigned char *u2 = (unsigned char *)w2;
while (n--) {
if (*u1 != *u2) return *u1 - *u2;
if (*u1 == 0) break;
u1++;
u2++;
}
return 0;
}
static char *__strchrnul(const char *s, int c) {
// strchrnul must stop as soon as a match is found.
// Aligning ensures loads beyond the first match are safe.
// strchrnul must stop as soon as it finds the terminator.
// Aligning ensures loads beyond the terminator are safe.
// Casting through uintptr_t makes this implementation-defined,
// rather than undefined behavior.
uintptr_t align = (uintptr_t)s % sizeof(v128_t);
const v128_t *w = (v128_t *)(s - align);
const v128_t wc = wasm_i8x16_splat(c);
const v128_t *v = (v128_t *)((uintptr_t)s - align);
const v128_t vc = wasm_i8x16_splat(c);
for (;;) {
const v128_t cmp = wasm_i8x16_eq(*w, (v128_t){}) | wasm_i8x16_eq(*w, wc);
const v128_t cmp = wasm_i8x16_eq(*v, (v128_t){}) | wasm_i8x16_eq(*v, vc);
// Bitmask is slow on AArch64, any_true is much faster.
if (wasm_v128_any_true(cmp)) {
// Clear the bits corresponding to alignment (little-endian)
// Clear the bits corresponding to align (little-endian)
// so we can count trailing zeros.
int mask = wasm_i8x16_bitmask(cmp) >> align << align;
// At least one bit will be set, unless we cleared them.
// Knowing this helps the compiler.
// At least one bit will be set, unless align cleared them.
// Knowing this helps the compiler if it unrolls the loop.
__builtin_assume(mask || align);
// If the mask became zero because of align,
// it's as if we didn't find anything.
if (mask) {
// Find the offset of the first one bit (little-endian).
return (char *)w + __builtin_ctz(mask);
return (char *)v + __builtin_ctz(mask);
}
}
align = 0;
w++;
v++;
}
}
@@ -321,7 +245,7 @@ char *strrchr(const char *s, int c) {
return (char *)memrchr(s, c, strlen(s) + 1);
}
// SIMDized check which bytes are in a set
// SIMDized check which bytes are in a set (Geoff Langdale)
// http://0x80.pl/notesen/2018-10-18-simd-byte-lookup.html
typedef struct {
@@ -337,327 +261,133 @@ static void __wasm_v128_setbit(__wasm_v128_bitmap256_t *bitmap, int i) {
bitmap->h[lo_nibble] |= 1 << (hi_nibble - 8);
}
__attribute__((always_inline))
static int __wasm_v128_chkbit(__wasm_v128_bitmap256_t bitmap, int i) {
uint8_t hi_nibble = (uint8_t)i >> 4;
uint8_t lo_nibble = (uint8_t)i & 0xf;
uint8_t bitmask = 1 << (hi_nibble & 0x7);
uint8_t bitset = (hi_nibble < 8 ? bitmap.l : bitmap.h)[lo_nibble];
return bitmask & bitset;
}
#ifndef __wasm_relaxed_simd__
#define wasm_i8x16_relaxed_laneselect wasm_v128_bitselect
#define wasm_i8x16_relaxed_swizzle wasm_i8x16_swizzle
#endif // __wasm_relaxed_simd__
__attribute__((always_inline))
static v128_t __wasm_v128_chkbits(__wasm_v128_bitmap256_t bitmap, v128_t v) {
v128_t hi_nibbles = wasm_u8x16_shr(v, 4);
v128_t lo_nibbles = v & wasm_u8x16_const_splat(0xf);
v128_t indices_0_7 = v & wasm_u8x16_const_splat(0x8f);
v128_t indices_8_15 = (v & wasm_u8x16_const_splat(0x80)) ^ indices_0_7;
v128_t row_0_7 = wasm_i8x16_swizzle(bitmap.l, indices_0_7);
v128_t row_8_15 = wasm_i8x16_swizzle(bitmap.h, indices_8_15);
v128_t bitsets = row_0_7 | row_8_15;
v128_t hi_nibbles = wasm_u8x16_shr(v, 4);
v128_t bitmask_lookup = wasm_u8x16_const(1, 2, 4, 8, 16, 32, 64, 128, //
1, 2, 4, 8, 16, 32, 64, 128);
v128_t bitmask = wasm_i8x16_relaxed_swizzle(bitmask_lookup, hi_nibbles);
v128_t bitsets = wasm_i8x16_relaxed_laneselect(
wasm_i8x16_relaxed_swizzle(bitmap.l, lo_nibbles),
wasm_i8x16_relaxed_swizzle(bitmap.h, lo_nibbles),
wasm_i8x16_lt(hi_nibbles, wasm_u8x16_const_splat(8)));
return wasm_i8x16_eq(bitsets & bitmask, bitmask);
}
#undef wasm_i8x16_relaxed_laneselect
#undef wasm_i8x16_relaxed_swizzle
__attribute__((weak))
size_t strspn(const char *s, const char *c) {
// How many bytes can be read before the pointer goes out of bounds.
size_t N = __builtin_wasm_memory_size(0) * PAGESIZE - (size_t)s;
const v128_t *w = (v128_t *)s;
const char *const a = s;
// strspn must stop as soon as it finds the terminator.
// Aligning ensures loads beyond the terminator are safe.
// Casting through uintptr_t makes this implementation-defined,
// rather than undefined behavior.
uintptr_t align = (uintptr_t)s % sizeof(v128_t);
const v128_t *v = (v128_t *)((uintptr_t)s - align);
if (!c[0]) return 0;
if (!c[1]) {
const v128_t wc = wasm_i8x16_splat(*c);
for (; N >= sizeof(v128_t); N -= sizeof(v128_t)) {
const v128_t cmp = wasm_i8x16_eq(wasm_v128_load(w), wc);
const v128_t vc = wasm_i8x16_splat(*c);
for (;;) {
const v128_t cmp = wasm_i8x16_eq(*v, vc);
// Bitmask is slow on AArch64, all_true is much faster.
if (!wasm_i8x16_all_true(cmp)) {
// Find the offset of the first zero bit (little-endian).
size_t ctz = __builtin_ctz(~wasm_i8x16_bitmask(cmp));
return (char *)w + ctz - s;
// Clear the bits corresponding to align (little-endian)
// so we can count trailing zeros.
int mask = (uint16_t)~wasm_i8x16_bitmask(cmp) >> align << align;
// At least one bit will be set, unless align cleared them.
// Knowing this helps the compiler if it unrolls the loop.
__builtin_assume(mask || align);
// If the mask became zero because of align,
// it's as if we didn't find anything.
if (mask) {
// Find the offset of the first one bit (little-endian).
return (char *)v - s + __builtin_ctz(mask);
}
}
w++;
align = 0;
v++;
}
// Scalar algorithm.
for (s = (char *)w; *s == *c; s++);
return s - a;
}
__wasm_v128_bitmap256_t bitmap = {};
for (; *c; c++) {
__wasm_v128_setbit(&bitmap, *c);
// Terminator IS NOT on the bitmap.
__wasm_v128_setbit(&bitmap, *c);
}
for (; N >= sizeof(v128_t); N -= sizeof(v128_t)) {
const v128_t cmp = __wasm_v128_chkbits(bitmap, wasm_v128_load(w));
for (;;) {
const v128_t cmp = __wasm_v128_chkbits(bitmap, *v);
// Bitmask is slow on AArch64, all_true is much faster.
if (!wasm_i8x16_all_true(cmp)) {
// Find the offset of the first zero bit (little-endian).
size_t ctz = __builtin_ctz(~wasm_i8x16_bitmask(cmp));
return (char *)w + ctz - s;
// Clear the bits corresponding to align (little-endian)
// so we can count trailing zeros.
int mask = (uint16_t)~wasm_i8x16_bitmask(cmp) >> align << align;
// At least one bit will be set, unless align cleared them.
// Knowing this helps the compiler if it unrolls the loop.
__builtin_assume(mask || align);
// If the mask became zero because of align,
// it's as if we didn't find anything.
if (mask) {
// Find the offset of the first one bit (little-endian).
return (char *)v - s + __builtin_ctz(mask);
}
}
w++;
align = 0;
v++;
}
// Scalar algorithm.
for (s = (char *)w; __wasm_v128_chkbit(bitmap, *s); s++);
return s - a;
}
__attribute__((weak))
size_t strcspn(const char *s, const char *c) {
if (!c[0] || !c[1]) return __strchrnul(s, *c) - s;
// How many bytes can be read before the pointer goes out of bounds.
size_t N = __builtin_wasm_memory_size(0) * PAGESIZE - (size_t)s;
const v128_t *w = (v128_t *)s;
const char *const a = s;
// strcspn must stop as soon as it finds the terminator.
// Aligning ensures loads beyond the terminator are safe.
// Casting through uintptr_t makes this implementation-defined,
// rather than undefined behavior.
uintptr_t align = (uintptr_t)s % sizeof(v128_t);
const v128_t *v = (v128_t *)((uintptr_t)s - align);
__wasm_v128_bitmap256_t bitmap = {};
for (;;) {
__wasm_v128_setbit(&bitmap, *c);
do {
// Terminator IS on the bitmap.
if (!*c++) break;
}
__wasm_v128_setbit(&bitmap, *c);
} while (*c++);
for (; N >= sizeof(v128_t); N -= sizeof(v128_t)) {
const v128_t cmp = __wasm_v128_chkbits(bitmap, wasm_v128_load(w));
for (;;) {
const v128_t cmp = __wasm_v128_chkbits(bitmap, *v);
// Bitmask is slow on AArch64, any_true is much faster.
if (wasm_v128_any_true(cmp)) {
// Find the offset of the first one bit (little-endian).
size_t ctz = __builtin_ctz(wasm_i8x16_bitmask(cmp));
return (char *)w + ctz - s;
}
w++;
}
// Scalar algorithm.
for (s = (char *)w; !__wasm_v128_chkbit(bitmap, *s); s++);
return s - a;
}
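
The strspn/strcspn code above builds the 256-entry byte-set bitmap from the 0x80.pl reference linked in the diff: a byte's low nibble picks one of 16 lanes and its high nibble picks a bit within that lane, split across a low half (high nibbles 0-7) and a high half (8-15). A scalar Go model of __wasm_v128_setbit/__wasm_v128_chkbit and of a strspn-style scan, offered as a sketch only (the vector path tests 16 input bytes per iteration with swizzles, but the per-byte indexing is identical):

```go
package main

import "fmt"

// bitmap256 is a scalar stand-in for __wasm_v128_bitmap256_t: 256 bits split
// into two 16-byte halves. A byte's low nibble selects the lane (array index)
// and its high nibble selects the bit within that lane.
type bitmap256 struct {
	l, h [16]byte // low half: high nibbles 0-7, high half: high nibbles 8-15
}

func (b *bitmap256) set(c byte) {
	hi, lo := c>>4, c&0xf
	if hi < 8 {
		b.l[lo] |= 1 << hi
	} else {
		b.h[lo] |= 1 << (hi - 8)
	}
}

func (b *bitmap256) has(c byte) bool {
	hi, lo := c>>4, c&0xf
	bits := b.l[lo]
	if hi >= 8 {
		bits = b.h[lo]
	}
	return bits&(1<<(hi&7)) != 0
}

// strspnScalar mirrors the scalar idea behind strspn: count leading bytes of s
// that belong to the set. In the C code the NUL terminator is deliberately
// left out of the bitmap so it always stops the scan; here the slice length
// plays that role.
func strspnScalar(s, set string) int {
	var b bitmap256
	for i := 0; i < len(set); i++ {
		b.set(set[i])
	}
	n := 0
	for n < len(s) && b.has(s[n]) {
		n++
	}
	return n
}

func main() {
	fmt.Println(strspnScalar("123abc", "0123456789")) // 3
	fmt.Println(strspnScalar("abc", "0123456789"))    // 0
}
```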
// SIMD-friendly algorithms for substring searching
// http://0x80.pl/notesen/2016-11-28-simd-strfind.html
// For haystacks of known length and large enough needles,
// Boyer-Moore's bad-character rule may be useful,
// as proposed by Horspool, Sunday and Raita.
//
// We augment the SIMD algorithm with Quick Search's
// bad-character shift.
//
// https://www-igm.univ-mlv.fr/~lecroq/string/node14.html
// https://www-igm.univ-mlv.fr/~lecroq/string/node18.html
// https://www-igm.univ-mlv.fr/~lecroq/string/node19.html
// https://www-igm.univ-mlv.fr/~lecroq/string/node22.html
static const char *__memmem(const char *haystk, size_t sh,
const char *needle, size_t sn,
uint8_t bmbc[256]) {
// We've handled empty and single character needles.
// The needle is not longer than the haystack.
__builtin_assume(2 <= sn && sn <= sh);
// Find the farthest character not equal to the first one.
size_t i = sn - 1;
while (i > 0 && needle[0] == needle[i]) i--;
if (i == 0) i = sn - 1;
// Subtracting ensures sub_overflow overflows
// when we reach the end of the haystack.
if (sh != SIZE_MAX) sh -= sn;
const v128_t fst = wasm_i8x16_splat(needle[0]);
const v128_t lst = wasm_i8x16_splat(needle[i]);
// The last haystack offset for which loading blk_lst is safe.
const char *H = (char *)(__builtin_wasm_memory_size(0) * PAGESIZE - i -
sizeof(v128_t));
while (haystk <= H) {
const v128_t blk_fst = wasm_v128_load((v128_t *)(haystk));
const v128_t blk_lst = wasm_v128_load((v128_t *)(haystk + i));
const v128_t eq_fst = wasm_i8x16_eq(fst, blk_fst);
const v128_t eq_lst = wasm_i8x16_eq(lst, blk_lst);
const v128_t cmp = eq_fst & eq_lst;
if (wasm_v128_any_true(cmp)) {
// The terminator may come before the match.
if (sh == SIZE_MAX && !wasm_i8x16_all_true(blk_fst)) break;
// Find the offset of the first one bit (little-endian).
// Each iteration clears that bit, tries again.
for (uint32_t mask = wasm_i8x16_bitmask(cmp); mask; mask &= mask - 1) {
size_t ctz = __builtin_ctz(mask);
// The match may be after the end of the haystack.
if (ctz > sh) return NULL;
// We know the first character matches.
if (!bcmp(haystk + ctz + 1, needle + 1, sn - 1)) {
return haystk + ctz;
}
// Clear the bits corresponding to align (little-endian)
// so we can count trailing zeros.
int mask = wasm_i8x16_bitmask(cmp) >> align << align;
// At least one bit will be set, unless align cleared them.
// Knowing this helps the compiler if it unrolls the loop.
__builtin_assume(mask || align);
// If the mask became zero because of align,
// it's as if we didn't find anything.
if (mask) {
// Find the offset of the first one bit (little-endian).
return (char *)v - s + __builtin_ctz(mask);
}
}
size_t skip = sizeof(v128_t);
if (sh == SIZE_MAX) {
// Have we reached the end of the haystack?
if (!wasm_i8x16_all_true(blk_fst)) return NULL;
} else {
// Apply the bad-character rule to the character to the right
// of the rightmost character of the search window.
if (bmbc) skip += bmbc[(unsigned char)haystk[sn - 1 + sizeof(v128_t)]];
// Have we reached the end of the haystack?
if (__builtin_sub_overflow(sh, skip, &sh)) return NULL;
}
haystk += skip;
align = 0;
v++;
}
// Scalar algorithm.
for (size_t j = 0; j <= sh; j++) {
for (size_t i = 0;; i++) {
if (sn == i) return haystk;
if (sh == SIZE_MAX && !haystk[i]) return NULL;
if (needle[i] != haystk[i]) break;
}
haystk++;
}
return NULL;
}
__attribute__((weak))
void *memmem(const void *vh, size_t sh, const void *vn, size_t sn) {
// Return immediately on empty needle.
if (sn == 0) return (void *)vh;
// Return immediately when needle is longer than haystack.
if (sn > sh) return NULL;
// Skip to the first matching character using memchr,
// thereby handling single character needles.
const char *needle = (char *)vn;
const char *haystk = (char *)memchr(vh, *needle, sh);
if (!haystk || sn == 1) return (void *)haystk;
// The haystack got shorter, is the needle now longer than it?
sh -= haystk - (char *)vh;
if (sn > sh) return NULL;
// Is Boyer-Moore's bad-character rule useful?
if (sn < sizeof(v128_t) || sh - sn < sizeof(v128_t)) {
return (void *)__memmem(haystk, sh, needle, sn, NULL);
}
// Compute Boyer-Moore's bad-character shift function.
// Only the last 255 characters of the needle matter for shifts up to 255,
// which is good enough for most needles.
size_t c = sn;
size_t i = 0;
if (c >= 255) {
i = sn - 255;
c = 255;
}
#ifndef _REENTRANT
static
#endif
uint8_t bmbc[256];
memset(bmbc, c, sizeof(bmbc));
for (; i < sn; i++) {
// One less than the usual offset because
// we advance at least one vector at a time.
bmbc[(unsigned char)needle[i]] = sn - i - 1;
}
return (void *)__memmem(haystk, sh, needle, sn, bmbc);
}
__attribute__((weak))
char *strstr(const char *haystk, const char *needle) {
// Return immediately on empty needle.
if (!needle[0]) return (char *)haystk;
// Skip to the first matching character using strchr,
// thereby handling single character needles.
haystk = strchr(haystk, *needle);
if (!haystk || !needle[1]) return (char *)haystk;
return (char *)__memmem(haystk, SIZE_MAX, needle, strlen(needle), NULL);
}
__attribute__((weak))
char *strcasestr(const char *haystk, const char *needle) {
// Return immediately on empty needle.
if (!needle[0]) return (char *)haystk;
// We've handled empty needles.
size_t sn = strlen(needle);
__builtin_assume(sn >= 1);
// Find the farthest character not equal to the first one.
size_t i = sn - 1;
while (i > 0 && needle[0] == needle[i]) i--;
if (i == 0) i = sn - 1;
const v128_t fst = wasm_i8x16_splat(tolower(needle[0]));
const v128_t lst = wasm_i8x16_splat(tolower(needle[i]));
// The last haystk offset for which loading blk_lst is safe.
const char *H =
(char *)(__builtin_wasm_memory_size(0) * PAGESIZE - i - sizeof(v128_t));
while (haystk <= H) {
const v128_t blk_fst = __tolower8x16(wasm_v128_load((v128_t *)(haystk)));
const v128_t blk_lst = __tolower8x16(wasm_v128_load((v128_t *)(haystk + i)));
const v128_t eq_fst = wasm_i8x16_eq(fst, blk_fst);
const v128_t eq_lst = wasm_i8x16_eq(lst, blk_lst);
const v128_t cmp = eq_fst & eq_lst;
if (wasm_v128_any_true(cmp)) {
// The terminator may come before the match.
if (!wasm_i8x16_all_true(blk_fst)) break;
// Find the offset of the first one bit (little-endian).
// Each iteration clears that bit, tries again.
for (uint32_t mask = wasm_i8x16_bitmask(cmp); mask; mask &= mask - 1) {
size_t ctz = __builtin_ctz(mask);
if (!strncasecmp(haystk + ctz + 1, needle + 1, sn - 1)) {
return (char *)haystk + ctz;
}
}
}
// Have we reached the end of the haystack?
if (!wasm_i8x16_all_true(blk_fst)) return NULL;
haystk += sizeof(v128_t);
}
// Scalar algorithm.
for (;;) {
for (size_t i = 0;; i++) {
if (sn == i) return (char *)haystk;
if (!haystk[i]) return NULL;
if (tolower(needle[i]) != tolower(haystk[i])) break;
}
haystk++;
}
return NULL;
}
// Given the above SIMD implementations,
@@ -676,7 +406,8 @@ char *strcasestr(const char *haystk, const char *needle) {
// - strtok
__attribute__((weak))
void *memccpy(void *__restrict dest, const void *__restrict src, int c, size_t n) {
void *memccpy(void *__restrict dest, const void *__restrict src, int c,
size_t n) {
const void *m = memchr(src, c, n);
if (m != NULL) {
n = (char *)m - (char *)src + 1;
@@ -713,7 +444,8 @@ static char *__stpcpy(char *__restrict dest, const char *__restrict src) {
return dest + slen;
}
static char *__stpncpy(char *__restrict dest, const char *__restrict src, size_t n) {
static char *__stpncpy(char *__restrict dest, const char *__restrict src,
size_t n) {
size_t strnlen(const char *s, size_t n);
size_t slen = strnlen(src, n);
memcpy(dest, src, slen);

View File

@@ -1,171 +0,0 @@
#include_next <strings.h> // the system strings.h
#ifndef _WASM_SIMD128_STRINGS_H
#define _WASM_SIMD128_STRINGS_H
#include <ctype.h>
#include <stdint.h>
#include <wasm_simd128.h>
#include <__macro_PAGESIZE.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __wasm_simd128__
#ifdef __OPTIMIZE_SIZE__
// bcmp is the same as memcmp but only compares for equality.
int bcmp(const void *v1, const void *v2, size_t n);
#else // __OPTIMIZE_SIZE__
__attribute__((weak))
int bcmp(const void *v1, const void *v2, size_t n) {
// Scalar algorithm.
if (n < sizeof(v128_t)) {
const unsigned char *u1 = (unsigned char *)v1;
const unsigned char *u2 = (unsigned char *)v2;
while (n--) {
if (*u1 != *u2) return 1;
u1++;
u2++;
}
return 0;
}
// bcmp is allowed to read up to n bytes from each object.
// Unaligned loads handle the case where the objects
// have mismatching alignments.
const v128_t *w1 = (v128_t *)v1;
const v128_t *w2 = (v128_t *)v2;
while (n) {
// Find any single bit difference.
if (wasm_v128_any_true(wasm_v128_load(w1) ^ wasm_v128_load(w2))) {
return 1;
}
// This makes n a multiple of sizeof(v128_t)
// for every iteration except the first.
size_t align = (n - 1) % sizeof(v128_t) + 1;
w1 = (v128_t *)((char *)w1 + align);
w2 = (v128_t *)((char *)w2 + align);
n -= align;
}
return 0;
}
#endif // __OPTIMIZE_SIZE__
static v128_t __tolower8x16(v128_t v) {
__i8x16 i = v;
i = i + wasm_i8x16_splat(INT8_MAX - ('Z'));
i = i > wasm_i8x16_splat(INT8_MAX - ('Z' - 'A' + 1));
i = i & wasm_i8x16_splat('a' - 'A');
return v | i;
}
static int __strcasecmp_s(const char *s1, const char *s2) {
// Scalar algorithm.
const unsigned char *u1 = (unsigned char *)s1;
const unsigned char *u2 = (unsigned char *)s2;
for (;;) {
int c1 = tolower(*u1);
int c2 = tolower(*u2);
if (c1 != c2) return c1 - c2;
if (c1 == 0) break;
u1++;
u2++;
}
return 0;
}
static int __strcasecmp(const char *s1, const char *s2) {
// How many bytes can be read before pointers go out of bounds.
size_t N = __builtin_wasm_memory_size(0) * PAGESIZE - //
(size_t)(s1 > s2 ? s1 : s2);
// Unaligned loads handle the case where the strings
// have mismatching alignments.
const v128_t *w1 = (v128_t *)s1;
const v128_t *w2 = (v128_t *)s2;
for (; N >= sizeof(v128_t); N -= sizeof(v128_t)) {
v128_t v1 = __tolower8x16(wasm_v128_load(w1));
v128_t v2 = __tolower8x16(wasm_v128_load(w2));
// Find any single bit difference.
if (wasm_v128_any_true(v1 ^ v2)) {
// The terminator may come before the difference.
break;
}
// We know all characters are equal.
// If any is a terminator the strings are equal.
if (!wasm_i8x16_all_true(v1)) {
return 0;
}
w1++;
w2++;
}
return __strcasecmp_s((char *)w1, (char *)w2);
}
__attribute__((weak))
int strcasecmp(const char *s1, const char *s2) {
// Skip the vector search when comparing against small literal strings.
if (__builtin_constant_p(strlen(s2)) && strlen(s2) < sizeof(v128_t)) {
return __strcasecmp_s(s1, s2);
}
return __strcasecmp(s1, s2);
}
__attribute__((weak))
int strncasecmp(const char *s1, const char *s2, size_t n) {
// How many bytes can be read before pointers go out of bounds.
size_t N = __builtin_wasm_memory_size(0) * PAGESIZE - //
(size_t)(s1 > s2 ? s1 : s2);
if (n > N) n = N;
// Unaligned loads handle the case where the strings
// have mismatching alignments.
const v128_t *w1 = (v128_t *)s1;
const v128_t *w2 = (v128_t *)s2;
for (; n >= sizeof(v128_t); n -= sizeof(v128_t)) {
v128_t v1 = __tolower8x16(wasm_v128_load(w1));
v128_t v2 = __tolower8x16(wasm_v128_load(w2));
// Find any single bit difference.
if (wasm_v128_any_true(v1 ^ v2)) {
// The terminator may come before the difference.
break;
}
// We know all characters are equal.
// If any is a terminator the strings are equal.
if (!wasm_i8x16_all_true(v1)) {
return 0;
}
w1++;
w2++;
}
// Scalar algorithm.
const unsigned char *u1 = (unsigned char *)w1;
const unsigned char *u2 = (unsigned char *)w2;
while (n--) {
int c1 = tolower(*u1);
int c2 = tolower(*u2);
if (c1 != c2) return c1 - c2;
if (c1 == 0) break;
u1++;
u2++;
}
return 0;
}
#endif // __wasm_simd128__
#ifdef __cplusplus
} // extern "C"
#endif
#endif // _WASM_SIMD128_STRINGS_H
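
The deleted strings.h above carried a branchless ASCII lowercasing helper, __tolower8x16, used by the (also removed) strcasecmp/strcasestr: adding 127-'Z' to each signed 8-bit lane pushes exactly 'A'..'Z' above the comparison threshold, the compare result becomes a per-lane mask, and OR-ing that mask (limited to 0x20) flips the case bit. A scalar Go model of a single lane, assuming the two's-complement 8-bit wrap-around that Wasm lanes have (the real helper does this branchlessly for 16 lanes at once):

```go
package main

import "fmt"

// lower8 models one i8 lane of the removed __tolower8x16: bytes 'A'..'Z' gain
// the 0x20 case bit, every other byte value (including 0x80..0xff, which wrap
// negative in a signed lane) is returned unchanged.
func lower8(c byte) byte {
	shifted := int8(c + (127 - 'Z')) // lane-wise wrapping add
	if shifted > 127-('Z'-'A'+1) {   // lane-wise signed compare; true only for 'A'..'Z'
		return c | 0x20 // lane-wise OR with 'a'-'A'
	}
	return c
}

func main() {
	mismatches := 0
	for c := 0; c < 256; c++ {
		want := byte(c)
		if 'A' <= c && c <= 'Z' {
			want = byte(c) + ('a' - 'A')
		}
		if lower8(byte(c)) != want {
			mismatches++
		}
	}
	fmt.Println("mismatches against ASCII tolower:", mismatches) // 0
}
```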

View File

@@ -1,41 +0,0 @@
# Use strcasecmp and strncasecmp.
--- sqlite3.c.orig
+++ sqlite3.c
@@ -35685,35 +35685,15 @@
return sqlite3StrICmp(zLeft, zRight);
}
SQLITE_PRIVATE int sqlite3StrICmp(const char *zLeft, const char *zRight){
- unsigned char *a, *b;
- int c, x;
- a = (unsigned char *)zLeft;
- b = (unsigned char *)zRight;
- for(;;){
- c = *a;
- x = *b;
- if( c==x ){
- if( c==0 ) break;
- }else{
- c = (int)UpperToLower[c] - (int)UpperToLower[x];
- if( c ) break;
- }
- a++;
- b++;
- }
- return c;
+ return strcasecmp(zLeft, zRight);
}
SQLITE_API int sqlite3_strnicmp(const char *zLeft, const char *zRight, int N){
- register unsigned char *a, *b;
if( zLeft==0 ){
return zRight ? -1 : 0;
}else if( zRight==0 ){
return 1;
}
- a = (unsigned char *)zLeft;
- b = (unsigned char *)zRight;
- while( N-- > 0 && *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; }
- return N<0 ? 0 : UpperToLower[*a] - UpperToLower[*b];
+ return strncasecmp(zLeft, zRight, N);
}
/*

View File

@@ -1,7 +1,7 @@
# Remove VFS registration. Go handles it.
--- sqlite3.c.orig
+++ sqlite3.c
@@ -26726,7 +26726,7 @@
@@ -26883,7 +26883,7 @@
sqlite3_free(p);
return sqlite3_os_init();
}
@@ -10,7 +10,7 @@
/*
** The list of all registered VFS implementations.
*/
@@ -26823,7 +26823,7 @@
@@ -26980,7 +26980,7 @@
sqlite3_mutex_leave(mutex);
return SQLITE_OK;
}

View File

@@ -32,10 +32,3 @@ func (FS) Stat(name string) (fs.FileInfo, error) {
func (FS) ReadFile(name string) ([]byte, error) {
return os.ReadFile(name)
}
// OpenFile behaves the same as [os.OpenFile].
//
// Deprecated: use os.OpenFile instead.
func OpenFile(name string, flag int, perm fs.FileMode) (*os.File, error) {
return os.OpenFile(name, flag, perm)
}

View File

@@ -23,7 +23,7 @@ POSIX advisory locks,
which SQLite uses on [Unix](https://github.com/sqlite/sqlite/blob/5d60f4/src/os_unix.c#L13-L14),
are [broken by design](https://github.com/sqlite/sqlite/blob/5d60f4/src/os_unix.c#L1074-L1162).
Instead, on Linux and macOS, this package uses
[OFD locks](https://www.gnu.org/software/libc/manual/html_node/Open-File-Description-Locks.html)
[OFD locks](https://gnu.org/software/libc/manual/html_node/Open-File-Description-Locks.html)
to synchronize access to database files.
This package can also use

Binary file not shown.