Fix go mod

parent e323971011
commit c8007cf32f
@@ -105,7 +105,7 @@ steps:
     TEST_MYSQL_PASSWORD:
   commands:
   - make test-mysql
-  - TEST_CACHE_ENABLE=true make test-mysql
+  - TEST_CACHE_CONNSTR=memory make test-mysql
  - TEST_QUOTE_POLICY=reserved make test-mysql
 
 volumes:
@@ -237,7 +237,7 @@ steps:
     TEST_MSSQL_PASSWORD: "yourStrong(!)Password"
   commands:
   - make test-mssql
-  - TEST_CACHE_ENABLE=true make test-mssql
+  - TEST_CACHE_CONNSTR=memory make test-mssql
   - TEST_QUOTE_POLICY=reserved make test-mssql
   - TEST_MSSQL_DEFAULT_VARCHAR=NVARCHAR TEST_MSSQL_DEFAULT_CHAR=NCHAR make test-mssql
 
@@ -278,7 +278,7 @@ steps:
     TEST_TIDB_PASSWORD:
   commands:
   - make test-tidb
-  - TEST_CACHE_ENABLE=true make test-tidb
+  - TEST_CACHE_CONNSTR=memory make test-tidb
   - TEST_QUOTE_POLICY=reserved make test-tidb
 
 volumes:
@@ -61,7 +61,7 @@ TEST_PGSQL_HOST= TEST_PGSQL_SCHEMA= TEST_PGSQL_DBNAME= TEST_PGSQL_USERNAME= TEST
 TEST_TIDB_HOST= TEST_TIDB_DBNAME= TEST_TIDB_USERNAME= TEST_TIDB_PASSWORD= make test-tidb
 ```
 
-And if your branch is related with cache, you could also enable it via `TEST_CACHE_ENABLE=true`.
+And if your branch is related with cache, you could also enable it via `TEST_CACHE_CONNSTR=memory`.
 
 ### Patch review
 
Makefile (58 changed lines)
@@ -43,7 +43,7 @@ TEST_TIDB_DBNAME ?= xorm_test
 TEST_TIDB_USERNAME ?= root
 TEST_TIDB_PASSWORD ?=
 
-TEST_CACHE_ENABLE ?= false
+TEST_CACHE_CONNSTR ?=
 TEST_QUOTE_POLICY ?= always
 
 .PHONY: all
@@ -136,67 +136,67 @@ test: go-check
 
 .PNONY: test-cockroach
 test-cockroach: go-check
-	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=postgres -schema='$(TEST_COCKROACH_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=postgres -schema='$(TEST_COCKROACH_SCHEMA)' -cache=$(TEST_CACHE_CONNSTR) \
 	-conn_str="postgres://$(TEST_COCKROACH_USERNAME):$(TEST_COCKROACH_PASSWORD)@$(TEST_COCKROACH_HOST)/$(TEST_COCKROACH_DBNAME)?sslmode=disable&experimental_serial_normalization=sql_sequence" \
-	-ignore_update_limit=true -coverprofile=cockroach.$(TEST_COCKROACH_SCHEMA).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
+	-ignore_update_limit=true -coverprofile=cockroach.$(TEST_COCKROACH_SCHEMA).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic -timeout=20m
 
 .PHONY: test-cockroach\#%
 test-cockroach\#%: go-check
-	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=postgres -schema='$(TEST_COCKROACH_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=postgres -schema='$(TEST_COCKROACH_SCHEMA)' -cache=$(TEST_CACHE_CONNSTR) \
 	-conn_str="postgres://$(TEST_COCKROACH_USERNAME):$(TEST_COCKROACH_PASSWORD)@$(TEST_COCKROACH_HOST)/$(TEST_COCKROACH_DBNAME)?sslmode=disable&experimental_serial_normalization=sql_sequence" \
-	-ignore_update_limit=true -coverprofile=cockroach.$(TEST_COCKROACH_SCHEMA).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+	-ignore_update_limit=true -coverprofile=cockroach.$(TEST_COCKROACH_SCHEMA).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic
 
 .PNONY: test-mssql
 test-mssql: go-check
-	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mssql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mssql -cache=$(TEST_CACHE_CONNSTR) -quote=$(TEST_QUOTE_POLICY) \
 	-conn_str="server=$(TEST_MSSQL_HOST);user id=$(TEST_MSSQL_USERNAME);password=$(TEST_MSSQL_PASSWORD);database=$(TEST_MSSQL_DBNAME)" \
 	-default_varchar=$(TEST_MSSQL_DEFAULT_VARCHAR) -default_char=$(TEST_MSSQL_DEFAULT_CHAR) \
 	-do_nvarchar_override_test=$(TEST_MSSQL_DO_NVARCHAR_OVERRIDE_TEST) \
-	-coverprofile=mssql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
+	-coverprofile=mssql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic -timeout=20m
 
 .PNONY: test-mssql\#%
 test-mssql\#%: go-check
-	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mssql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mssql -cache=$(TEST_CACHE_CONNSTR) -quote=$(TEST_QUOTE_POLICY) \
 	-conn_str="server=$(TEST_MSSQL_HOST);user id=$(TEST_MSSQL_USERNAME);password=$(TEST_MSSQL_PASSWORD);database=$(TEST_MSSQL_DBNAME)" \
 	-default_varchar=$(TEST_MSSQL_DEFAULT_VARCHAR) -default_char=$(TEST_MSSQL_DEFAULT_CHAR) \
 	-do_nvarchar_override_test=$(TEST_MSSQL_DO_NVARCHAR_OVERRIDE_TEST) \
-	-coverprofile=mssql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+	-coverprofile=mssql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic
 
 .PNONY: test-mymysql
 test-mymysql: go-check
-	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mymysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mymysql -cache=$(TEST_CACHE_CONNSTR) -quote=$(TEST_QUOTE_POLICY) \
 	-conn_str="tcp:$(TEST_MYSQL_HOST)*$(TEST_MYSQL_DBNAME)/$(TEST_MYSQL_USERNAME)/$(TEST_MYSQL_PASSWORD)" \
-	-coverprofile=mymysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
+	-coverprofile=mymysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic -timeout=20m
 
 .PNONY: test-mymysql\#%
 test-mymysql\#%: go-check
-	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mymysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mymysql -cache=$(TEST_CACHE_CONNSTR) -quote=$(TEST_QUOTE_POLICY) \
 	-conn_str="tcp:$(TEST_MYSQL_HOST)*$(TEST_MYSQL_DBNAME)/$(TEST_MYSQL_USERNAME)/$(TEST_MYSQL_PASSWORD)" \
-	-coverprofile=mymysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+	-coverprofile=mymysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic
 
 .PNONY: test-mysql
 test-mysql: go-check
-	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mysql -cache=$(TEST_CACHE_CONNSTR) -quote=$(TEST_QUOTE_POLICY) \
 	-conn_str="$(TEST_MYSQL_USERNAME):$(TEST_MYSQL_PASSWORD)@tcp($(TEST_MYSQL_HOST))/$(TEST_MYSQL_DBNAME)?charset=$(TEST_MYSQL_CHARSET)" \
-	-coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
+	-coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic -timeout=20m
 
 .PHONY: test-mysql\#%
 test-mysql\#%: go-check
-	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mysql -cache=$(TEST_CACHE_CONNSTR) -quote=$(TEST_QUOTE_POLICY) \
 	-conn_str="$(TEST_MYSQL_USERNAME):$(TEST_MYSQL_PASSWORD)@tcp($(TEST_MYSQL_HOST))/$(TEST_MYSQL_DBNAME)?charset=$(TEST_MYSQL_CHARSET)" \
-	-coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+	-coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic
 
 .PNONY: test-postgres
 test-postgres: go-check
-	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=postgres -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=postgres -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_CONNSTR) \
 	-conn_str="postgres://$(TEST_PGSQL_USERNAME):$(TEST_PGSQL_PASSWORD)@$(TEST_PGSQL_HOST)/$(TEST_PGSQL_DBNAME)?sslmode=disable" \
-	-quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
+	-quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic -timeout=20m
 
 .PHONY: test-postgres\#%
 test-postgres\#%: go-check
-	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=postgres -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=postgres -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_CONNSTR) \
 	-conn_str="postgres://$(TEST_PGSQL_USERNAME):$(TEST_PGSQL_PASSWORD)@$(TEST_PGSQL_HOST)/$(TEST_PGSQL_DBNAME)?sslmode=disable" \
-	-quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+	-quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic
 
 .PHONY: test-sqlite3
 test-sqlite3: go-check
@@ -216,30 +216,30 @@ test-sqlite3\#%: go-check
 .PHONY: test-sqlite
 test-sqlite: go-check
 	$(GO) test $(INTEGRATION_PACKAGES) -v -race -cache=$(TEST_CACHE_ENABLE) -db=sqlite -conn_str="./test.db?cache=shared&mode=rwc" \
-	-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
+	-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic -timeout=20m
 
 .PHONY: test-sqlite-schema
 test-sqlite-schema: go-check
 	$(GO) test $(INTEGRATION_PACKAGES) -v -race -schema=xorm -cache=$(TEST_CACHE_ENABLE) -db=sqlite -conn_str="./test.db?cache=shared&mode=rwc" \
-	-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
+	-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic -timeout=20m
 
 .PHONY: test-sqlite\#%
 test-sqlite\#%: go-check
-	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -cache=$(TEST_CACHE_ENABLE) -db=sqlite -conn_str="./test.db?cache=shared&mode=rwc" \
-	-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -cache=$(TEST_CACHE_CONNSTR) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \
+	-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic
 
 
 .PNONY: test-tidb
 test-tidb: go-check
-	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mysql -cache=$(TEST_CACHE_ENABLE) -ignore_select_update=true \
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mysql -cache=$(TEST_CACHE_CONNSTR) -ignore_select_update=true \
 	-conn_str="$(TEST_TIDB_USERNAME):$(TEST_TIDB_PASSWORD)@tcp($(TEST_TIDB_HOST))/$(TEST_TIDB_DBNAME)" \
-	-quote=$(TEST_QUOTE_POLICY) -coverprofile=tidb.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
+	-quote=$(TEST_QUOTE_POLICY) -coverprofile=tidb.$(TEST_QUOTE_POLICY).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic -timeout=20m
 
 .PHONY: test-tidb\#%
 test-tidb\#%: go-check
-	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mysql -cache=$(TEST_CACHE_ENABLE) -ignore_select_update=true \
+	$(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mysql -cache=$(TEST_CACHE_CONNSTR) -ignore_select_update=true \
 	-conn_str="$(TEST_TIDB_USERNAME):$(TEST_TIDB_PASSWORD)@tcp($(TEST_TIDB_HOST))/$(TEST_TIDB_DBNAME)" \
-	-quote=$(TEST_QUOTE_POLICY) -coverprofile=tidb.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
+	-quote=$(TEST_QUOTE_POLICY) -coverprofile=tidb.$(TEST_QUOTE_POLICY).$(TEST_CACHE_CONNSTR).coverage.out -covermode=atomic
 
 .PHONY: vet
 vet:
@@ -47,17 +47,17 @@ type CacheStore interface {
 // Cacher is an interface to provide cache
 // id format : u-<pk1>-<pk2>...
 type Cacher interface {
-	GetIds(tableName, sql string) interface{}
+	GetIDs(tableName, sql string) interface{}
 	GetBean(tableName string, id string) interface{}
-	PutIds(tableName, sql string, ids interface{})
+	PutIDs(tableName, sql string, ids interface{})
 	PutBean(tableName string, id string, obj interface{})
-	DelIds(tableName, sql string)
+	DelIDs(tableName, sql string)
 	DelBean(tableName string, id string)
-	ClearIds(tableName string)
+	ClearIDs(tableName string)
 	ClearBeans(tableName string)
 }
 
-func encodeIds(ids []schemas.PK) (string, error) {
+func encodeIDs(ids []schemas.PK) (string, error) {
 	buf := new(bytes.Buffer)
 	enc := gob.NewEncoder(buf)
 	err := enc.Encode(ids)
@@ -65,7 +65,7 @@ func encodeIds(ids []schemas.PK) (string, error) {
 	return buf.String(), err
 }
 
-func decodeIds(s string) ([]schemas.PK, error) {
+func decodeIDs(s string) ([]schemas.PK, error) {
 	pks := make([]schemas.PK, 0)
 
 	dec := gob.NewDecoder(strings.NewReader(s))
@@ -74,26 +74,26 @@ func decodeIds(s string) ([]schemas.PK, error) {
 	return pks, err
 }
 
-// GetCacheSql returns cacher PKs via SQL
-func GetCacheSql(m Cacher, tableName, sql string, args interface{}) ([]schemas.PK, error) {
-	bytes := m.GetIds(tableName, GenSqlKey(sql, args))
+// GetCacheSQL returns cacher PKs via SQL
+func GetCacheSQL(m Cacher, tableName, sql string, args interface{}) ([]schemas.PK, error) {
+	bytes := m.GetIDs(tableName, GenSQLKey(sql, args))
 	if bytes == nil {
 		return nil, errors.New("Not Exist")
 	}
-	return decodeIds(bytes.(string))
+	return decodeIDs(bytes.(string))
 }
 
-// PutCacheSql puts cacher SQL and PKs
-func PutCacheSql(m Cacher, ids []schemas.PK, tableName, sql string, args interface{}) error {
-	bytes, err := encodeIds(ids)
+// PutCacheSQL puts cacher SQL and PKs
+func PutCacheSQL(m Cacher, ids []schemas.PK, tableName, sql string, args interface{}) error {
+	bytes, err := encodeIDs(ids)
 	if err != nil {
 		return err
 	}
-	m.PutIds(tableName, GenSqlKey(sql, args), bytes)
+	m.PutIDs(tableName, GenSQLKey(sql, args), bytes)
 	return nil
 }
 
-// GenSqlKey generates cache key
-func GenSqlKey(sql string, args interface{}) string {
+// GenSQLKey generates cache key
+func GenSQLKey(sql string, args interface{}) string {
 	return fmt.Sprintf("%v-%v", sql, args)
 }
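The exported helpers above only change spelling (`Ids`/`Sql` become `IDs`/`SQL`); the gob encoding of primary keys and the key produced by `GenSQLKey` stay the same. As a rough illustration of how a caller round-trips PKs through any `Cacher` with the renamed functions — a minimal sketch, assuming the in-memory LRU cacher from the same package; the table name, SQL, and args are purely illustrative:

```go
package main

import (
	"fmt"

	"xorm.io/xorm/caches"
	"xorm.io/xorm/schemas"
)

func main() {
	// Any Cacher works here; the LRU cacher over a memory store is the
	// simplest implementation shipped with xorm.
	cacher := caches.NewLRUCacher(caches.NewMemoryStore(), 1000)

	sql := "SELECT id FROM user WHERE name=?"
	args := []interface{}{"alice"}
	ids := []schemas.PK{{int64(1)}, {int64(2)}}

	// PutCacheSQL gob-encodes the PKs and stores them under GenSQLKey(sql, args).
	if err := caches.PutCacheSQL(cacher, ids, "user", sql, args); err != nil {
		panic(err)
	}

	// GetCacheSQL looks the same key up again and decodes the PK slice.
	got, err := caches.GetCacheSQL(cacher, "user", sql, args)
	fmt.Println(got, err) // expected: the two PKs and a nil error
}
```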
@@ -74,7 +74,7 @@ func (m *LRUCacher) GC() {
 			removedNum++
 			next := e.Next()
 			node := e.Value.(*sqlNode)
-			m.delIds(node.tbName, node.sql)
+			m.delIDs(node.tbName, node.sql)
 			e = next
 		} else {
 			break
@@ -83,7 +83,7 @@ func (m *LRUCacher) GC() {
 }
 
 // GetIds returns all bean's ids according to sql and parameter from cache
-func (m *LRUCacher) GetIds(tableName, sql string) interface{} {
+func (m *LRUCacher) GetIDs(tableName, sql string) interface{} {
 	m.mutex.Lock()
 	defer m.mutex.Unlock()
 	if _, ok := m.sqlIndex[tableName]; !ok {
@@ -97,7 +97,7 @@ func (m *LRUCacher) GetIds(tableName, sql string) interface{} {
 		lastTime := el.Value.(*sqlNode).lastVisit
 		// if expired, remove the node and return nil
 		if time.Now().Sub(lastTime) > m.Expired {
-			m.delIds(tableName, sql)
+			m.delIDs(tableName, sql)
 			return nil
 		}
 		m.sqlList.MoveToBack(el)
@@ -106,7 +106,7 @@ func (m *LRUCacher) GetIds(tableName, sql string) interface{} {
 		return v
 	}
 
-	m.delIds(tableName, sql)
+	m.delIDs(tableName, sql)
 	return nil
 }
 
@@ -140,8 +140,8 @@ func (m *LRUCacher) GetBean(tableName string, id string) interface{} {
 	return nil
 }
 
-// clearIds clears all sql-ids mapping on table tableName from cache
-func (m *LRUCacher) clearIds(tableName string) {
+// clearIDs clears all sql-ids mapping on table tableName from cache
+func (m *LRUCacher) clearIDs(tableName string) {
 	if tis, ok := m.sqlIndex[tableName]; ok {
 		for sql, v := range tis {
 			m.sqlList.Remove(v)
@@ -151,10 +151,10 @@ func (m *LRUCacher) clearIds(tableName string) {
 	m.sqlIndex[tableName] = make(map[string]*list.Element)
 }
 
-// ClearIds clears all sql-ids mapping on table tableName from cache
-func (m *LRUCacher) ClearIds(tableName string) {
+// ClearIDs clears all sql-ids mapping on table tableName from cache
+func (m *LRUCacher) ClearIDs(tableName string) {
 	m.mutex.Lock()
-	m.clearIds(tableName)
+	m.clearIDs(tableName)
 	m.mutex.Unlock()
 }
 
@@ -176,8 +176,8 @@ func (m *LRUCacher) ClearBeans(tableName string) {
 	m.mutex.Unlock()
 }
 
-// PutIds pus ids into table
-func (m *LRUCacher) PutIds(tableName, sql string, ids interface{}) {
+// PutIDs pus ids into table
+func (m *LRUCacher) PutIDs(tableName, sql string, ids interface{}) {
 	m.mutex.Lock()
 	if _, ok := m.sqlIndex[tableName]; !ok {
 		m.sqlIndex[tableName] = make(map[string]*list.Element)
@@ -192,7 +192,7 @@ func (m *LRUCacher) PutIds(tableName, sql string, ids interface{}) {
 	if m.sqlList.Len() > m.MaxElementSize {
 		e := m.sqlList.Front()
 		node := e.Value.(*sqlNode)
-		m.delIds(node.tbName, node.sql)
+		m.delIDs(node.tbName, node.sql)
 	}
 	m.mutex.Unlock()
 }
@@ -219,7 +219,7 @@ func (m *LRUCacher) PutBean(tableName string, id string, obj interface{}) {
 	m.mutex.Unlock()
 }
 
-func (m *LRUCacher) delIds(tableName, sql string) {
+func (m *LRUCacher) delIDs(tableName, sql string) {
 	if _, ok := m.sqlIndex[tableName]; ok {
 		if el, ok := m.sqlIndex[tableName][sql]; ok {
 			delete(m.sqlIndex[tableName], sql)
@@ -229,10 +229,10 @@ func (m *LRUCacher) delIds(tableName, sql string) {
 	m.store.Del(sql)
 }
 
-// DelIds deletes ids
-func (m *LRUCacher) DelIds(tableName, sql string) {
+// DelIDs deletes ids
+func (m *LRUCacher) DelIDs(tableName, sql string) {
 	m.mutex.Lock()
-	m.delIds(tableName, sql)
+	m.delIDs(tableName, sql)
 	m.mutex.Unlock()
 }
 
@@ -241,7 +241,7 @@ func (m *LRUCacher) delBean(tableName string, id string) {
 	if el, ok := m.idIndex[tableName][id]; ok {
 		delete(m.idIndex[tableName], id)
 		m.idList.Remove(el)
-		m.clearIds(tableName)
+		m.clearIDs(tableName)
 	}
 	m.store.Del(tid)
 }
@@ -29,12 +29,12 @@ func TestLRUCache(t *testing.T) {
 	sid, err := pk.ToString()
 	assert.NoError(t, err)
 
-	cacher.PutIds(tableName, "select * from cache_object1", sid)
-	ids := cacher.GetIds(tableName, "select * from cache_object1")
+	cacher.PutIDs(tableName, "select * from cache_object1", sid)
+	ids := cacher.GetIDs(tableName, "select * from cache_object1")
 	assert.EqualValues(t, sid, ids)
 
-	cacher.ClearIds(tableName)
-	ids2 := cacher.GetIds(tableName, "select * from cache_object1")
+	cacher.ClearIDs(tableName)
+	ids2 := cacher.GetIDs(tableName, "select * from cache_object1")
 	assert.Nil(t, ids2)
 
 	obj2 := cacher.GetBean(tableName, sid)
@@ -0,0 +1,281 @@
+package caches
+
+import (
+	"bytes"
+	"encoding/gob"
+	"fmt"
+	"hash/crc32"
+	"reflect"
+	"time"
+	"unsafe"
+
+	"github.com/go-redis/redis/v7"
+
+	"xorm.io/xorm/log"
+)
+
+const (
+	// DefaultRedisExpiration defaults expiration, default will not expiration
+	DefaultRedisExpiration = time.Duration(0)
+	foreverExpiration      = time.Duration(-1)
+	loggerPrefix           = "[redis_cacher]"
+)
+
+var _ Cacher = &RedisCacher{}
+
+// RedisCacher wraps the Redis client to meet the Cache interface.
+type RedisCacher struct {
+	client            *redis.Client
+	defaultExpiration time.Duration
+	logger            log.Logger
+}
+
+// NewRedisCacher creates a Redis Cacher, host as IP endpoint, i.e., localhost:6379, provide empty string or nil if Redis server doesn't
+// require AUTH command, defaultExpiration sets the expire duration for a key to live. Until redigo supports
+// sharding/clustering, only one host will be in hostList
+//
+// engine.SetDefaultCacher(caches.NewRedisCacher("localhost:6379", "", caches.DefaultRedisExpiration, engine.Logger()))
+//
+// or set MapCacher
+//
+// engine.MapCacher(&user, caches.NewRedisCacher("localhost:6379", "", caches.DefaultRedisExpiration, engine.Logger()))
+//
+func NewRedisCacher(host string, password string, dbIdx int, defaultExpiration time.Duration, logger log.Logger) *RedisCacher {
+	client := redis.NewClient(&redis.Options{
+		Addr:     host,
+		Password: password, // no password set
+		DB:       dbIdx,    // use default DB
+	})
+
+	return &RedisCacher{
+		client:            client,
+		defaultExpiration: defaultExpiration,
+		logger:            logger,
+	}
+}
+
+func (c *RedisCacher) logErrf(format string, contents ...interface{}) {
+	if c.logger != nil {
+		c.logger.Errorf(fmt.Sprintf("%s %s", loggerPrefix, format), contents...)
+	}
+}
+
+func (c *RedisCacher) logDebugf(format string, contents ...interface{}) {
+	if c.logger != nil {
+		c.logger.Debugf(fmt.Sprintf("%s %s", loggerPrefix, format), contents...)
+	}
+}
+
+func (c *RedisCacher) getBeanKey(tableName string, id string) string {
+	return fmt.Sprintf("xorm:bean:%s:%s", tableName, id)
+}
+
+func (c *RedisCacher) getSQLKey(tableName string, sql string) string {
+	// hash sql to minimize key length
+	crc := crc32.ChecksumIEEE([]byte(sql))
+	return fmt.Sprintf("xorm:sql:%s:%d", tableName, crc)
+}
+
+// Flush deletes all xorm cached objects
+func (c *RedisCacher) Flush() error {
+	// conn := c.pool.Get()
+	// defer conn.Close()
+	// _, err := conn.Do("FLUSHALL")
+	// return err
+	return c.delObject("xorm:*")
+}
+
+func (c *RedisCacher) getObject(key string) interface{} {
+	bs, err := c.client.Get(key).Bytes()
+	if err != nil {
+		c.logErrf("redis.Bytes failed: %v", err)
+		return nil
+	}
+
+	value, err := c.deserialize(bs)
+	if err != nil {
+		c.logErrf("deserialize: %v", err)
+		return nil
+	}
+
+	return value
+}
+
+// GetIDs implemented Cacher
+func (c *RedisCacher) GetIDs(tableName, sql string) interface{} {
+	sqlKey := c.getSQLKey(tableName, sql)
+	c.logDebugf(" GetIds|tableName:%s|sql:%s|key:%s", tableName, sql, sqlKey)
+	return c.getObject(sqlKey)
+}
+
+// GetBean implemented Cacher
+func (c *RedisCacher) GetBean(tableName string, id string) interface{} {
+	beanKey := c.getBeanKey(tableName, id)
+	c.logDebugf("[xorm/redis_cacher] GetBean|tableName:%s|id:%s|key:%s", tableName, id, beanKey)
+	return c.getObject(beanKey)
+}
+
+func (c *RedisCacher) putObject(key string, value interface{}) {
+	c.set(key, value, c.defaultExpiration)
+}
+
+// PutIDs implemented Cacher
+func (c *RedisCacher) PutIDs(tableName, sql string, ids interface{}) {
+	sqlKey := c.getSQLKey(tableName, sql)
+	c.logDebugf("PutIds|tableName:%s|sql:%s|key:%s|obj:%s|type:%v", tableName, sql, sqlKey, ids, reflect.TypeOf(ids))
+	c.putObject(sqlKey, ids)
+}
+
+// PutBean implemented Cacher
+func (c *RedisCacher) PutBean(tableName string, id string, obj interface{}) {
+	beanKey := c.getBeanKey(tableName, id)
+	c.logDebugf("PutBean|tableName:%s|id:%s|key:%s|type:%v", tableName, id, beanKey, reflect.TypeOf(obj))
+	c.putObject(beanKey, obj)
+}
+
+func (c *RedisCacher) delObject(key string) error {
+	c.logDebugf("delObject key:[%s]", key)
+
+	r, err := c.client.Do("EXISTS", key).Result()
+	if err != nil {
+		return err
+	}
+	if exist, ok := r.(bool); ok && !exist {
+		c.logErrf("delObject key:[%s] err: %v", key, ErrCacheMiss)
+		return ErrCacheMiss
+	}
+
+	_, err = c.client.Do("DEL", key).Result()
+	return err
+}
+
+func (c *RedisCacher) delObjects(key string) error {
+	c.logDebugf("delObjects key:[%s]", key)
+
+	keys, err := c.client.Do("KEYS", key).Result()
+	c.logDebugf("delObjects keys: %v", keys)
+	if err != nil {
+		return err
+	}
+
+	for _, key := range keys.([]interface{}) {
+		_, err = c.client.Do("DEL", key).Result()
+		if err != nil {
+			c.logErrf("delObje")
+		}
+	}
+
+	return nil
+}
+
+// DelIDs implemented Cacher
+func (c *RedisCacher) DelIDs(tableName, sql string) {
+	c.delObject(c.getSQLKey(tableName, sql))
+}
+
+// DelBean implemented Cacher
+func (c *RedisCacher) DelBean(tableName string, id string) {
+	c.delObject(c.getBeanKey(tableName, id))
+}
+
+// ClearIDs implemented Cacher
+func (c *RedisCacher) ClearIDs(tableName string) {
+	c.delObjects(fmt.Sprintf("xorm:sql:%s:*", tableName))
+}
+
+// ClearBeans implemented Cacher
+func (c *RedisCacher) ClearBeans(tableName string) {
+	c.delObjects(c.getBeanKey(tableName, "*"))
+}
+
+func (c *RedisCacher) set(key string, value interface{}, expires time.Duration) error {
+	switch expires {
+	case DefaultRedisExpiration:
+		expires = c.defaultExpiration
+	case foreverExpiration:
+		expires = time.Duration(0)
+	}
+
+	b, err := c.serialize(value)
+	if err != nil {
+		return err
+	}
+
+	if expires > 0 {
+		_, err = c.client.Do("SETEX", key, int32(expires/time.Second), b).Result()
+		return err
+	}
+	_, err = c.client.Do("SET", key, b).Result()
+	return err
+}
+
+func (c *RedisCacher) serialize(value interface{}) ([]byte, error) {
+	err := c.registerGobConcreteType(value)
+	if err != nil {
+		return nil, err
+	}
+
+	if reflect.TypeOf(value).Kind() == reflect.Struct {
+		return nil, fmt.Errorf("serialize func only take pointer of a struct")
+	}
+
+	var b bytes.Buffer
+	encoder := gob.NewEncoder(&b)
+
+	c.logDebugf("serialize type:%v", reflect.TypeOf(value))
+	err = encoder.Encode(&value)
+	if err != nil {
+		c.logErrf("gob encoding '%s' failed: %s|value:%v", value, err, value)
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+func (c *RedisCacher) deserialize(byt []byte) (ptr interface{}, err error) {
+	b := bytes.NewBuffer(byt)
+	decoder := gob.NewDecoder(b)
+
+	var p interface{}
+	err = decoder.Decode(&p)
+	if err != nil {
+		c.logErrf("decode failed: %v", err)
+		return
+	}
+
+	v := reflect.ValueOf(p)
+	c.logDebugf("deserialize type:%v", v.Type())
+	if v.Kind() == reflect.Struct {
+
+		var pp interface{} = &p
+		datas := reflect.ValueOf(pp).Elem().InterfaceData()
+
+		sp := reflect.NewAt(v.Type(),
+			unsafe.Pointer(datas[1])).Interface()
+		ptr = sp
+		vv := reflect.ValueOf(ptr)
+		c.logDebugf("deserialize convert ptr type:%v | CanAddr:%t", vv.Type(), vv.CanAddr())
+	} else {
+		ptr = p
+	}
+	return
+}
+
+func (c *RedisCacher) registerGobConcreteType(value interface{}) error {
+	t := reflect.TypeOf(value)
+
+	c.logDebugf("registerGobConcreteType:%v", t)
+
+	switch t.Kind() {
+	case reflect.Ptr:
+		v := reflect.ValueOf(value)
+		i := v.Elem().Interface()
+		gob.Register(&i)
+	case reflect.Struct, reflect.Map, reflect.Slice:
+		gob.Register(value)
+	case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+		// do nothing since already registered known type
+	default:
+		return fmt.Errorf("unhandled type: %v", t)
+	}
+	return nil
+}
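Note that the constructor above gained a `dbIdx` parameter while the doc comment still shows the old four-argument call. A hedged sketch of wiring the new `RedisCacher` into an engine, following the integration-test code later in this commit; the sqlite DSN and Redis address are placeholders:

```go
package main

import (
	_ "github.com/mattn/go-sqlite3"
	"xorm.io/xorm"
	"xorm.io/xorm/caches"
	"xorm.io/xorm/log"
)

func main() {
	engine, err := xorm.NewEngine("sqlite3", "./test.db")
	if err != nil {
		panic(err)
	}

	// The integration tests in this commit obtain a log.Logger the same way.
	logger, ok := engine.Logger().(log.Logger)
	if !ok {
		panic("engine logger does not implement log.Logger")
	}

	// New signature: host, password, Redis DB index, default TTL, logger.
	cacher := caches.NewRedisCacher("localhost:6379", "", 0, caches.DefaultRedisExpiration, logger)

	// Cache every mapped table by default; engine.MapCacher(bean, cacher)
	// would instead attach it to a single table.
	engine.SetDefaultCacher(cacher)
}
```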
@@ -892,7 +892,7 @@ func (engine *Engine) ClearCacheBean(bean interface{}, id string) error {
 	tableName := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean)
 	cacher := engine.GetCacher(tableName)
 	if cacher != nil {
-		cacher.ClearIds(tableName)
+		cacher.ClearIDs(tableName)
 		cacher.DelBean(tableName, id)
 	}
 	return nil
@@ -904,7 +904,7 @@ func (engine *Engine) ClearCache(beans ...interface{}) error {
 		tableName := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean)
 		cacher := engine.GetCacher(tableName)
 		if cacher != nil {
-			cacher.ClearIds(tableName)
+			cacher.ClearIDs(tableName)
 			cacher.ClearBeans(tableName)
 		}
 	}
go.mod (1 changed line)
@@ -6,6 +6,7 @@ require (
 	github.com/denisenkom/go-mssqldb v0.10.0
 	github.com/go-sql-driver/mysql v1.6.0
 	github.com/goccy/go-json v0.7.4
+	github.com/go-redis/redis/v7 v7.2.0
 	github.com/json-iterator/go v1.1.11
 	github.com/lib/pq v1.10.2
 	github.com/mattn/go-sqlite3 v1.14.8
go.sum (4 changed lines)
@@ -21,6 +21,8 @@ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZ
 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
 github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo=
@@ -49,6 +51,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -10,6 +10,7 @@ import (
 	"fmt"
 	"net/url"
 	"os"
+	"strconv"
 	"strings"
 	"testing"
 
@@ -30,7 +31,7 @@ var (
 	showSQL = flag.Bool("show_sql", true, "show generated SQLs")
 	ptrConnStr = flag.String("conn_str", "./test.db?cache=shared&mode=rwc", "test database connection string")
 	mapType = flag.String("map_type", "snake", "indicate the name mapping")
-	cacheFlag = flag.Bool("cache", false, "if enable cache")
+	cacheConnstr = flag.String("cache", "none", "Enable cache or not, could be <empty>, memory, leveldb://<dirpath> or redis://:<passwd>@host")
 	cluster = flag.Bool("cluster", false, "if this is a cluster")
 	splitter = flag.String("splitter", ";", "the splitter on connstr for cluster")
 	schema = flag.String("schema", "", "specify the schema")
@@ -125,9 +126,39 @@ func createEngine(dbType, connStr string) error {
 	}
 	testEngine.ShowSQL(*showSQL)
 	testEngine.SetLogLevel(log.LOG_DEBUG)
-	if *cacheFlag {
-		cacher := caches.NewLRUCacher(caches.NewMemoryStore(), 100000)
-		testEngine.SetDefaultCacher(cacher)
+	if cacheConnstr != nil {
+		switch {
+		case *cacheConnstr == "memory":
+			cacher := caches.NewLRUCacher(caches.NewMemoryStore(), 100000)
+			testEngine.SetDefaultCacher(cacher)
+		case strings.HasPrefix(*cacheConnstr, "leveldb"):
+			p := (*cacheConnstr)[10:]
+			store, err := caches.NewLevelDBStore(p)
+			if err != nil {
+				return err
+			}
+			cacher := caches.NewLRUCacher(store, 100000)
+			testEngine.SetDefaultCacher(cacher)
+		case strings.HasPrefix(*cacheConnstr, "redis"):
+			u, err := url.Parse(*cacheConnstr)
+			if err != nil {
+				return err
+			}
+			var passwd string
+			if u.User != nil {
+				passwd, _ = u.User.Password()
+			}
+			var dbIdx int
+			p := strings.TrimPrefix(u.Path, "/")
+			if p != "" {
+				dbIdx, _ = strconv.Atoi(p)
+			}
+
+			logger := testEngine.(*xorm.Engine).Logger().(log.Logger)
+			cacher := caches.NewRedisCacher(u.Host, passwd, dbIdx, caches.DefaultRedisExpiration, logger)
+			testEngine.SetDefaultCacher(cacher)
+		}
 	}
 
 	if len(*mapType) > 0 {
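The redis branch of the new `-cache` flag leans on `net/url`: the password comes from the userinfo part and the Redis DB index from the path, while the leveldb branch simply slices off the 10-character `leveldb://` prefix to get the store's directory path. A standalone sketch of the redis parsing, with an illustrative connection string:

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
	"strings"
)

func main() {
	connstr := "redis://:mypass@localhost:6379/2" // example value for -cache

	u, err := url.Parse(connstr)
	if err != nil {
		panic(err)
	}

	// Userinfo of the form ":mypass" yields an empty username and the password.
	var passwd string
	if u.User != nil {
		passwd, _ = u.User.Password()
	}

	// The path "/2" selects Redis logical database 2.
	var dbIdx int
	if p := strings.TrimPrefix(u.Path, "/"); p != "" {
		dbIdx, _ = strconv.Atoi(p)
	}

	fmt.Println(u.Host, passwd, dbIdx) // localhost:6379 mypass 2
}
```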
@@ -38,7 +38,7 @@ func (session *Session) cacheDelete(table *schemas.Table, tableName, sqlStr stri
 
 	cacher := session.engine.cacherMgr.GetCacher(tableName)
 	pkColumns := table.PKColumns()
-	ids, err := caches.GetCacheSql(cacher, tableName, newsql, args)
+	ids, err := caches.GetCacheSQL(cacher, tableName, newsql, args)
 	if err != nil {
 		resultsSlice, err := session.queryBytes(newsql, args...)
 		if err != nil {
@@ -78,7 +78,7 @@ func (session *Session) cacheDelete(table *schemas.Table, tableName, sqlStr stri
 		cacher.DelBean(tableName, sid)
 	}
 	session.engine.logger.Debugf("[cache] clear cache table: %v", tableName)
-	cacher.ClearIds(tableName)
+	cacher.ClearIDs(tableName)
 	return nil
 }
 
@@ -311,7 +311,7 @@ func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr in
 	}
 
 	table := session.statement.RefTable
-	ids, err := caches.GetCacheSql(cacher, tableName, newsql, args)
+	ids, err := caches.GetCacheSQL(cacher, tableName, newsql, args)
 	if err != nil {
 		rows, err := session.queryRows(newsql, args...)
 		if err != nil {
@@ -347,7 +347,7 @@ func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr in
 		}
 
 		session.engine.logger.Debugf("[cache] cache sql: %v, %v, %v, %v, %v", ids, tableName, sqlStr, newsql, args)
-		err = caches.PutCacheSql(cacher, ids, tableName, newsql, args)
+		err = caches.PutCacheSQL(cacher, ids, tableName, newsql, args)
 		if err != nil {
 			return err
 		}
@@ -300,7 +300,7 @@ func (session *Session) cacheGet(bean interface{}, sqlStr string, args ...interf
 
 	session.engine.logger.Debugf("[cache] Get SQL: %s, %v", newsql, args)
 	table := session.statement.RefTable
-	ids, err := caches.GetCacheSql(cacher, tableName, newsql, args)
+	ids, err := caches.GetCacheSQL(cacher, tableName, newsql, args)
 	if err != nil {
 		var res = make([]string, len(table.PrimaryKeys))
 		rows, err := session.NoCache().queryRows(newsql, args...)
@@ -338,7 +338,7 @@ func (session *Session) cacheGet(bean interface{}, sqlStr string, args ...interf
 
 		ids = []schemas.PK{pk}
 		session.engine.logger.Debugf("[cache] cache ids: %s, %v", newsql, ids)
-		err = caches.PutCacheSql(cacher, ids, tableName, newsql, args)
+		err = caches.PutCacheSQL(cacher, ids, tableName, newsql, args)
 		if err != nil {
 			return false, err
 		}
@@ -445,7 +445,7 @@ func (session *Session) cacheInsert(table string) error {
 		return nil
 	}
 	session.engine.logger.Debugf("[cache] clear SQL: %v", table)
-	cacher.ClearIds(table)
+	cacher.ClearIDs(table)
 	return nil
 }
 
@@ -49,7 +49,7 @@ func (session *Session) cacheUpdate(table *schemas.Table, tableName, sqlStr stri
 
 	cacher := session.engine.GetCacher(tableName)
 	session.engine.logger.Debugf("[cache] get cache sql: %v, %v", newsql, args[nStart:])
-	ids, err := caches.GetCacheSql(cacher, tableName, newsql, args[nStart:])
+	ids, err := caches.GetCacheSQL(cacher, tableName, newsql, args[nStart:])
 	if err != nil {
 		rows, err := session.NoCache().queryRows(newsql, args[nStart:]...)
 		if err != nil {
@@ -137,7 +137,7 @@ func (session *Session) cacheUpdate(table *schemas.Table, tableName, sqlStr stri
 		}
 	}
 	session.engine.logger.Debugf("[cache] clear cached table sql: %v", tableName)
-	cacher.ClearIds(tableName)
+	cacher.ClearIDs(tableName)
 	return nil
 }
 
@@ -433,7 +433,7 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6
 	if cacher := session.engine.GetCacher(tableName); cacher != nil && session.statement.UseCache {
 		// session.cacheUpdate(table, tableName, sqlStr, args...)
 		session.engine.logger.Debugf("[cache] clear table: %v", tableName)
-		cacher.ClearIds(tableName)
+		cacher.ClearIDs(tableName)
 		cacher.ClearBeans(tableName)
 	}
 