This commit is contained in:
Lunny Xiao 2023-10-27 23:26:59 +08:00
parent 82faf6c301
commit 7118e6f646
No known key found for this signature in database
GPG Key ID: C3B7C91B632F738A
29 changed files with 12 additions and 1698 deletions

View File

@ -1,99 +0,0 @@
// Copyright 2019 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package caches
import (
"bytes"
"encoding/gob"
"errors"
"fmt"
"strings"
"time"
"xorm.io/xorm/v2/schemas"
)
const (
	// CacheExpired is default cache expired time
	CacheExpired = 60 * time.Minute
	// CacheMaxMemory is not used now
	CacheMaxMemory = 256
	// CacheGcInterval represents interval time to clear all expired nodes
	CacheGcInterval = 10 * time.Minute
	// CacheGcMaxRemoved represents max nodes removed when gc
	CacheGcMaxRemoved = 20
)

// list all the errors
var (
	// ErrCacheMiss is returned when a key is not found in the cache
	ErrCacheMiss = errors.New("xorm/cache: key not found")
	// ErrNotStored is returned when a value could not be stored
	ErrNotStored = errors.New("xorm/cache: not stored")
	// ErrNotExist record does not exist error
	// NOTE(review): the message is capitalized and lacks the "xorm/cache:"
	// prefix unlike its siblings; changing it could break callers that
	// compare error strings, so it is left as-is.
	ErrNotExist = errors.New("Record does not exist")
)

// CacheStore is a interface to store cache
type CacheStore interface {
	// key is primary key or composite primary key
	// value is struct's pointer
	// key format : <tablename>-p-<pk1>-<pk2>...
	Put(key string, value interface{}) error
	// Get returns the value stored under key, or an error when it is absent
	Get(key string) (interface{}, error)
	// Del removes the value stored under key
	Del(key string) error
}

// Cacher is an interface to provide cache
// id format : u-<pk1>-<pk2>...
type Cacher interface {
	// GetIds returns the cached ids of a table/sql pair, or nil on miss
	GetIds(tableName, sql string) interface{}
	// GetBean returns the cached bean of a table/id pair, or nil on miss
	GetBean(tableName string, id string) interface{}
	// PutIds caches the ids result of a sql statement for a table
	PutIds(tableName, sql string, ids interface{})
	// PutBean caches one bean under table/id
	PutBean(tableName string, id string, obj interface{})
	// DelIds removes one cached sql->ids entry
	DelIds(tableName, sql string)
	// DelBean removes one cached bean
	DelBean(tableName string, id string)
	// ClearIds removes all cached sql->ids entries of a table
	ClearIds(tableName string)
	// ClearBeans removes all cached beans of a table
	ClearBeans(tableName string)
}
// encodeIds serializes a slice of primary keys to a gob-encoded string.
func encodeIds(ids []schemas.PK) (string, error) {
	var buf bytes.Buffer
	err := gob.NewEncoder(&buf).Encode(ids)
	return buf.String(), err
}

// decodeIds deserializes a gob-encoded string back into primary keys.
func decodeIds(s string) ([]schemas.PK, error) {
	pks := make([]schemas.PK, 0)
	err := gob.NewDecoder(strings.NewReader(s)).Decode(&pks)
	return pks, err
}
// GetCacheSql returns cacher PKs via SQL
// It looks up the ids cached for the given table and SQL/args pair and
// decodes them back into primary keys. An error is returned on a miss.
func GetCacheSql(m Cacher, tableName, sql string, args interface{}) ([]schemas.PK, error) {
	// Renamed from `bytes`, which shadowed the imported bytes package.
	cached := m.GetIds(tableName, GenSqlKey(sql, args))
	if cached == nil {
		return nil, errors.New("Not Exist")
	}
	// GetIds stores the gob-encoded string produced by PutCacheSql below.
	return decodeIds(cached.(string))
}

// PutCacheSql puts cacher SQL and PKs
// It gob-encodes the primary keys and stores them under the generated
// SQL cache key; the encoding error, if any, is propagated.
func PutCacheSql(m Cacher, ids []schemas.PK, tableName, sql string, args interface{}) error {
	encoded, err := encodeIds(ids)
	if err != nil {
		return err
	}
	m.PutIds(tableName, GenSqlKey(sql, args), encoded)
	return nil
}
// GenSqlKey generates the cache key for a SQL statement and its
// arguments, in the form "<sql>-<args>".
func GenSqlKey(sql string, args interface{}) string {
	key := fmt.Sprintf("%v-%v", sql, args)
	return key
}

View File

@ -1,65 +0,0 @@
// Copyright 2020 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package caches
import (
"bytes"
"crypto/md5"
"encoding/gob"
"encoding/json"
"fmt"
"io"
)
// Md5 return md5 hash string
// computed over str and rendered as lowercase hexadecimal.
func Md5(str string) string {
	sum := md5.Sum([]byte(str))
	return fmt.Sprintf("%x", sum)
}
// Encode Encode data
// using gob; the json variant is kept (commented out) as an alternative.
func Encode(data interface{}) ([]byte, error) {
	// return JsonEncode(data)
	return GobEncode(data)
}

// Decode decode data
// using gob, mirroring Encode.
func Decode(data []byte, to interface{}) error {
	// return JsonDecode(data, to)
	return GobDecode(data, to)
}
// GobEncode encode data with gob
// and returns the raw encoded bytes.
func GobEncode(data interface{}) ([]byte, error) {
	buf := new(bytes.Buffer)
	// NOTE: &data (pointer to the interface) is encoded on purpose to
	// keep the wire format identical for existing cached values.
	if err := gob.NewEncoder(buf).Encode(&data); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// GobDecode decode data with gob
// into the value pointed to by to.
func GobDecode(data []byte, to interface{}) error {
	return gob.NewDecoder(bytes.NewReader(data)).Decode(to)
}
// JsonEncode encode data with json
// returning the marshalled bytes or the marshalling error.
func JsonEncode(data interface{}) ([]byte, error) {
	return json.Marshal(data)
}

// JsonDecode decode data with json
// into the value pointed to by to.
func JsonDecode(data []byte, to interface{}) error {
	return json.Unmarshal(data, to)
}

View File

@ -1,99 +0,0 @@
// Copyright 2020 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package caches
import (
"log"
"github.com/syndtr/goleveldb/leveldb"
)
// LevelDBStore implements CacheStore provide local machine
// persistence on top of a goleveldb database directory.
type LevelDBStore struct {
	store *leveldb.DB
	// Debug enables logging of every store operation via the log package
	Debug bool
	// v is a scratch slot written by Get
	// NOTE(review): shared across Get calls — racy under concurrent use;
	// confirm whether this store is meant to be used concurrently.
	v interface{}
}

// compile-time assertion that LevelDBStore satisfies CacheStore
var _ CacheStore = &LevelDBStore{}

// NewLevelDBStore creates a leveldb store
// opening (or creating) the database at the dbfile path.
func NewLevelDBStore(dbfile string) (*LevelDBStore, error) {
	db := &LevelDBStore{}
	h, err := leveldb.OpenFile(dbfile, nil)
	if err != nil {
		return nil, err
	}
	db.store = h
	return db, nil
}
// Put implements CacheStore
// It gob-encodes value via the package Encode helper and writes the
// bytes to leveldb under key; failures are logged when Debug is set.
func (s *LevelDBStore) Put(key string, value interface{}) error {
	val, err := Encode(value)
	if err != nil {
		if s.Debug {
			log.Println("[LevelDB]EncodeErr: ", err, "Key:", key)
		}
		return err
	}
	err = s.store.Put([]byte(key), val, nil)
	if err != nil {
		if s.Debug {
			log.Println("[LevelDB]PutErr: ", err, "Key:", key)
		}
		return err
	}
	if s.Debug {
		log.Println("[LevelDB]Put: ", key)
	}
	// err is nil here
	return err
}
// Get implements CacheStore
// It reads the bytes stored under key and gob-decodes them. A missing
// key (leveldb.ErrNotFound) is translated to ErrNotExist.
//
// Fix: decode into a local variable instead of the shared s.v field.
// Writing s.v from concurrent Get calls is a data race and could hand
// one caller the value decoded for another key.
func (s *LevelDBStore) Get(key string) (interface{}, error) {
	data, err := s.store.Get([]byte(key), nil)
	if err != nil {
		if s.Debug {
			log.Println("[LevelDB]GetErr: ", err, "Key:", key)
		}
		if err == leveldb.ErrNotFound {
			return nil, ErrNotExist
		}
		return nil, err
	}
	var value interface{}
	if err = Decode(data, &value); err != nil {
		if s.Debug {
			log.Println("[LevelDB]DecodeErr: ", err, "Key:", key)
		}
		return nil, err
	}
	if s.Debug {
		log.Println("[LevelDB]Get: ", key, value)
	}
	return value, nil
}
// Del implements CacheStore
// removing key from the underlying leveldb database.
func (s *LevelDBStore) Del(key string) error {
	err := s.store.Delete([]byte(key), nil)
	if err != nil {
		if s.Debug {
			log.Println("[LevelDB]DelErr: ", err, "Key:", key)
		}
		return err
	}
	if s.Debug {
		log.Println("[LevelDB]Del: ", key)
	}
	// err is nil here
	return err
}

// Close implements CacheStore
// NOTE(review): the error returned by leveldb's Close is discarded, so
// callers cannot observe a failed flush — confirm this is acceptable.
func (s *LevelDBStore) Close() {
	s.store.Close()
}

View File

@ -1,39 +0,0 @@
// Copyright 2020 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package caches
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestLevelDBStore exercises the Put/Get/Del round trip of LevelDBStore
// against a database directory created in the working directory.
func TestLevelDBStore(t *testing.T) {
	store, err := NewLevelDBStore("./level.db")
	assert.NoError(t, err)
	var kvs = map[string]interface{}{
		"a": "b",
	}
	// store every pair, then read each one back
	for k, v := range kvs {
		assert.NoError(t, store.Put(k, v))
	}
	for k, v := range kvs {
		val, err := store.Get(k)
		assert.NoError(t, err)
		assert.EqualValues(t, v, val)
	}
	for k := range kvs {
		err := store.Del(k)
		assert.NoError(t, err)
	}
	// after deletion every key must report ErrNotExist
	for k := range kvs {
		_, err := store.Get(k)
		assert.EqualValues(t, ErrNotExist, err)
	}
}

View File

@ -1,278 +0,0 @@
// Copyright 2015 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package caches
import (
"container/list"
"fmt"
"sync"
"time"
)
// LRUCacher implements cache object facilities with LRU eviction.
// Two lists are kept: idList for cached beans and sqlList for cached
// sql->ids mappings; idIndex/sqlIndex map table name and key to the
// corresponding list element for O(1) lookup. All exported methods
// serialize on mutex; the actual values live in store.
type LRUCacher struct {
	idList         *list.List                          // LRU order of beans; front = least recently visited
	sqlList        *list.List                          // LRU order of sql->ids entries
	idIndex        map[string]map[string]*list.Element // tableName -> id -> idList element
	sqlIndex       map[string]map[string]*list.Element // tableName -> sql -> sqlList element
	store          CacheStore                          // backing store for the cached values
	mutex          sync.Mutex
	MaxElementSize int           // max entries per list before LRU eviction
	Expired        time.Duration // entry time-to-live
	GcInterval     time.Duration // period between background GC passes
}

// NewLRUCacher creates a cacher
// with a one-hour expiry, backed by store.
func NewLRUCacher(store CacheStore, maxElementSize int) *LRUCacher {
	return NewLRUCacher2(store, 3600*time.Second, maxElementSize)
}

// NewLRUCacher2 creates a cache include different params
// (explicit expiry duration) and starts the background GC timer.
func NewLRUCacher2(store CacheStore, expired time.Duration, maxElementSize int) *LRUCacher {
	cacher := &LRUCacher{store: store, idList: list.New(),
		sqlList: list.New(), Expired: expired,
		GcInterval: CacheGcInterval, MaxElementSize: maxElementSize,
		sqlIndex: make(map[string]map[string]*list.Element),
		idIndex:  make(map[string]map[string]*list.Element),
	}
	cacher.RunGC()
	return cacher
}

// RunGC run once every m.GcInterval
// rescheduling itself before each GC pass.
// NOTE(review): the timer chain has no stop mechanism, so a cacher keeps
// a timer alive forever once created — confirm this is acceptable.
func (m *LRUCacher) RunGC() {
	time.AfterFunc(m.GcInterval, func() {
		m.RunGC()
		m.GC()
	})
}
// GC checks the ids list and sql list to remove expired elements.
// Each list is scanned from the front (least recently visited); the scan
// stops at the first non-expired element, so only the leading run of
// expired nodes is removed per pass.
// NOTE(review): the `<=` comparison allows CacheGcMaxRemoved+1 removals
// per list per pass — confirm whether the bound is meant inclusively.
func (m *LRUCacher) GC() {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	var removedNum int
	for e := m.idList.Front(); e != nil; {
		if removedNum <= CacheGcMaxRemoved &&
			time.Since(e.Value.(*idNode).lastVisit) > m.Expired {
			removedNum++
			// capture the successor before delBean unlinks e
			next := e.Next()
			node := e.Value.(*idNode)
			m.delBean(node.tbName, node.id)
			e = next
		} else {
			break
		}
	}
	removedNum = 0
	for e := m.sqlList.Front(); e != nil; {
		if removedNum <= CacheGcMaxRemoved &&
			time.Since(e.Value.(*sqlNode).lastVisit) > m.Expired {
			removedNum++
			next := e.Next()
			node := e.Value.(*sqlNode)
			m.delIds(node.tbName, node.sql)
			e = next
		} else {
			break
		}
	}
}
// GetIds returns all bean's ids according to sql and parameter from cache
// It returns nil on a miss or when the entry has expired; in either case
// the stale in-memory index entry is evicted as a side effect.
func (m *LRUCacher) GetIds(tableName, sql string) interface{} {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	// lazily create the per-table index
	if _, ok := m.sqlIndex[tableName]; !ok {
		m.sqlIndex[tableName] = make(map[string]*list.Element)
	}
	if v, err := m.store.Get(sql); err == nil {
		if el, ok := m.sqlIndex[tableName][sql]; !ok {
			// present in the store but not yet indexed: index it now
			el = m.sqlList.PushBack(newSQLNode(tableName, sql))
			m.sqlIndex[tableName][sql] = el
		} else {
			lastTime := el.Value.(*sqlNode).lastVisit
			// if expired, remove the node and return nil
			if time.Since(lastTime) > m.Expired {
				m.delIds(tableName, sql)
				return nil
			}
			// refresh LRU position and visit time
			m.sqlList.MoveToBack(el)
			el.Value.(*sqlNode).lastVisit = time.Now()
		}
		return v
	}
	// store miss: drop any stale index entry
	m.delIds(tableName, sql)
	return nil
}

// GetBean returns bean according tableName and id from cache
// It returns nil on a miss or when the index entry has expired.
func (m *LRUCacher) GetBean(tableName string, id string) interface{} {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	// lazily create the per-table index
	if _, ok := m.idIndex[tableName]; !ok {
		m.idIndex[tableName] = make(map[string]*list.Element)
	}
	tid := genID(tableName, id)
	if v, err := m.store.Get(tid); err == nil {
		if el, ok := m.idIndex[tableName][id]; ok {
			lastTime := el.Value.(*idNode).lastVisit
			// if expired, remove the node and return nil
			if time.Since(lastTime) > m.Expired {
				m.delBean(tableName, id)
				return nil
			}
			// refresh LRU position and visit time
			m.idList.MoveToBack(el)
			el.Value.(*idNode).lastVisit = time.Now()
		} else {
			// present in the store but not yet indexed: index it now
			el = m.idList.PushBack(newIDNode(tableName, id))
			m.idIndex[tableName][id] = el
		}
		return v
	}
	// store bean is not exist, then remove memory's index
	m.delBean(tableName, id)
	return nil
}
// clearIds clears all sql-ids mapping on table tableName from cache
// The caller must hold m.mutex.
func (m *LRUCacher) clearIds(tableName string) {
	if tis, ok := m.sqlIndex[tableName]; ok {
		for sql, v := range tis {
			m.sqlList.Remove(v)
			// best-effort removal from the backing store
			_ = m.store.Del(sql)
		}
	}
	// reset the per-table index to an empty map
	m.sqlIndex[tableName] = make(map[string]*list.Element)
}

// ClearIds clears all sql-ids mapping on table tableName from cache
func (m *LRUCacher) ClearIds(tableName string) {
	m.mutex.Lock()
	m.clearIds(tableName)
	m.mutex.Unlock()
}

// clearBeans removes every cached bean of tableName.
// The caller must hold m.mutex.
func (m *LRUCacher) clearBeans(tableName string) {
	if tis, ok := m.idIndex[tableName]; ok {
		for id, v := range tis {
			m.idList.Remove(v)
			tid := genID(tableName, id)
			_ = m.store.Del(tid)
		}
	}
	m.idIndex[tableName] = make(map[string]*list.Element)
}

// ClearBeans clears all beans in some table
func (m *LRUCacher) ClearBeans(tableName string) {
	m.mutex.Lock()
	m.clearBeans(tableName)
	m.mutex.Unlock()
}
// PutIds puts ids into table
// caching the ids result of a sql statement. When the sql list grows
// beyond MaxElementSize the least recently used entry is evicted.
func (m *LRUCacher) PutIds(tableName, sql string, ids interface{}) {
	m.mutex.Lock()
	// lazily create the per-table index
	if _, ok := m.sqlIndex[tableName]; !ok {
		m.sqlIndex[tableName] = make(map[string]*list.Element)
	}
	if el, ok := m.sqlIndex[tableName][sql]; !ok {
		el = m.sqlList.PushBack(newSQLNode(tableName, sql))
		m.sqlIndex[tableName][sql] = el
	} else {
		// already cached: only refresh the visit time
		el.Value.(*sqlNode).lastVisit = time.Now()
	}
	// best-effort write; store errors are deliberately ignored
	_ = m.store.Put(sql, ids)
	if m.sqlList.Len() > m.MaxElementSize {
		// evict the least recently used sql entry
		e := m.sqlList.Front()
		node := e.Value.(*sqlNode)
		m.delIds(node.tbName, node.sql)
	}
	m.mutex.Unlock()
}
// PutBean puts beans into table
// caching obj under tableName/id. When the id list grows beyond
// MaxElementSize the least recently used bean is evicted.
func (m *LRUCacher) PutBean(tableName string, id string, obj interface{}) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	// Fix: lazily create the per-table index, matching GetBean/PutIds.
	// Previously a PutBean on a table never read via GetBean wrote into
	// a nil inner map and panicked ("assignment to entry in nil map").
	if _, ok := m.idIndex[tableName]; !ok {
		m.idIndex[tableName] = make(map[string]*list.Element)
	}
	el, ok := m.idIndex[tableName][id]
	if !ok {
		el = m.idList.PushBack(newIDNode(tableName, id))
		m.idIndex[tableName][id] = el
	} else {
		// already cached: only refresh the visit time
		el.Value.(*idNode).lastVisit = time.Now()
	}
	// best-effort write; store errors are deliberately ignored
	_ = m.store.Put(genID(tableName, id), obj)
	if m.idList.Len() > m.MaxElementSize {
		// evict the least recently used bean
		e := m.idList.Front()
		node := e.Value.(*idNode)
		m.delBean(node.tbName, node.id)
	}
}
// delIds removes one sql->ids cache entry.
// The caller must hold m.mutex.
func (m *LRUCacher) delIds(tableName, sql string) {
	if _, ok := m.sqlIndex[tableName]; ok {
		if el, ok := m.sqlIndex[tableName][sql]; ok {
			delete(m.sqlIndex[tableName], sql)
			m.sqlList.Remove(el)
		}
	}
	// best-effort removal from the backing store
	_ = m.store.Del(sql)
}

// DelIds deletes ids
func (m *LRUCacher) DelIds(tableName, sql string) {
	m.mutex.Lock()
	m.delIds(tableName, sql)
	m.mutex.Unlock()
}

// delBean removes one cached bean; the caller must hold m.mutex.
// It also clears every cached sql->ids entry of the table, since cached
// result sets may still reference the deleted id.
func (m *LRUCacher) delBean(tableName string, id string) {
	tid := genID(tableName, id)
	if el, ok := m.idIndex[tableName][id]; ok {
		delete(m.idIndex[tableName], id)
		m.idList.Remove(el)
		m.clearIds(tableName)
	}
	_ = m.store.Del(tid)
}

// DelBean deletes beans in some table
func (m *LRUCacher) DelBean(tableName string, id string) {
	m.mutex.Lock()
	m.delBean(tableName, id)
	m.mutex.Unlock()
}
// idNode is the idList element payload: one cached bean reference plus
// the last visit time used for expiry checks.
type idNode struct {
	tbName    string
	id        string
	lastVisit time.Time
}

// sqlNode is the sqlList element payload: one cached sql->ids entry plus
// the last visit time used for expiry checks.
type sqlNode struct {
	tbName    string
	sql       string
	lastVisit time.Time
}
// genID joins a table-name prefix and an id into the store key
// "<prefix>-<id>".
func genID(prefix string, id string) string {
	key := fmt.Sprintf("%s-%s", prefix, id)
	return key
}
// newIDNode builds an id-list node stamped with the current time.
func newIDNode(tbName string, id string) *idNode {
	return &idNode{tbName: tbName, id: id, lastVisit: time.Now()}
}

// newSQLNode builds a sql-list node stamped with the current time.
func newSQLNode(tbName, sql string) *sqlNode {
	return &sqlNode{tbName: tbName, sql: sql, lastVisit: time.Now()}
}

View File

@ -1,52 +0,0 @@
// Copyright 2015 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package caches
import (
"testing"
"github.com/stretchr/testify/assert"
"xorm.io/xorm/v2/schemas"
)
// TestLRUCache exercises the LRUCacher id-cache and bean-cache paths
// (put, get, clear, delete) backed by an in-memory store.
func TestLRUCache(t *testing.T) {
	type CacheObject1 struct {
		Id int64
	}
	store := NewMemoryStore()
	cacher := NewLRUCacher(store, 10000)
	tableName := "cache_object1"
	pks := []schemas.PK{
		{1},
		{2},
	}
	for _, pk := range pks {
		sid, err := pk.ToString()
		assert.NoError(t, err)
		// id cache round trip
		cacher.PutIds(tableName, "select * from cache_object1", sid)
		ids := cacher.GetIds(tableName, "select * from cache_object1")
		assert.EqualValues(t, sid, ids)
		// clearing the table must drop the cached ids
		cacher.ClearIds(tableName)
		ids2 := cacher.GetIds(tableName, "select * from cache_object1")
		assert.Nil(t, ids2)
		// bean cache round trip
		obj2 := cacher.GetBean(tableName, sid)
		assert.Nil(t, obj2)
		obj := new(CacheObject1)
		cacher.PutBean(tableName, sid, obj)
		obj3 := cacher.GetBean(tableName, sid)
		assert.EqualValues(t, obj, obj3)
		cacher.DelBean(tableName, sid)
		obj4 := cacher.GetBean(tableName, sid)
		assert.Nil(t, obj4)
	}
}

View File

@ -1,60 +0,0 @@
// Copyright 2020 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package caches
import "sync"
// Manager represents a cache manager
// It resolves a table's Cacher from a per-table registry first and falls
// back to the global default cacher unless the global cache is disabled.
type Manager struct {
	cacher             Cacher
	disableGlobalCache bool

	cachers    map[string]Cacher
	cacherLock sync.RWMutex
}

// NewManager creates a cache manager
func NewManager() *Manager {
	return &Manager{cachers: map[string]Cacher{}}
}

// SetDisableGlobalCache disable global cache or not
func (mgr *Manager) SetDisableGlobalCache(disable bool) {
	mgr.disableGlobalCache = disable
}

// SetCacher set cacher of table
func (mgr *Manager) SetCacher(tableName string, cacher Cacher) {
	mgr.cacherLock.Lock()
	defer mgr.cacherLock.Unlock()
	mgr.cachers[tableName] = cacher
}

// GetCacher returns a cache of a table
// falling back to the default cacher when no per-table cacher is set
// and the global cache has not been disabled.
func (mgr *Manager) GetCacher(tableName string) Cacher {
	mgr.cacherLock.RLock()
	cacher, ok := mgr.cachers[tableName]
	mgr.cacherLock.RUnlock()
	if ok || mgr.disableGlobalCache {
		return cacher
	}
	return mgr.cacher
}

// SetDefaultCacher set the default cacher. Xorm's default not enable cacher.
func (mgr *Manager) SetDefaultCacher(cacher Cacher) {
	mgr.cacher = cacher
}

// GetDefaultCacher returns the default cacher
func (mgr *Manager) GetDefaultCacher() Cacher {
	return mgr.cacher
}

View File

@ -1,49 +0,0 @@
// Copyright 2015 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package caches
import (
"sync"
)
// compile-time assertion that MemoryStore satisfies CacheStore
var _ CacheStore = NewMemoryStore()

// MemoryStore represents in-memory store
// backed by a process-local map guarded by an RWMutex.
type MemoryStore struct {
	store map[interface{}]interface{}
	mutex sync.RWMutex
}

// NewMemoryStore creates a new store in memory
func NewMemoryStore() *MemoryStore {
	return &MemoryStore{store: map[interface{}]interface{}{}}
}

// Put puts object into store
func (s *MemoryStore) Put(key string, value interface{}) error {
	s.mutex.Lock()
	s.store[key] = value
	s.mutex.Unlock()
	return nil
}

// Get gets object from store
// returning ErrNotExist when the key is absent.
func (s *MemoryStore) Get(key string) (interface{}, error) {
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	v, ok := s.store[key]
	if !ok {
		return nil, ErrNotExist
	}
	return v, nil
}

// Del deletes object
func (s *MemoryStore) Del(key string) error {
	s.mutex.Lock()
	delete(s.store, key)
	s.mutex.Unlock()
	return nil
}

View File

@ -1,37 +0,0 @@
// Copyright 2015 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package caches
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestMemoryStore exercises the Put/Get/Del round trip of MemoryStore.
func TestMemoryStore(t *testing.T) {
	store := NewMemoryStore()
	var kvs = map[string]interface{}{
		"a": "b",
	}
	// store every pair, then read each one back
	for k, v := range kvs {
		assert.NoError(t, store.Put(k, v))
	}
	for k, v := range kvs {
		val, err := store.Get(k)
		assert.NoError(t, err)
		assert.EqualValues(t, v, val)
	}
	for k := range kvs {
		err := store.Del(k)
		assert.NoError(t, err)
	}
	// after deletion every key must report ErrNotExist
	for k := range kvs {
		_, err := store.Get(k)
		assert.EqualValues(t, ErrNotExist, err)
	}
}

View File

@ -17,7 +17,6 @@ import (
"strings" "strings"
"time" "time"
"xorm.io/xorm/v2/caches"
"xorm.io/xorm/v2/contexts" "xorm.io/xorm/v2/contexts"
"xorm.io/xorm/v2/core" "xorm.io/xorm/v2/core"
"xorm.io/xorm/v2/dialects" "xorm.io/xorm/v2/dialects"
@ -31,7 +30,6 @@ import (
// Engine is the major struct of xorm, it means a database manager. // Engine is the major struct of xorm, it means a database manager.
// Commonly, an application only need one engine // Commonly, an application only need one engine
type Engine struct { type Engine struct {
cacherMgr *caches.Manager
defaultContext context.Context defaultContext context.Context
dialect dialects.Dialect dialect dialects.Dialect
driver dialects.Driver driver dialects.Driver
@ -66,16 +64,14 @@ func NewEngine(driverName string, dataSourceName string) (*Engine, error) {
} }
func newEngine(driverName, dataSourceName string, dialect dialects.Dialect, db *core.DB) (*Engine, error) { func newEngine(driverName, dataSourceName string, dialect dialects.Dialect, db *core.DB) (*Engine, error) {
cacherMgr := caches.NewManager()
mapper := names.NewCacheMapper(new(names.SnakeMapper)) mapper := names.NewCacheMapper(new(names.SnakeMapper))
tagParser := tags.NewParser("xorm", dialect, mapper, mapper, cacherMgr) tagParser := tags.NewParser("xorm", dialect, mapper, mapper)
engine := &Engine{ engine := &Engine{
dialect: dialect, dialect: dialect,
driver: dialects.QueryDriver(driverName), driver: dialects.QueryDriver(driverName),
TZLocation: time.Local, TZLocation: time.Local,
defaultContext: context.Background(), defaultContext: context.Background(),
cacherMgr: cacherMgr,
tagParser: tagParser, tagParser: tagParser,
driverName: driverName, driverName: driverName,
dataSourceName: dataSourceName, dataSourceName: dataSourceName,
@ -129,16 +125,6 @@ func (engine *Engine) EnableSessionID(enable bool) {
engine.logSessionID = enable engine.logSessionID = enable
} }
// SetCacher sets cacher for the table
func (engine *Engine) SetCacher(tableName string, cacher caches.Cacher) {
engine.cacherMgr.SetCacher(tableName, cacher)
}
// GetCacher returns the cachher of the special table
func (engine *Engine) GetCacher(tableName string) caches.Cacher {
return engine.cacherMgr.GetCacher(tableName)
}
// SetQuotePolicy sets the special quote policy // SetQuotePolicy sets the special quote policy
func (engine *Engine) SetQuotePolicy(quotePolicy dialects.QuotePolicy) { func (engine *Engine) SetQuotePolicy(quotePolicy dialects.QuotePolicy) {
engine.dialect.SetQuotePolicy(quotePolicy) engine.dialect.SetQuotePolicy(quotePolicy)
@ -182,11 +168,6 @@ func (engine *Engine) SetLogLevel(level log.LogLevel) {
engine.logger.SetLevel(level) engine.logger.SetLevel(level)
} }
// SetDisableGlobalCache disable global cache or not
func (engine *Engine) SetDisableGlobalCache(disable bool) {
engine.cacherMgr.SetDisableGlobalCache(disable)
}
// DriverName return the current sql driver's name // DriverName return the current sql driver's name
func (engine *Engine) DriverName() string { func (engine *Engine) DriverName() string {
return engine.driverName return engine.driverName
@ -269,24 +250,6 @@ func (engine *Engine) SetMaxIdleConns(conns int) {
engine.DB().SetMaxIdleConns(conns) engine.DB().SetMaxIdleConns(conns)
} }
// SetDefaultCacher set the default cacher. Xorm's default not enable cacher.
func (engine *Engine) SetDefaultCacher(cacher caches.Cacher) {
engine.cacherMgr.SetDefaultCacher(cacher)
}
// GetDefaultCacher returns the default cacher
func (engine *Engine) GetDefaultCacher() caches.Cacher {
return engine.cacherMgr.GetDefaultCacher()
}
// NoCache If you has set default cacher, and you want temporilly stop use cache,
// you can use NoCache()
func (engine *Engine) NoCache() *Session {
session := engine.NewSession()
session.isAutoClose = true
return session.NoCache()
}
// NoCascade If you do not want to auto cascade load object // NoCascade If you do not want to auto cascade load object
func (engine *Engine) NoCascade() *Session { func (engine *Engine) NoCascade() *Session {
session := engine.NewSession() session := engine.NewSession()
@ -294,12 +257,6 @@ func (engine *Engine) NoCascade() *Session {
return session.NoCascade() return session.NoCascade()
} }
// MapCacher Set a table use a special cacher
func (engine *Engine) MapCacher(bean interface{}, cacher caches.Cacher) error {
engine.SetCacher(dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean, true), cacher)
return nil
}
// NewDB provides an interface to operate database directly // NewDB provides an interface to operate database directly
func (engine *Engine) NewDB() (*core.DB, error) { func (engine *Engine) NewDB() (*core.DB, error) {
return core.Open(engine.driverName, engine.dataSourceName) return core.Open(engine.driverName, engine.dataSourceName)
@ -481,8 +438,7 @@ func (engine *Engine) dumpTables(ctx context.Context, tables []*schemas.Table, w
return err return err
} }
} }
cacherMgr := caches.NewManager() dstTableCache := tags.NewParser("xorm", dstDialect, engine.GetTableMapper(), engine.GetColumnMapper())
dstTableCache := tags.NewParser("xorm", dstDialect, engine.GetTableMapper(), engine.GetColumnMapper(), cacherMgr)
_, err := io.WriteString(w, fmt.Sprintf("/*Generated by xorm %s, from %s to %s*/\n\n", _, err := io.WriteString(w, fmt.Sprintf("/*Generated by xorm %s, from %s to %s*/\n\n",
time.Now().In(engine.TZLocation).Format("2006-01-02 15:04:05"), engine.dialect.URI().DBType, dstDialect.URI().DBType)) time.Now().In(engine.TZLocation).Format("2006-01-02 15:04:05"), engine.dialect.URI().DBType, dstDialect.URI().DBType))
@ -1091,30 +1047,6 @@ func (engine *Engine) CreateUniques(bean interface{}) error {
return session.CreateUniques(bean) return session.CreateUniques(bean)
} }
// ClearCacheBean if enabled cache, clear the cache bean
func (engine *Engine) ClearCacheBean(bean interface{}, id string) error {
tableName := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean)
cacher := engine.GetCacher(tableName)
if cacher != nil {
cacher.ClearIds(tableName)
cacher.DelBean(tableName, id)
}
return nil
}
// ClearCache if enabled cache, clear some tables' cache
func (engine *Engine) ClearCache(beans ...interface{}) error {
for _, bean := range beans {
tableName := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean)
cacher := engine.GetCacher(tableName)
if cacher != nil {
cacher.ClearIds(tableName)
cacher.ClearBeans(tableName)
}
}
return nil
}
// UnMapType remove table from tables cache // UnMapType remove table from tables cache
func (engine *Engine) UnMapType(t reflect.Type) { func (engine *Engine) UnMapType(t reflect.Type) {
engine.tagParser.ClearCacheTable(t) engine.tagParser.ClearCacheTable(t)

View File

@ -8,7 +8,6 @@ import (
"context" "context"
"time" "time"
"xorm.io/xorm/v2/caches"
"xorm.io/xorm/v2/contexts" "xorm.io/xorm/v2/contexts"
"xorm.io/xorm/v2/dialects" "xorm.io/xorm/v2/dialects"
"xorm.io/xorm/v2/log" "xorm.io/xorm/v2/log"
@ -128,14 +127,6 @@ func (eg *EngineGroup) SetConnMaxLifetime(d time.Duration) {
} }
} }
// SetDefaultCacher set the default cacher
func (eg *EngineGroup) SetDefaultCacher(cacher caches.Cacher) {
eg.Engine.SetDefaultCacher(cacher)
for i := 0; i < len(eg.slaves); i++ {
eg.slaves[i].SetDefaultCacher(cacher)
}
}
// SetLogger set the new logger // SetLogger set the new logger
func (eg *EngineGroup) SetLogger(logger interface{}) { func (eg *EngineGroup) SetLogger(logger interface{}) {
eg.Engine.SetLogger(logger) eg.Engine.SetLogger(logger)

View File

@ -10,7 +10,6 @@ import (
"reflect" "reflect"
"time" "time"
"xorm.io/xorm/v2/caches"
"xorm.io/xorm/v2/contexts" "xorm.io/xorm/v2/contexts"
"xorm.io/xorm/v2/dialects" "xorm.io/xorm/v2/dialects"
"xorm.io/xorm/v2/log" "xorm.io/xorm/v2/log"
@ -81,7 +80,6 @@ type EngineInterface interface {
Before(func(interface{})) *Session Before(func(interface{})) *Session
Charset(charset string) *Session Charset(charset string) *Session
ClearCache(...interface{}) error
Context(context.Context) *Session Context(context.Context) *Session
CreateTables(...interface{}) error CreateTables(...interface{}) error
DBMetas() ([]*schemas.Table, error) DBMetas() ([]*schemas.Table, error)
@ -90,23 +88,18 @@ type EngineInterface interface {
DriverName() string DriverName() string
DropTables(...interface{}) error DropTables(...interface{}) error
DumpAllToFile(fp string, tp ...schemas.DBType) error DumpAllToFile(fp string, tp ...schemas.DBType) error
GetCacher(string) caches.Cacher
GetColumnMapper() names.Mapper GetColumnMapper() names.Mapper
GetDefaultCacher() caches.Cacher
GetTableMapper() names.Mapper GetTableMapper() names.Mapper
GetTZDatabase() *time.Location GetTZDatabase() *time.Location
GetTZLocation() *time.Location GetTZLocation() *time.Location
ImportFile(fp string) ([]sql.Result, error) ImportFile(fp string) ([]sql.Result, error)
MapCacher(interface{}, caches.Cacher) error
NewSession() *Session NewSession() *Session
NoAutoTime() *Session NoAutoTime() *Session
Prepare() *Session Prepare() *Session
Quote(string) string Quote(string) string
SetCacher(string, caches.Cacher)
SetConnMaxLifetime(time.Duration) SetConnMaxLifetime(time.Duration)
SetColumnMapper(names.Mapper) SetColumnMapper(names.Mapper)
SetTagIdentifier(string) SetTagIdentifier(string)
SetDefaultCacher(caches.Cacher)
SetLogger(logger interface{}) SetLogger(logger interface{})
SetLogLevel(log.LogLevel) SetLogLevel(log.LogLevel)
SetMapper(names.Mapper) SetMapper(names.Mapper)

View File

@ -64,7 +64,6 @@ type Statement struct {
UseAutoJoin bool UseAutoJoin bool
StoreEngine string StoreEngine string
Charset string Charset string
UseCache bool
UseAutoTime bool UseAutoTime bool
NoAutoCondition bool NoAutoCondition bool
IsDistinct bool IsDistinct bool
@ -138,7 +137,6 @@ func (statement *Statement) Reset() {
statement.idParam = nil statement.idParam = nil
statement.RawSQL = "" statement.RawSQL = ""
statement.RawParams = make([]interface{}, 0) statement.RawParams = make([]interface{}, 0)
statement.UseCache = true
statement.UseAutoTime = true statement.UseAutoTime = true
statement.NoAutoCondition = false statement.NoAutoCondition = false
statement.IsDistinct = false statement.IsDistinct = false

View File

@ -12,7 +12,7 @@ import (
"time" "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"xorm.io/xorm/v2/caches"
"xorm.io/xorm/v2/dialects" "xorm.io/xorm/v2/dialects"
"xorm.io/xorm/v2/names" "xorm.io/xorm/v2/names"
"xorm.io/xorm/v2/schemas" "xorm.io/xorm/v2/schemas"
@ -33,7 +33,7 @@ func TestMain(m *testing.M) {
panic("unknow dialect") panic("unknow dialect")
} }
tagParser = tags.NewParser("xorm", dialect, names.SnakeMapper{}, names.SnakeMapper{}, caches.NewManager()) tagParser = tags.NewParser("xorm", dialect, names.SnakeMapper{}, names.SnakeMapper{})
if tagParser == nil { if tagParser == nil {
panic("tags parser is nil") panic("tags parser is nil")
} }

View File

@ -321,13 +321,6 @@ func (session *Session) MustLogSQL(logs ...bool) *Session {
return session return session
} }
// NoCache ask this session do not retrieve data from cache system and
// get data from database directly.
func (session *Session) NoCache() *Session {
session.statement.UseCache = false
return session
}
// Join join_operator should be one of INNER, LEFT OUTER, CROSS etc - this will be prepended to JOIN // Join join_operator should be one of INNER, LEFT OUTER, CROSS etc - this will be prepended to JOIN
func (session *Session) Join(joinOperator string, tablename interface{}, condition interface{}, args ...interface{}) *Session { func (session *Session) Join(joinOperator string, tablename interface{}, condition interface{}, args ...interface{}) *Session {
session.statement.Join(joinOperator, tablename, condition, args...) session.statement.Join(joinOperator, tablename, condition, args...)
@ -351,19 +344,6 @@ func (session *Session) DB() *core.DB {
return session.db() return session.db()
} }
func (session *Session) canCache() bool {
if session.statement.RefTable == nil ||
session.statement.NeedTableName() ||
session.statement.RawSQL != "" ||
!session.statement.UseCache ||
session.statement.IsForUpdate ||
session.tx != nil ||
len(session.statement.SelectStr) > 0 {
return false
}
return true
}
func (session *Session) doPrepare(db *core.DB, sqlStr string) (stmt *core.Stmt, err error) { func (session *Session) doPrepare(db *core.DB, sqlStr string) (stmt *core.Stmt, err error) {
crc := crc32.ChecksumIEEE([]byte(sqlStr)) crc := crc32.ChecksumIEEE([]byte(sqlStr))
// TODO try hash(sqlStr+len(sqlStr)) // TODO try hash(sqlStr+len(sqlStr))

View File

@ -6,83 +6,13 @@ package xorm
import ( import (
"errors" "errors"
"strconv"
"xorm.io/builder" "xorm.io/builder"
"xorm.io/xorm/v2/caches"
"xorm.io/xorm/v2/schemas"
) )
// ErrNeedDeletedCond delete needs less one condition error // ErrNeedDeletedCond delete needs less one condition error
var ErrNeedDeletedCond = errors.New("Delete action needs at least one condition") var ErrNeedDeletedCond = errors.New("Delete action needs at least one condition")
func (session *Session) cacheDelete(table *schemas.Table, tableName, sqlStr string, args ...interface{}) error {
if table == nil ||
session.tx != nil {
return ErrCacheFailed
}
for _, filter := range session.engine.dialect.Filters() {
sqlStr = filter.Do(session.ctx, sqlStr)
}
newsql := session.statement.ConvertIDSQL(sqlStr)
if newsql == "" {
return ErrCacheFailed
}
cacher := session.engine.cacherMgr.GetCacher(tableName)
pkColumns := table.PKColumns()
ids, err := caches.GetCacheSql(cacher, tableName, newsql, args)
if err != nil {
rows, err := session.queryRows(newsql, args...)
if err != nil {
return err
}
defer rows.Close()
resultsSlice, err := session.engine.ScanStringMaps(rows)
if err != nil {
return err
}
ids = make([]schemas.PK, 0)
if len(resultsSlice) > 0 {
for _, data := range resultsSlice {
var id int64
var pk schemas.PK = make([]interface{}, 0)
for _, col := range pkColumns {
if v, ok := data[col.Name]; !ok {
return errors.New("no id")
} else if col.SQLType.IsText() {
pk = append(pk, v)
} else if col.SQLType.IsNumeric() {
id, err = strconv.ParseInt(v, 10, 64)
if err != nil {
return err
}
pk = append(pk, id)
} else {
return errors.New("not supported primary key type")
}
}
ids = append(ids, pk)
}
}
}
for _, id := range ids {
session.engine.logger.Debugf("[cache] delete cache obj: %v, %v", tableName, id)
sid, err := id.ToString()
if err != nil {
return err
}
cacher.DelBean(tableName, sid)
}
session.engine.logger.Debugf("[cache] clear cache table: %v", tableName)
cacher.ClearIds(tableName)
return nil
}
// Delete records, bean's non-empty fields are conditions // Delete records, bean's non-empty fields are conditions
// At least one condition must be set. // At least one condition must be set.
func (session *Session) Delete(beans ...interface{}) (int64, error) { func (session *Session) Delete(beans ...interface{}) (int64, error) {
@ -130,7 +60,6 @@ func (session *Session) delete(beans []interface{}, mustHaveConditions bool) (in
return 0, ErrNeedDeletedCond return 0, ErrNeedDeletedCond
} }
tableNameNoQuote := session.statement.TableName()
table := session.statement.RefTable table := session.statement.RefTable
realSQLWriter := builder.NewWriter() realSQLWriter := builder.NewWriter()
@ -154,14 +83,6 @@ func (session *Session) delete(beans []interface{}, mustHaveConditions bool) (in
}) })
} }
argsForCache := make([]interface{}, 0, len(deleteSQLWriter.Args())*2)
copy(argsForCache, deleteSQLWriter.Args())
argsForCache = append(deleteSQLWriter.Args(), argsForCache...)
if cacher := session.engine.GetCacher(tableNameNoQuote); cacher != nil && session.statement.UseCache {
_ = session.cacheDelete(table, tableNameNoQuote, deleteSQLWriter.String(), argsForCache...)
}
session.statement.RefTable = table session.statement.RefTable = table
res, err := session.exec(realSQLWriter.String(), realSQLWriter.Args()...) res, err := session.exec(realSQLWriter.String(), realSQLWriter.Args()...)
if err != nil { if err != nil {

View File

@ -11,9 +11,8 @@ import (
"strings" "strings"
"xorm.io/builder" "xorm.io/builder"
"xorm.io/xorm/v2/caches"
"xorm.io/xorm/v2/convert" "xorm.io/xorm/v2/convert"
"xorm.io/xorm/v2/internal/statements"
"xorm.io/xorm/v2/internal/utils" "xorm.io/xorm/v2/internal/utils"
"xorm.io/xorm/v2/schemas" "xorm.io/xorm/v2/schemas"
) )
@ -148,18 +147,6 @@ func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{})
return err return err
} }
if session.statement.ColumnMap.IsEmpty() && session.canCache() {
if cacher := session.engine.GetCacher(session.statement.TableName()); cacher != nil &&
!session.statement.IsDistinct &&
!session.statement.GetUnscoped() {
err = session.cacheFind(sliceElementType, sqlStr, rowsSlicePtr, args...)
if err != ErrCacheFailed {
return err
}
session.engine.logger.Warnf("Cache Find Failed")
}
}
return session.noCacheFind(table, sliceValue, sqlStr, args...) return session.noCacheFind(table, sliceValue, sqlStr, args...)
} }
@ -331,216 +318,3 @@ func (session *Session) noCacheFind(table *schemas.Table, containerValue reflect
} }
return rows.Err() return rows.Err()
} }
func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr interface{}, args ...interface{}) (err error) {
if !session.canCache() ||
utils.IndexNoCase(sqlStr, "having") != -1 ||
utils.IndexNoCase(sqlStr, "group by") != -1 {
return ErrCacheFailed
}
tableName := session.statement.TableName()
cacher := session.engine.cacherMgr.GetCacher(tableName)
if cacher == nil {
return nil
}
for _, filter := range session.engine.dialect.Filters() {
sqlStr = filter.Do(session.ctx, sqlStr)
}
newsql := session.statement.ConvertIDSQL(sqlStr)
if newsql == "" {
return ErrCacheFailed
}
table := session.statement.RefTable
ids, err := caches.GetCacheSql(cacher, tableName, newsql, args)
if err != nil {
rows, err := session.queryRows(newsql, args...)
if err != nil {
return err
}
defer rows.Close()
var i int
ids = make([]schemas.PK, 0)
for rows.Next() {
i++
if i > 500 {
session.engine.logger.Debugf("[cacheFind] ids length > 500, no cache")
return ErrCacheFailed
}
res := make([]string, len(table.PrimaryKeys))
err = rows.ScanSlice(&res)
if err != nil {
return err
}
var pk schemas.PK = make([]interface{}, len(table.PrimaryKeys))
for i, col := range table.PKColumns() {
pk[i], err = col.ConvertID(res[i])
if err != nil {
return err
}
}
ids = append(ids, pk)
}
if rows.Err() != nil {
return rows.Err()
}
session.engine.logger.Debugf("[cache] cache sql: %v, %v, %v, %v, %v", ids, tableName, sqlStr, newsql, args)
err = caches.PutCacheSql(cacher, ids, tableName, newsql, args)
if err != nil {
return err
}
} else {
session.engine.logger.Debugf("[cache] cache hit sql: %v, %v, %v, %v", tableName, sqlStr, newsql, args)
}
sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr))
ididxes := make(map[string]int)
var ides []schemas.PK
temps := make([]interface{}, len(ids))
for idx, id := range ids {
sid, err := id.ToString()
if err != nil {
return err
}
bean := cacher.GetBean(tableName, sid)
// fix issue #894
isHit := func() (ht bool) {
if bean == nil {
ht = false
return
}
ckb := reflect.ValueOf(bean).Elem().Type()
ht = ckb == t
if !ht && t.Kind() == reflect.Ptr {
ht = t.Elem() == ckb
}
return
}
if !isHit() {
ides = append(ides, id)
ididxes[sid] = idx
} else {
session.engine.logger.Debugf("[cache] cache hit bean: %v, %v, %v", tableName, id, bean)
pk, err := table.IDOfV(reflect.ValueOf(bean))
if err != nil {
return err
}
xid, err := pk.ToString()
if err != nil {
return err
}
if sid != xid {
session.engine.logger.Errorf("[cache] error cache: %v, %v, %v", xid, sid, bean)
return ErrCacheFailed
}
temps[idx] = bean
}
}
if len(ides) > 0 {
slices := reflect.New(reflect.SliceOf(t))
beans := slices.Interface()
statement := session.statement
session.statement = statements.NewStatement(
session.engine.dialect,
session.engine.tagParser,
session.engine.DatabaseTZ,
)
if len(table.PrimaryKeys) == 1 {
ff := make([]interface{}, 0, len(ides))
for _, ie := range ides {
ff = append(ff, ie[0])
}
session.In("`"+table.PrimaryKeys[0]+"`", ff...)
} else {
for _, ie := range ides {
cond := builder.NewCond()
for i, name := range table.PrimaryKeys {
cond = cond.And(builder.Eq{"`" + name + "`": ie[i]})
}
session.Or(cond)
}
}
err = session.NoCache().Table(tableName).find(beans)
if err != nil {
return err
}
session.statement = statement
vs := reflect.Indirect(reflect.ValueOf(beans))
for i := 0; i < vs.Len(); i++ {
rv := vs.Index(i)
if rv.Kind() != reflect.Ptr {
rv = rv.Addr()
}
id, err := table.IDOfV(rv)
if err != nil {
return err
}
sid, err := id.ToString()
if err != nil {
return err
}
bean := rv.Interface()
temps[ididxes[sid]] = bean
session.engine.logger.Debugf("[cache] cache bean: %v, %v, %v, %v", tableName, id, bean, temps)
cacher.PutBean(tableName, sid, bean)
}
}
for j := 0; j < len(temps); j++ {
bean := temps[j]
if bean == nil {
session.engine.logger.Warnf("[cache] cache no hit: %v, %v, %v", tableName, ids[j], temps)
// return errors.New("cache error") // !nashtsai! no need to return error, but continue instead
continue
}
if sliceValue.Kind() == reflect.Slice {
if t.Kind() == reflect.Ptr {
sliceValue.Set(reflect.Append(sliceValue, reflect.ValueOf(bean)))
} else {
sliceValue.Set(reflect.Append(sliceValue, reflect.Indirect(reflect.ValueOf(bean))))
}
} else if sliceValue.Kind() == reflect.Map {
key := ids[j]
keyType := sliceValue.Type().Key()
keyValue := reflect.New(keyType)
var ikey interface{}
if len(key) == 1 {
if err := convert.AssignValue(keyValue, key[0]); err != nil {
return err
}
ikey = keyValue.Elem().Interface()
} else {
if keyType.Kind() != reflect.Slice {
return errors.New("table have multiple primary keys, key is not schemas.PK or slice")
}
ikey = key
}
if t.Kind() == reflect.Ptr {
sliceValue.SetMapIndex(reflect.ValueOf(ikey), reflect.ValueOf(bean))
} else {
sliceValue.SetMapIndex(reflect.ValueOf(ikey), reflect.Indirect(reflect.ValueOf(bean)))
}
}
}
return nil
}

View File

@ -10,10 +10,8 @@ import (
"fmt" "fmt"
"math/big" "math/big"
"reflect" "reflect"
"strconv"
"time" "time"
"xorm.io/xorm/v2/caches"
"xorm.io/xorm/v2/convert" "xorm.io/xorm/v2/convert"
"xorm.io/xorm/v2/core" "xorm.io/xorm/v2/core"
"xorm.io/xorm/v2/internal/utils" "xorm.io/xorm/v2/internal/utils"
@ -90,17 +88,6 @@ func (session *Session) get(beans ...interface{}) (bool, error) {
} }
table := session.statement.RefTable table := session.statement.RefTable
if session.statement.ColumnMap.IsEmpty() && session.canCache() && isStruct {
if cacher := session.engine.GetCacher(session.statement.TableName()); cacher != nil &&
!session.statement.GetUnscoped() {
has, err := session.cacheGet(beans[0], sqlStr, args...)
if err != ErrCacheFailed {
return has, err
}
}
}
context := session.statement.Context context := session.statement.Context
if context != nil && isStruct { if context != nil && isStruct {
res := context.Get(fmt.Sprintf("%v-%v", sqlStr, args)) res := context.Get(fmt.Sprintf("%v-%v", sqlStr, args))
@ -270,97 +257,3 @@ func (session *Session) getMap(rows *core.Rows, types []*sql.ColumnType, fields
return fmt.Errorf("unspoorted map type: %t", t) return fmt.Errorf("unspoorted map type: %t", t)
} }
} }
func (session *Session) cacheGet(bean interface{}, sqlStr string, args ...interface{}) (has bool, err error) {
// if has no reftable, then don't use cache currently
if !session.canCache() {
return false, ErrCacheFailed
}
for _, filter := range session.engine.dialect.Filters() {
sqlStr = filter.Do(session.ctx, sqlStr)
}
newsql := session.statement.ConvertIDSQL(sqlStr)
if newsql == "" {
return false, ErrCacheFailed
}
tableName := session.statement.TableName()
cacher := session.engine.cacherMgr.GetCacher(tableName)
session.engine.logger.Debugf("[cache] Get SQL: %s, %v", newsql, args)
table := session.statement.RefTable
ids, err := caches.GetCacheSql(cacher, tableName, newsql, args)
if err != nil {
res := make([]string, len(table.PrimaryKeys))
rows, err := session.NoCache().queryRows(newsql, args...)
if err != nil {
return false, err
}
defer rows.Close()
if rows.Next() {
err = rows.ScanSlice(&res)
if err != nil {
return true, err
}
} else {
if rows.Err() != nil {
return false, rows.Err()
}
return false, ErrCacheFailed
}
var pk schemas.PK = make([]interface{}, len(table.PrimaryKeys))
for i, col := range table.PKColumns() {
if col.SQLType.IsText() {
pk[i] = res[i]
} else if col.SQLType.IsNumeric() {
n, err := strconv.ParseInt(res[i], 10, 64)
if err != nil {
return false, err
}
pk[i] = n
} else {
return false, errors.New("unsupported")
}
}
ids = []schemas.PK{pk}
session.engine.logger.Debugf("[cache] cache ids: %s, %v", newsql, ids)
err = caches.PutCacheSql(cacher, ids, tableName, newsql, args)
if err != nil {
return false, err
}
} else {
session.engine.logger.Debugf("[cache] cache hit: %s, %v", newsql, ids)
}
if len(ids) > 0 {
structValue := reflect.Indirect(reflect.ValueOf(bean))
id := ids[0]
session.engine.logger.Debugf("[cache] get bean: %s, %v", tableName, id)
sid, err := id.ToString()
if err != nil {
return false, err
}
cacheBean := cacher.GetBean(tableName, sid)
if cacheBean == nil {
cacheBean = bean
has, err = session.nocacheGet(reflect.Struct, table, []interface{}{cacheBean}, sqlStr, args...)
if err != nil || !has {
return has, err
}
session.engine.logger.Debugf("[cache] cache bean: %s, %v, %v", tableName, id, cacheBean)
cacher.PutBean(tableName, sid, cacheBean)
} else {
session.engine.logger.Debugf("[cache] cache hit: %s, %v, %v", tableName, id, cacheBean)
has = true
}
structValue.Set(reflect.Indirect(reflect.ValueOf(cacheBean)))
return has, nil
}
return false, nil
}

View File

@ -197,8 +197,6 @@ func (session *Session) insertMultipleStruct(rowsSlicePtr interface{}) (int64, e
return 0, err return 0, err
} }
_ = session.cacheInsert(tableName)
lenAfterClosures := len(session.afterClosures) lenAfterClosures := len(session.afterClosures)
for i := 0; i < size; i++ { for i := 0; i < size; i++ {
elemValue := reflect.Indirect(sliceValue.Index(i)).Addr().Interface() elemValue := reflect.Indirect(sliceValue.Index(i)).Addr().Interface()
@ -354,8 +352,6 @@ func (session *Session) insertStruct(bean interface{}) (int64, error) {
defer handleAfterInsertProcessorFunc(bean) defer handleAfterInsertProcessorFunc(bean)
_ = session.cacheInsert(tableName)
if table.Version != "" && session.statement.CheckVersion { if table.Version != "" && session.statement.CheckVersion {
verValue, err := table.VersionColumn().ValueOf(bean) verValue, err := table.VersionColumn().ValueOf(bean)
if err != nil { if err != nil {
@ -384,8 +380,6 @@ func (session *Session) insertStruct(bean interface{}) (int64, error) {
defer handleAfterInsertProcessorFunc(bean) defer handleAfterInsertProcessorFunc(bean)
_ = session.cacheInsert(tableName)
if table.Version != "" && session.statement.CheckVersion { if table.Version != "" && session.statement.CheckVersion {
verValue, err := table.VersionColumn().ValueOf(bean) verValue, err := table.VersionColumn().ValueOf(bean)
if err != nil { if err != nil {
@ -433,19 +427,6 @@ func (session *Session) InsertOne(bean interface{}) (int64, error) {
return session.insertStruct(bean) return session.insertStruct(bean)
} }
func (session *Session) cacheInsert(table string) error {
if !session.statement.UseCache {
return nil
}
cacher := session.engine.cacherMgr.GetCacher(table)
if cacher == nil {
return nil
}
session.engine.logger.Debugf("[cache] clear SQL: %v", table)
cacher.ClearIds(table)
return nil
}
// genInsertColumns generates insert needed columns // genInsertColumns generates insert needed columns
func (session *Session) genInsertColumns(bean interface{}) ([]string, []interface{}, error) { func (session *Session) genInsertColumns(bean interface{}) ([]string, []interface{}, error) {
table := session.statement.RefTable table := session.statement.RefTable
@ -655,10 +636,6 @@ func (session *Session) insertMap(columns []string, args []interface{}) (int64,
} }
sql = session.engine.dialect.Quoter().Replace(sql) sql = session.engine.dialect.Quoter().Replace(sql)
if err := session.cacheInsert(tableName); err != nil {
return 0, err
}
res, err := session.exec(sql, args...) res, err := session.exec(sql, args...)
if err != nil { if err != nil {
return 0, err return 0, err
@ -682,10 +659,6 @@ func (session *Session) insertMultipleMap(columns []string, argss [][]interface{
} }
sql = session.engine.dialect.Quoter().Replace(sql) sql = session.engine.dialect.Quoter().Replace(sql)
if err := session.cacheInsert(tableName); err != nil {
return 0, err
}
res, err := session.exec(sql, args...) res, err := session.exec(sql, args...)
if err != nil { if err != nil {
return 0, err return 0, err

View File

@ -80,7 +80,7 @@ func (session *Session) bufferIterate(bean interface{}, fun IterFunc) error {
for bufferSize > 0 { for bufferSize > 0 {
slice := reflect.New(sliceType) slice := reflect.New(sliceType)
if err := session.NoCache().Limit(bufferSize, start).find(slice.Interface(), bean); err != nil { if err := session.Limit(bufferSize, start).find(slice.Interface(), bean); err != nil {
return err return err
} }

View File

@ -191,7 +191,6 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6
} }
tableName := session.statement.TableName() // table name must been get before exec because statement will be reset tableName := session.statement.TableName() // table name must been get before exec because statement will be reset
useCache := session.statement.UseCache
res, err := session.exec(updateWriter.String(), updateWriter.Args()...) res, err := session.exec(updateWriter.String(), updateWriter.Args()...)
if err != nil { if err != nil {
@ -202,12 +201,6 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6
} }
} }
if cacher := session.engine.GetCacher(tableName); cacher != nil && useCache {
session.engine.logger.Debugf("[cache] clear table: %v", tableName)
cacher.ClearIds(tableName)
cacher.ClearBeans(tableName)
}
// handle after update processors // handle after update processors
if session.isAutoCommit { if session.isAutoCommit {
for _, closure := range session.afterClosures { for _, closure := range session.afterClosures {

View File

@ -5,16 +5,13 @@
package tags package tags
import ( import (
"encoding/gob"
"errors" "errors"
"fmt" "fmt"
"reflect" "reflect"
"strings" "strings"
"sync" "sync"
"time"
"unicode" "unicode"
"xorm.io/xorm/v2/caches"
"xorm.io/xorm/v2/convert" "xorm.io/xorm/v2/convert"
"xorm.io/xorm/v2/dialects" "xorm.io/xorm/v2/dialects"
"xorm.io/xorm/v2/names" "xorm.io/xorm/v2/names"
@ -44,19 +41,17 @@ type Parser struct {
columnMapper names.Mapper columnMapper names.Mapper
tableMapper names.Mapper tableMapper names.Mapper
handlers map[string]Handler handlers map[string]Handler
cacherMgr *caches.Manager
tableCache sync.Map // map[reflect.Type]*schemas.Table tableCache sync.Map // map[reflect.Type]*schemas.Table
} }
// NewParser creates a tag parser // NewParser creates a tag parser
func NewParser(identifier string, dialect dialects.Dialect, tableMapper, columnMapper names.Mapper, cacherMgr *caches.Manager) *Parser { func NewParser(identifier string, dialect dialects.Dialect, tableMapper, columnMapper names.Mapper) *Parser {
return &Parser{ return &Parser{
identifier: identifier, identifier: identifier,
dialect: dialect, dialect: dialect,
tableMapper: tableMapper, tableMapper: tableMapper,
columnMapper: columnMapper, columnMapper: columnMapper,
handlers: defaultTagHandlers, handlers: defaultTagHandlers,
cacherMgr: cacherMgr,
} }
} }
@ -103,14 +98,6 @@ func (parser *Parser) ParseWithCache(v reflect.Value) (*schemas.Table, error) {
parser.tableCache.Store(t, table) parser.tableCache.Store(t, table)
if parser.cacherMgr.GetDefaultCacher() != nil {
if v.CanAddr() {
gob.Register(v.Addr().Interface())
} else {
gob.Register(v.Interface())
}
}
return table, nil return table, nil
} }
@ -236,17 +223,6 @@ func (parser *Parser) parseFieldWithTags(table *schemas.Table, fieldIndex int, f
col.Name = ctx.tag.name col.Name = ctx.tag.name
} }
} }
if ctx.hasCacheTag {
if parser.cacherMgr.GetDefaultCacher() != nil {
parser.cacherMgr.SetCacher(table.Name, parser.cacherMgr.GetDefaultCacher())
} else {
parser.cacherMgr.SetCacher(table.Name, caches.NewLRUCacher2(caches.NewMemoryStore(), time.Hour, 10000))
}
}
if ctx.hasNoCacheTag {
parser.cacherMgr.SetCacher(table.Name, nil)
}
} }
if col.SQLType.Name == "" { if col.SQLType.Name == "" {

View File

@ -10,7 +10,6 @@ import (
"testing" "testing"
"time" "time"
"xorm.io/xorm/v2/caches"
"xorm.io/xorm/v2/dialects" "xorm.io/xorm/v2/dialects"
"xorm.io/xorm/v2/names" "xorm.io/xorm/v2/names"
"xorm.io/xorm/v2/schemas" "xorm.io/xorm/v2/schemas"
@ -46,7 +45,6 @@ func TestParseTableName(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SnakeMapper{}, names.SnakeMapper{},
names.SnakeMapper{}, names.SnakeMapper{},
caches.NewManager(),
) )
table, err := parser.Parse(reflect.ValueOf(new(ParseTableName1))) table, err := parser.Parse(reflect.ValueOf(new(ParseTableName1)))
assert.NoError(t, err) assert.NoError(t, err)
@ -67,7 +65,6 @@ func TestParseTableComment(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SnakeMapper{}, names.SnakeMapper{},
names.SnakeMapper{}, names.SnakeMapper{},
caches.NewManager(),
) )
table, err := parser.Parse(reflect.ValueOf(new(ParseTableComment))) table, err := parser.Parse(reflect.ValueOf(new(ParseTableComment)))
@ -97,7 +94,6 @@ func TestUnexportField(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SnakeMapper{}, names.SnakeMapper{},
names.SnakeMapper{}, names.SnakeMapper{},
caches.NewManager(),
) )
type VanilaStruct struct { type VanilaStruct struct {
@ -130,7 +126,6 @@ func TestParseWithOtherIdentifier(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SameMapper{}, names.SameMapper{},
names.SnakeMapper{}, names.SnakeMapper{},
caches.NewManager(),
) )
type StructWithDBTag struct { type StructWithDBTag struct {
@ -154,7 +149,6 @@ func TestParseWithIgnore(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SameMapper{}, names.SameMapper{},
names.SnakeMapper{}, names.SnakeMapper{},
caches.NewManager(),
) )
type StructWithIgnoreTag struct { type StructWithIgnoreTag struct {
@ -173,7 +167,6 @@ func TestParseWithAutoincrement(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SnakeMapper{}, names.SnakeMapper{},
names.GonicMapper{}, names.GonicMapper{},
caches.NewManager(),
) )
type StructWithAutoIncrement struct { type StructWithAutoIncrement struct {
@ -195,7 +188,6 @@ func TestParseWithAutoincrement2(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SnakeMapper{}, names.SnakeMapper{},
names.GonicMapper{}, names.GonicMapper{},
caches.NewManager(),
) )
type StructWithAutoIncrement2 struct { type StructWithAutoIncrement2 struct {
@ -218,7 +210,6 @@ func TestParseWithNullable(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SnakeMapper{}, names.SnakeMapper{},
names.GonicMapper{}, names.GonicMapper{},
caches.NewManager(),
) )
type StructWithNullable struct { type StructWithNullable struct {
@ -243,7 +234,6 @@ func TestParseWithTimes(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SnakeMapper{}, names.SnakeMapper{},
names.GonicMapper{}, names.GonicMapper{},
caches.NewManager(),
) )
type StructWithTimes struct { type StructWithTimes struct {
@ -276,7 +266,6 @@ func TestParseWithExtends(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SnakeMapper{}, names.SnakeMapper{},
names.GonicMapper{}, names.GonicMapper{},
caches.NewManager(),
) )
type StructWithEmbed struct { type StructWithEmbed struct {
@ -307,59 +296,12 @@ func TestParseWithExtends(t *testing.T) {
assert.True(t, table.Columns()[3].IsDeleted) assert.True(t, table.Columns()[3].IsDeleted)
} }
func TestParseWithCache(t *testing.T) {
parser := NewParser(
"db",
dialects.QueryDialect("mysql"),
names.SnakeMapper{},
names.GonicMapper{},
caches.NewManager(),
)
type StructWithCache struct {
Name string `db:"cache"`
}
table, err := parser.Parse(reflect.ValueOf(new(StructWithCache)))
assert.NoError(t, err)
assert.EqualValues(t, "struct_with_cache", table.Name)
assert.EqualValues(t, 1, len(table.Columns()))
assert.EqualValues(t, "name", table.Columns()[0].Name)
assert.True(t, table.Columns()[0].Nullable)
cacher := parser.cacherMgr.GetCacher(table.Name)
assert.NotNil(t, cacher)
}
func TestParseWithNoCache(t *testing.T) {
parser := NewParser(
"db",
dialects.QueryDialect("mysql"),
names.SnakeMapper{},
names.GonicMapper{},
caches.NewManager(),
)
type StructWithNoCache struct {
Name string `db:"nocache"`
}
table, err := parser.Parse(reflect.ValueOf(new(StructWithNoCache)))
assert.NoError(t, err)
assert.EqualValues(t, "struct_with_no_cache", table.Name)
assert.EqualValues(t, 1, len(table.Columns()))
assert.EqualValues(t, "name", table.Columns()[0].Name)
assert.True(t, table.Columns()[0].Nullable)
cacher := parser.cacherMgr.GetCacher(table.Name)
assert.Nil(t, cacher)
}
func TestParseWithEnum(t *testing.T) { func TestParseWithEnum(t *testing.T) {
parser := NewParser( parser := NewParser(
"db", "db",
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SnakeMapper{}, names.SnakeMapper{},
names.GonicMapper{}, names.GonicMapper{},
caches.NewManager(),
) )
type StructWithEnum struct { type StructWithEnum struct {
@ -385,7 +327,6 @@ func TestParseWithSet(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SnakeMapper{}, names.SnakeMapper{},
names.GonicMapper{}, names.GonicMapper{},
caches.NewManager(),
) )
type StructWithSet struct { type StructWithSet struct {
@ -411,7 +352,6 @@ func TestParseWithIndex(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SnakeMapper{}, names.SnakeMapper{},
names.GonicMapper{}, names.GonicMapper{},
caches.NewManager(),
) )
type StructWithIndex struct { type StructWithIndex struct {
@ -441,7 +381,6 @@ func TestParseWithVersion(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SnakeMapper{}, names.SnakeMapper{},
names.GonicMapper{}, names.GonicMapper{},
caches.NewManager(),
) )
type StructWithVersion struct { type StructWithVersion struct {
@ -466,7 +405,6 @@ func TestParseWithLocale(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SnakeMapper{}, names.SnakeMapper{},
names.GonicMapper{}, names.GonicMapper{},
caches.NewManager(),
) )
type StructWithLocale struct { type StructWithLocale struct {
@ -490,7 +428,6 @@ func TestParseWithDefault(t *testing.T) {
dialects.QueryDialect("mysql"), dialects.QueryDialect("mysql"),
names.SnakeMapper{}, names.SnakeMapper{},
names.GonicMapper{}, names.GonicMapper{},
caches.NewManager(),
) )
type StructWithDefault struct { type StructWithDefault struct {
@ -516,7 +453,6 @@ func TestParseWithOnlyToDB(t *testing.T) {
"DB": true, "DB": true,
}, },
names.SnakeMapper{}, names.SnakeMapper{},
caches.NewManager(),
) )
type StructWithOnlyToDB struct { type StructWithOnlyToDB struct {
@ -542,7 +478,6 @@ func TestParseWithJSON(t *testing.T) {
"JSON": true, "JSON": true,
}, },
names.SnakeMapper{}, names.SnakeMapper{},
caches.NewManager(),
) )
type StructWithJSON struct { type StructWithJSON struct {
@ -565,7 +500,6 @@ func TestParseWithJSONB(t *testing.T) {
"JSONB": true, "JSONB": true,
}, },
names.SnakeMapper{}, names.SnakeMapper{},
caches.NewManager(),
) )
type StructWithJSONB struct { type StructWithJSONB struct {
@ -590,7 +524,6 @@ func TestParseWithSQLType(t *testing.T) {
names.GonicMapper{ names.GonicMapper{
"UUID": true, "UUID": true,
}, },
caches.NewManager(),
) )
type StructWithSQLType struct { type StructWithSQLType struct {

View File

@ -90,8 +90,6 @@ type Context struct {
isUnique bool isUnique bool
indexNames map[string]int indexNames map[string]int
parser *Parser parser *Parser
hasCacheTag bool
hasNoCacheTag bool
ignoreNext bool ignoreNext bool
isUnsigned bool isUnsigned bool
} }
@ -118,8 +116,6 @@ var defaultTagHandlers = map[string]Handler{
"NOTNULL": NotNullTagHandler, "NOTNULL": NotNullTagHandler,
"INDEX": IndexTagHandler, "INDEX": IndexTagHandler,
"UNIQUE": UniqueTagHandler, "UNIQUE": UniqueTagHandler,
"CACHE": CacheTagHandler,
"NOCACHE": NoCacheTagHandler,
"COMMENT": CommentTagHandler, "COMMENT": CommentTagHandler,
"EXTENDS": ExtendsTagHandler, "EXTENDS": ExtendsTagHandler,
"UNSIGNED": UnsignedTagHandler, "UNSIGNED": UnsignedTagHandler,
@ -391,19 +387,3 @@ func ExtendsTagHandler(ctx *Context) error {
} }
return ErrIgnoreField return ErrIgnoreField
} }
// CacheTagHandler describes cache tag handler
func CacheTagHandler(ctx *Context) error {
if !ctx.hasCacheTag {
ctx.hasCacheTag = true
}
return nil
}
// NoCacheTagHandler describes nocache tag handler
func NoCacheTagHandler(ctx *Context) error {
if !ctx.hasNoCacheTag {
ctx.hasNoCacheTag = true
}
return nil
}

View File

@ -1,183 +0,0 @@
// Copyright 2017 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tests
import (
"testing"
"time"
"xorm.io/xorm/v2/caches"
"github.com/stretchr/testify/assert"
)
func TestCacheFind(t *testing.T) {
assert.NoError(t, PrepareEngine())
type MailBox struct {
Id int64 `xorm:"pk"`
Username string
Password string
}
oldCacher := testEngine.GetDefaultCacher()
cacher := caches.NewLRUCacher2(caches.NewMemoryStore(), time.Hour, 10000)
testEngine.SetDefaultCacher(cacher)
assert.NoError(t, testEngine.Sync(new(MailBox)))
inserts := []*MailBox{
{
Id: 0,
Username: "user1",
Password: "pass1",
},
{
Id: 1,
Username: "user2",
Password: "pass2",
},
}
_, err := testEngine.Insert(inserts[0], inserts[1])
assert.NoError(t, err)
var boxes []MailBox
assert.NoError(t, testEngine.Find(&boxes))
assert.EqualValues(t, 2, len(boxes))
for i, box := range boxes {
assert.Equal(t, inserts[i].Id, box.Id)
assert.Equal(t, inserts[i].Username, box.Username)
assert.Equal(t, inserts[i].Password, box.Password)
}
boxes = make([]MailBox, 0, 2)
assert.NoError(t, testEngine.Find(&boxes))
assert.EqualValues(t, 2, len(boxes))
for i, box := range boxes {
assert.Equal(t, inserts[i].Id, box.Id)
assert.Equal(t, inserts[i].Username, box.Username)
assert.Equal(t, inserts[i].Password, box.Password)
}
boxes = make([]MailBox, 0, 2)
assert.NoError(t, testEngine.Alias("a").Where("`a`.`id`> -1").
Asc("`a`.`id`").Find(&boxes))
assert.EqualValues(t, 2, len(boxes))
for i, box := range boxes {
assert.Equal(t, inserts[i].Id, box.Id)
assert.Equal(t, inserts[i].Username, box.Username)
assert.Equal(t, inserts[i].Password, box.Password)
}
type MailBox4 struct {
Id int64
Username string
Password string
}
boxes2 := make([]MailBox4, 0, 2)
assert.NoError(t, testEngine.Table("mail_box").Where("`mail_box`.`id` > -1").
Asc("mail_box.id").Find(&boxes2))
assert.EqualValues(t, 2, len(boxes2))
for i, box := range boxes2 {
assert.Equal(t, inserts[i].Id, box.Id)
assert.Equal(t, inserts[i].Username, box.Username)
assert.Equal(t, inserts[i].Password, box.Password)
}
testEngine.SetDefaultCacher(oldCacher)
}
func TestCacheFind2(t *testing.T) {
assert.NoError(t, PrepareEngine())
type MailBox2 struct {
Id uint64 `xorm:"pk"`
Username string
Password string
}
oldCacher := testEngine.GetDefaultCacher()
cacher := caches.NewLRUCacher2(caches.NewMemoryStore(), time.Hour, 10000)
testEngine.SetDefaultCacher(cacher)
assert.NoError(t, testEngine.Sync(new(MailBox2)))
inserts := []*MailBox2{
{
Id: 0,
Username: "user1",
Password: "pass1",
},
{
Id: 1,
Username: "user2",
Password: "pass2",
},
}
_, err := testEngine.Insert(inserts[0], inserts[1])
assert.NoError(t, err)
var boxes []MailBox2
assert.NoError(t, testEngine.Find(&boxes))
assert.EqualValues(t, 2, len(boxes))
for i, box := range boxes {
assert.Equal(t, inserts[i].Id, box.Id)
assert.Equal(t, inserts[i].Username, box.Username)
assert.Equal(t, inserts[i].Password, box.Password)
}
boxes = make([]MailBox2, 0, 2)
assert.NoError(t, testEngine.Find(&boxes))
assert.EqualValues(t, 2, len(boxes))
for i, box := range boxes {
assert.Equal(t, inserts[i].Id, box.Id)
assert.Equal(t, inserts[i].Username, box.Username)
assert.Equal(t, inserts[i].Password, box.Password)
}
testEngine.SetDefaultCacher(oldCacher)
}
func TestCacheGet(t *testing.T) {
assert.NoError(t, PrepareEngine())
type MailBox3 struct {
Id uint64
Username string
Password string
}
oldCacher := testEngine.GetDefaultCacher()
cacher := caches.NewLRUCacher2(caches.NewMemoryStore(), time.Hour, 10000)
testEngine.SetDefaultCacher(cacher)
assert.NoError(t, testEngine.Sync(new(MailBox3)))
inserts := []*MailBox3{
{
Username: "user1",
Password: "pass1",
},
}
_, err := testEngine.Insert(inserts[0])
assert.NoError(t, err)
var box1 MailBox3
has, err := testEngine.Where("`id` = ?", inserts[0].Id).Get(&box1)
assert.NoError(t, err)
assert.True(t, has)
assert.EqualValues(t, "user1", box1.Username)
assert.EqualValues(t, "pass1", box1.Password)
var box2 MailBox3
has, err = testEngine.Where("`id` = ?", inserts[0].Id).Get(&box2)
assert.NoError(t, err)
assert.True(t, has)
assert.EqualValues(t, "user1", box2.Username)
assert.EqualValues(t, "pass1", box2.Password)
testEngine.SetDefaultCacher(oldCacher)
}

View File

@ -9,7 +9,6 @@ import (
"testing" "testing"
"time" "time"
"xorm.io/xorm/v2/caches"
"xorm.io/xorm/v2/schemas" "xorm.io/xorm/v2/schemas"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -217,38 +216,6 @@ func TestDeleted(t *testing.T) {
assert.EqualValues(t, 2, len(records3)) assert.EqualValues(t, 2, len(records3))
} }
func TestCacheDelete(t *testing.T) {
assert.NoError(t, PrepareEngine())
oldCacher := testEngine.GetDefaultCacher()
cacher := caches.NewLRUCacher(caches.NewMemoryStore(), 1000)
testEngine.SetDefaultCacher(cacher)
type CacheDeleteStruct struct {
Id int64
}
err := testEngine.CreateTables(&CacheDeleteStruct{})
assert.NoError(t, err)
_, err = testEngine.Insert(&CacheDeleteStruct{})
assert.NoError(t, err)
aff, err := testEngine.Delete(&CacheDeleteStruct{
Id: 1,
})
assert.NoError(t, err)
assert.EqualValues(t, aff, 1)
aff, err = testEngine.Unscoped().Delete(&CacheDeleteStruct{
Id: 1,
})
assert.NoError(t, err)
assert.EqualValues(t, aff, 0)
testEngine.SetDefaultCacher(oldCacher)
}
func TestUnscopeDelete(t *testing.T) { func TestUnscopeDelete(t *testing.T) {
assert.NoError(t, PrepareEngine()) assert.NoError(t, PrepareEngine())

View File

@ -509,7 +509,7 @@ func TestContextGet(t *testing.T) {
context := contexts.NewMemoryContextCache() context := contexts.NewMemoryContextCache()
var c2 ContextGetStruct var c2 ContextGetStruct
has, err := sess.ID(1).NoCache().ContextCache(context).Get(&c2) has, err := sess.ID(1).ContextCache(context).Get(&c2)
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, has) assert.True(t, has)
assert.EqualValues(t, 1, c2.Id) assert.EqualValues(t, 1, c2.Id)
@ -519,7 +519,7 @@ func TestContextGet(t *testing.T) {
assert.True(t, len(args) > 0) assert.True(t, len(args) > 0)
var c3 ContextGetStruct var c3 ContextGetStruct
has, err = sess.ID(1).NoCache().ContextCache(context).Get(&c3) has, err = sess.ID(1).ContextCache(context).Get(&c3)
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, has) assert.True(t, has)
assert.EqualValues(t, 1, c3.Id) assert.EqualValues(t, 1, c3.Id)
@ -544,14 +544,14 @@ func TestContextGet2(t *testing.T) {
context := contexts.NewMemoryContextCache() context := contexts.NewMemoryContextCache()
var c2 ContextGetStruct2 var c2 ContextGetStruct2
has, err := testEngine.ID(1).NoCache().ContextCache(context).Get(&c2) has, err := testEngine.ID(1).ContextCache(context).Get(&c2)
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, has) assert.True(t, has)
assert.EqualValues(t, 1, c2.Id) assert.EqualValues(t, 1, c2.Id)
assert.EqualValues(t, "1", c2.Name) assert.EqualValues(t, "1", c2.Name)
var c3 ContextGetStruct2 var c3 ContextGetStruct2
has, err = testEngine.ID(1).NoCache().ContextCache(context).Get(&c3) has, err = testEngine.ID(1).ContextCache(context).Get(&c3)
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, has) assert.True(t, has)
assert.EqualValues(t, 1, c3.Id) assert.EqualValues(t, 1, c3.Id)

View File

@ -508,30 +508,6 @@ func TestExtends5(t *testing.T) {
} }
} }
func TestCacheTag(t *testing.T) {
assert.NoError(t, PrepareEngine())
type CacheDomain struct {
Id int64 `xorm:"pk cache"`
Name string
}
assert.NoError(t, testEngine.CreateTables(&CacheDomain{}))
assert.True(t, testEngine.GetCacher(testEngine.TableName(&CacheDomain{})) != nil)
}
func TestNoCacheTag(t *testing.T) {
assert.NoError(t, PrepareEngine())
type NoCacheDomain struct {
Id int64 `xorm:"pk nocache"`
Name string
}
assert.NoError(t, testEngine.CreateTables(&NoCacheDomain{}))
assert.True(t, testEngine.GetCacher(testEngine.TableName(&NoCacheDomain{})) == nil)
}
type IDGonicMapper struct { type IDGonicMapper struct {
ID int64 ID int64
} }

View File

@ -14,7 +14,7 @@ import (
"testing" "testing"
"xorm.io/xorm/v2" "xorm.io/xorm/v2"
"xorm.io/xorm/v2/caches"
"xorm.io/xorm/v2/dialects" "xorm.io/xorm/v2/dialects"
"xorm.io/xorm/v2/log" "xorm.io/xorm/v2/log"
"xorm.io/xorm/v2/names" "xorm.io/xorm/v2/names"
@ -30,7 +30,6 @@ var (
showSQL = flag.Bool("show_sql", true, "show generated SQLs") showSQL = flag.Bool("show_sql", true, "show generated SQLs")
ptrConnStr = flag.String("conn_str", "./test.db?cache=shared&mode=rwc", "test database connection string") ptrConnStr = flag.String("conn_str", "./test.db?cache=shared&mode=rwc", "test database connection string")
mapType = flag.String("map_type", "snake", "indicate the name mapping") mapType = flag.String("map_type", "snake", "indicate the name mapping")
cacheFlag = flag.Bool("cache", false, "if enable cache")
cluster = flag.Bool("cluster", false, "if this is a cluster") cluster = flag.Bool("cluster", false, "if this is a cluster")
splitter = flag.String("splitter", ";", "the splitter on connstr for cluster") splitter = flag.String("splitter", ";", "the splitter on connstr for cluster")
schema = flag.String("schema", "", "specify the schema") schema = flag.String("schema", "", "specify the schema")
@ -125,10 +124,6 @@ func createEngine(dbType, connStr string) error {
} }
testEngine.ShowSQL(*showSQL) testEngine.ShowSQL(*showSQL)
testEngine.SetLogLevel(log.LOG_DEBUG) testEngine.SetLogLevel(log.LOG_DEBUG)
if *cacheFlag {
cacher := caches.NewLRUCacher(caches.NewMemoryStore(), 100000)
testEngine.SetDefaultCacher(cacher)
}
if len(*mapType) > 0 { if len(*mapType) > 0 {
switch *mapType { switch *mapType {