use tabs

parent 286b8725ed
commit 59412a951c
@@ -25,3 +25,4 @@ vendor
*.log
.vendor
temp_test.go
base_test.go (5767 changes)
File diff suppressed because it is too large

@@ -1,174 +1,174 @@
package xorm

import (
    "database/sql"
    "testing"
)

type BigStruct struct {
    Id       int64
    Name     string
    Title    string
    Age      string
    Alias    string
    NickName string
}

func doBenchDriverInsert(db *sql.DB, b *testing.B) {
    b.StartTimer()
    for i := 0; i < b.N; i++ {
        _, err := db.Exec(`insert into big_struct (name, title, age, alias, nick_name)
        values ('fafdasf', 'fadfa', 'afadfsaf', 'fadfafdsafd', 'fadfafdsaf')`)
        if err != nil {
            b.Error(err)
            return
        }
    }
    b.StopTimer()
}

func doBenchDriverFind(db *sql.DB, b *testing.B) {
    b.StopTimer()
    for i := 0; i < 50; i++ {
        _, err := db.Exec(`insert into big_struct (name, title, age, alias, nick_name)
        values ('fafdasf', 'fadfa', 'afadfsaf', 'fadfafdsafd', 'fadfafdsaf')`)
        if err != nil {
            b.Error(err)
            return
        }
    }

    b.StartTimer()
    for i := 0; i < b.N/50; i++ {
        rows, err := db.Query("select * from big_struct limit 50")
        if err != nil {
            b.Error(err)
            return
        }
        for rows.Next() {
            s := &BigStruct{}
            rows.Scan(&s.Id, &s.Name, &s.Title, &s.Age, &s.Alias, &s.NickName)
        }
    }
    b.StopTimer()
}

func doBenchDriver(newdriver func() (*sql.DB, error), createTableSql,
    dropTableSql string, opFunc func(*sql.DB, *testing.B), t *testing.B) {
    db, err := newdriver()
    if err != nil {
        t.Error(err)
        return
    }
    defer db.Close()

    _, err = db.Exec(createTableSql)
    if err != nil {
        t.Error(err)
        return
    }

    opFunc(db, t)

    _, err = db.Exec(dropTableSql)
    if err != nil {
        t.Error(err)
        return
    }
}

func doBenchInsert(engine *Engine, b *testing.B) {
    b.StopTimer()
    bs := &BigStruct{0, "fafdasf", "fadfa", "afadfsaf", "fadfafdsafd", "fadfafdsaf"}
    err := engine.CreateTables(bs)
    if err != nil {
        b.Error(err)
        return
    }

    b.StartTimer()
    for i := 0; i < b.N; i++ {
        bs.Id = 0
        _, err = engine.Insert(bs)
        if err != nil {
            b.Error(err)
            return
        }
    }
    b.StopTimer()
    err = engine.DropTables(bs)
    if err != nil {
        b.Error(err)
        return
    }
}

func doBenchFind(engine *Engine, b *testing.B) {
    b.StopTimer()
    bs := &BigStruct{0, "fafdasf", "fadfa", "afadfsaf", "fadfafdsafd", "fadfafdsaf"}
    err := engine.CreateTables(bs)
    if err != nil {
        b.Error(err)
        return
    }

    for i := 0; i < 100; i++ {
        bs.Id = 0
        _, err = engine.Insert(bs)
        if err != nil {
            b.Error(err)
            return
        }
    }

    b.StartTimer()
    for i := 0; i < b.N/50; i++ {
        bss := new([]BigStruct)
        err = engine.Limit(50).Find(bss)
        if err != nil {
            b.Error(err)
            return
        }
    }
    b.StopTimer()
    err = engine.DropTables(bs)
    if err != nil {
        b.Error(err)
        return
    }
}

func doBenchFindPtr(engine *Engine, b *testing.B) {
    b.StopTimer()
    bs := &BigStruct{0, "fafdasf", "fadfa", "afadfsaf", "fadfafdsafd", "fadfafdsaf"}
    err := engine.CreateTables(bs)
    if err != nil {
        b.Error(err)
        return
    }

    for i := 0; i < 100; i++ {
        bs.Id = 0
        _, err = engine.Insert(bs)
        if err != nil {
            b.Error(err)
            return
        }
    }

    b.StartTimer()
    for i := 0; i < b.N/50; i++ {
        bss := new([]*BigStruct)
        err = engine.Limit(50).Find(bss)
        if err != nil {
            b.Error(err)
            return
        }
    }
    b.StopTimer()
    err = engine.DropTables(bs)
    if err != nil {
        b.Error(err)
        return
    }
}
cache.go (528 changes)
@@ -1,131 +1,131 @@
package xorm

import (
    "container/list"
    "errors"
    "fmt"
    "strconv"
    "strings"
    "sync"
    "time"
)

const (
    // default cache expired time
    CacheExpired = 60 * time.Minute
    // not use now
    CacheMaxMemory = 256
    // evey ten minutes to clear all expired nodes
    CacheGcInterval = 10 * time.Minute
    // each time when gc to removed max nodes
    CacheGcMaxRemoved = 20
)

// CacheStore is a interface to store cache
type CacheStore interface {
    Put(key, value interface{}) error
    Get(key interface{}) (interface{}, error)
    Del(key interface{}) error
}

// MemoryStore implements CacheStore provide local machine
// memory store
type MemoryStore struct {
    store map[interface{}]interface{}
    mutex sync.RWMutex
}

func NewMemoryStore() *MemoryStore {
    return &MemoryStore{store: make(map[interface{}]interface{})}
}

func (s *MemoryStore) Put(key, value interface{}) error {
    s.mutex.Lock()
    defer s.mutex.Unlock()
    s.store[key] = value
    return nil
}

func (s *MemoryStore) Get(key interface{}) (interface{}, error) {
    s.mutex.RLock()
    defer s.mutex.RUnlock()
    if v, ok := s.store[key]; ok {
        return v, nil
    }

    return nil, ErrNotExist
}

func (s *MemoryStore) Del(key interface{}) error {
    s.mutex.Lock()
    defer s.mutex.Unlock()
    delete(s.store, key)
    return nil
}

// Cacher is an interface to provide cache
type Cacher interface {
    GetIds(tableName, sql string) interface{}
    GetBean(tableName string, id int64) interface{}
    PutIds(tableName, sql string, ids interface{})
    PutBean(tableName string, id int64, obj interface{})
    DelIds(tableName, sql string)
    DelBean(tableName string, id int64)
    ClearIds(tableName string)
    ClearBeans(tableName string)
}

type idNode struct {
    tbName    string
    id        int64
    lastVisit time.Time
}

type sqlNode struct {
    tbName    string
    sql       string
    lastVisit time.Time
}

func newIdNode(tbName string, id int64) *idNode {
    return &idNode{tbName, id, time.Now()}
}

func newSqlNode(tbName, sql string) *sqlNode {
    return &sqlNode{tbName, sql, time.Now()}
}

// LRUCacher implements Cacher according to LRU algorithm
type LRUCacher struct {
    idList     *list.List
    sqlList    *list.List
    idIndex    map[string]map[interface{}]*list.Element
    sqlIndex   map[string]map[interface{}]*list.Element
    store      CacheStore
    Max        int
    mutex      sync.Mutex
    Expired    time.Duration
    maxSize    int
    GcInterval time.Duration
}

func newLRUCacher(store CacheStore, expired time.Duration, maxSize int, max int) *LRUCacher {
    cacher := &LRUCacher{store: store, idList: list.New(),
        sqlList: list.New(), Expired: expired, maxSize: maxSize,
        GcInterval: CacheGcInterval, Max: max,
        sqlIndex: make(map[string]map[interface{}]*list.Element),
        idIndex:  make(map[string]map[interface{}]*list.Element),
    }
    cacher.RunGC()
    return cacher
}

func NewLRUCacher(store CacheStore, max int) *LRUCacher {
    return newLRUCacher(store, CacheExpired, CacheMaxMemory, max)
}

func NewLRUCacher2(store CacheStore, expired time.Duration, max int) *LRUCacher {
    return newLRUCacher(store, expired, 0, max)
}

//func NewLRUCacher3(store CacheStore, expired time.Duration, maxSize int) *LRUCacher {

@@ -134,262 +134,262 @@ func NewLRUCacher2(store CacheStore, expired time.Duration, max int) *LRUCacher

// RunGC run once every m.GcInterval
func (m *LRUCacher) RunGC() {
    time.AfterFunc(m.GcInterval, func() {
        m.RunGC()
        m.GC()
    })
}

// GC check ids lit and sql list to remove all element expired
func (m *LRUCacher) GC() {
    //fmt.Println("begin gc ...")
    //defer fmt.Println("end gc ...")
    m.mutex.Lock()
    defer m.mutex.Unlock()
    var removedNum int
    for e := m.idList.Front(); e != nil; {
        if removedNum <= CacheGcMaxRemoved &&
            time.Now().Sub(e.Value.(*idNode).lastVisit) > m.Expired {
            removedNum++
            next := e.Next()
            //fmt.Println("removing ...", e.Value)
            node := e.Value.(*idNode)
            m.delBean(node.tbName, node.id)
            e = next
        } else {
            //fmt.Printf("removing %d cache nodes ..., left %d\n", removedNum, m.idList.Len())
            break
        }
    }

    removedNum = 0
    for e := m.sqlList.Front(); e != nil; {
        if removedNum <= CacheGcMaxRemoved &&
            time.Now().Sub(e.Value.(*sqlNode).lastVisit) > m.Expired {
            removedNum++
            next := e.Next()
            //fmt.Println("removing ...", e.Value)
            node := e.Value.(*sqlNode)
            m.delIds(node.tbName, node.sql)
            e = next
        } else {
            //fmt.Printf("removing %d cache nodes ..., left %d\n", removedNum, m.sqlList.Len())
            break
        }
    }
}

// Get all bean's ids according to sql and parameter from cache
func (m *LRUCacher) GetIds(tableName, sql string) interface{} {
    m.mutex.Lock()
    defer m.mutex.Unlock()
    if _, ok := m.sqlIndex[tableName]; !ok {
        m.sqlIndex[tableName] = make(map[interface{}]*list.Element)
    }
    if v, err := m.store.Get(sql); err == nil {
        if el, ok := m.sqlIndex[tableName][sql]; !ok {
            el = m.sqlList.PushBack(newSqlNode(tableName, sql))
            m.sqlIndex[tableName][sql] = el
        } else {
            lastTime := el.Value.(*sqlNode).lastVisit
            // if expired, remove the node and return nil
            if time.Now().Sub(lastTime) > m.Expired {
                m.delIds(tableName, sql)
                return nil
            }
            m.sqlList.MoveToBack(el)
            el.Value.(*sqlNode).lastVisit = time.Now()
        }
        return v
    } else {
        m.delIds(tableName, sql)
    }

    return nil
}

// Get bean according tableName and id from cache
func (m *LRUCacher) GetBean(tableName string, id int64) interface{} {
    m.mutex.Lock()
    defer m.mutex.Unlock()
    if _, ok := m.idIndex[tableName]; !ok {
        m.idIndex[tableName] = make(map[interface{}]*list.Element)
    }
    tid := genId(tableName, id)
    if v, err := m.store.Get(tid); err == nil {
        if el, ok := m.idIndex[tableName][id]; ok {
            lastTime := el.Value.(*idNode).lastVisit
            // if expired, remove the node and return nil
            if time.Now().Sub(lastTime) > m.Expired {
                m.delBean(tableName, id)
                //m.clearIds(tableName)
                return nil
            }
            m.idList.MoveToBack(el)
            el.Value.(*idNode).lastVisit = time.Now()
        } else {
            el = m.idList.PushBack(newIdNode(tableName, id))
            m.idIndex[tableName][id] = el
        }
        return v
    } else {
        // store bean is not exist, then remove memory's index
        m.delBean(tableName, id)
        //m.clearIds(tableName)
        return nil
    }
}

// Clear all sql-ids mapping on table tableName from cache
func (m *LRUCacher) clearIds(tableName string) {
    if tis, ok := m.sqlIndex[tableName]; ok {
        for sql, v := range tis {
            m.sqlList.Remove(v)
            m.store.Del(sql)
        }
    }
    m.sqlIndex[tableName] = make(map[interface{}]*list.Element)
}

func (m *LRUCacher) ClearIds(tableName string) {
    m.mutex.Lock()
    defer m.mutex.Unlock()
    m.clearIds(tableName)
}

func (m *LRUCacher) clearBeans(tableName string) {
    if tis, ok := m.idIndex[tableName]; ok {
        for id, v := range tis {
            m.idList.Remove(v)
            tid := genId(tableName, id.(int64))
            m.store.Del(tid)
        }
    }
    m.idIndex[tableName] = make(map[interface{}]*list.Element)
}

func (m *LRUCacher) ClearBeans(tableName string) {
    m.mutex.Lock()
    defer m.mutex.Unlock()
    m.clearBeans(tableName)
}

func (m *LRUCacher) PutIds(tableName, sql string, ids interface{}) {
    m.mutex.Lock()
    defer m.mutex.Unlock()
    if _, ok := m.sqlIndex[tableName]; !ok {
        m.sqlIndex[tableName] = make(map[interface{}]*list.Element)
    }
    if el, ok := m.sqlIndex[tableName][sql]; !ok {
        el = m.sqlList.PushBack(newSqlNode(tableName, sql))
        m.sqlIndex[tableName][sql] = el
    } else {
        el.Value.(*sqlNode).lastVisit = time.Now()
    }
    m.store.Put(sql, ids)
    if m.sqlList.Len() > m.Max {
        e := m.sqlList.Front()
        node := e.Value.(*sqlNode)
        m.delIds(node.tbName, node.sql)
    }
}

func (m *LRUCacher) PutBean(tableName string, id int64, obj interface{}) {
    m.mutex.Lock()
    defer m.mutex.Unlock()
    var el *list.Element
    var ok bool

    if el, ok = m.idIndex[tableName][id]; !ok {
        el = m.idList.PushBack(newIdNode(tableName, id))
        m.idIndex[tableName][id] = el
    } else {
        el.Value.(*idNode).lastVisit = time.Now()
    }

    m.store.Put(genId(tableName, id), obj)
    if m.idList.Len() > m.Max {
        e := m.idList.Front()
        node := e.Value.(*idNode)
        m.delBean(node.tbName, node.id)
    }
}

func (m *LRUCacher) delIds(tableName, sql string) {
    if _, ok := m.sqlIndex[tableName]; ok {
        if el, ok := m.sqlIndex[tableName][sql]; ok {
            delete(m.sqlIndex[tableName], sql)
            m.sqlList.Remove(el)
        }
    }
    m.store.Del(sql)
}

func (m *LRUCacher) DelIds(tableName, sql string) {
    m.mutex.Lock()
    defer m.mutex.Unlock()
    m.delIds(tableName, sql)
}

func (m *LRUCacher) delBean(tableName string, id int64) {
    tid := genId(tableName, id)
    if el, ok := m.idIndex[tableName][id]; ok {
        delete(m.idIndex[tableName], id)
        m.idList.Remove(el)
        m.clearIds(tableName)
    }
    m.store.Del(tid)
}

func (m *LRUCacher) DelBean(tableName string, id int64) {
    m.mutex.Lock()
    defer m.mutex.Unlock()
    m.delBean(tableName, id)
}

func encodeIds(ids []int64) (s string) {
    s = "["
    for _, id := range ids {
        s += fmt.Sprintf("%v,", id)
    }
    s = s[:len(s)-1] + "]"
    return
}

func decodeIds(s string) []int64 {
    res := make([]int64, 0)
    if len(s) >= 2 {
        ss := strings.Split(s[1:len(s)-1], ",")
        for _, s := range ss {
            i, err := strconv.ParseInt(s, 10, 64)
            if err != nil {
                return res
            }
            res = append(res, i)
        }
    }
    return res
}

func getCacheSql(m Cacher, tableName, sql string, args interface{}) ([]int64, error) {
    bytes := m.GetIds(tableName, genSqlKey(sql, args))
    if bytes == nil {
        return nil, errors.New("Not Exist")
    }
    objs := decodeIds(bytes.(string))
    return objs, nil
}

func putCacheSql(m Cacher, ids []int64, tableName, sql string, args interface{}) error {
    bytes := encodeIds(ids)
    m.PutIds(tableName, genSqlKey(sql, args), bytes)
    return nil
}

func genSqlKey(sql string, args interface{}) string {
    return fmt.Sprintf("%v-%v", sql, args)
}

func genId(prefix string, id int64) string {
    return fmt.Sprintf("%v-%v", prefix, id)
}
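For context, the cacher above is normally attached to an engine with SetDefaultCacher, as the test file later in this commit does. A minimal wiring sketch (not part of the diff; the driver name and DSN are placeholders taken from the tests):

func exampleDefaultCacher() error { // hypothetical helper, for illustration only
    engine, err := NewEngine("mymysql", "xorm_test/root/") // placeholder DSN
    if err != nil {
        return err
    }
    defer engine.Close()
    // keep query results and beans in process memory, evicting least-recently-used
    // entries once more than 1000 are tracked
    engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))
    return nil
}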
error.go (16 changes)
@@ -1,15 +1,15 @@
package xorm

import (
    "errors"
)

var (
    ErrParamsType      error = errors.New("Params type error")
    ErrTableNotFound   error = errors.New("Not found table")
    ErrUnSupportedType error = errors.New("Unsupported type error")
    ErrNotExist        error = errors.New("Not exist error")
    ErrCacheFailed     error = errors.New("Cache failed")
    ErrNeedDeletedCond error = errors.New("Delete need at least one condition")
    ErrNotImplemented  error = errors.New("Not implemented.")
)
filter.go (40 changes)
@@ -1,13 +1,13 @@
package xorm

import (
    "fmt"
    "strings"
)

// Filter is an interface to filter SQL
type Filter interface {
    Do(sql string, session *Session) string
}

// PgSeqFilter filter SQL replace ?, ? ... to $1, $2 ...
@@ -15,16 +15,16 @@ type PgSeqFilter struct {
}

func (s *PgSeqFilter) Do(sql string, session *Session) string {
    segs := strings.Split(sql, "?")
    size := len(segs)
    res := ""
    for i, c := range segs {
        if i < size-1 {
            res += c + fmt.Sprintf("$%v", i+1)
        }
    }
    res += segs[size-1]
    return res
}

// QuoteFilter filter SQL replace ` to database's own quote character
@@ -32,7 +32,7 @@ type QuoteFilter struct {
}

func (s *QuoteFilter) Do(sql string, session *Session) string {
    return strings.Replace(sql, "`", session.Engine.QuoteStr(), -1)
}

// IdFilter filter SQL replace (id) to primary key column name
@@ -40,10 +40,10 @@ type IdFilter struct {
}

func (i *IdFilter) Do(sql string, session *Session) string {
    if session.Statement.RefTable != nil && session.Statement.RefTable.PrimaryKey != "" {
        sql = strings.Replace(sql, "`(id)`", session.Engine.Quote(session.Statement.RefTable.PrimaryKey), -1)
        sql = strings.Replace(sql, session.Engine.Quote("(id)"), session.Engine.Quote(session.Statement.RefTable.PrimaryKey), -1)
        return strings.Replace(sql, "(id)", session.Engine.Quote(session.Statement.RefTable.PrimaryKey), -1)
    }
    return sql
}
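As a worked example of the placeholder rewrite above: PgSeqFilter.Do only inspects the SQL string, so a sketch can pass nil for the session (illustration only, not part of the diff):

func examplePgSeqFilter() string { // hypothetical, for illustration only
    f := &PgSeqFilter{}
    // "... values (?, ?)" becomes "... values ($1, $2)"
    return f.Do("insert into big_struct (name, title) values (?, ?)", nil)
}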
helpers.go (74 changes)
@@ -1,63 +1,63 @@
package xorm

import (
    "reflect"
    "strings"
)

func indexNoCase(s, sep string) int {
    return strings.Index(strings.ToLower(s), strings.ToLower(sep))
}

func splitNoCase(s, sep string) []string {
    idx := indexNoCase(s, sep)
    if idx < 0 {
        return []string{s}
    }
    return strings.Split(s, s[idx:idx+len(sep)])
}

func splitNNoCase(s, sep string, n int) []string {
    idx := indexNoCase(s, sep)
    if idx < 0 {
        return []string{s}
    }
    return strings.SplitN(s, s[idx:idx+len(sep)], n)
}

func makeArray(elem string, count int) []string {
    res := make([]string, count)
    for i := 0; i < count; i++ {
        res[i] = elem
    }
    return res
}

func rType(bean interface{}) reflect.Type {
    sliceValue := reflect.Indirect(reflect.ValueOf(bean))
    return reflect.TypeOf(sliceValue.Interface())
}

func structName(v reflect.Type) string {
    for v.Kind() == reflect.Ptr {
        v = v.Elem()
    }
    return v.Name()
}

func sliceEq(left, right []string) bool {
    for _, l := range left {
        var find bool
        for _, r := range right {
            if l == r {
                find = true
                break
            }
        }
        if !find {
            return false
        }
    }

    return true
}
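A quick illustration of the case-insensitive split helper above (a sketch from inside the package, not part of the diff):

func exampleSplitNoCase() []string { // hypothetical, for illustration only
    // the separator is matched case-insensitively, so "from" finds "FROM"
    return splitNoCase("select * FROM user_info", "from") // ["select * ", " user_info"]
}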
mapper.go (90 changes)
@@ -1,13 +1,13 @@
package xorm

import (
    "strings"
)

// name translation between struct, fields names and table, column names
type IMapper interface {
    Obj2Table(string) string
    Table2Obj(string) string
}

// SameMapper implements IMapper and provides same name between struct and
@@ -16,11 +16,11 @@ type SameMapper struct {
}

func (m SameMapper) Obj2Table(o string) string {
    return o
}

func (m SameMapper) Table2Obj(t string) string {
    return t
}

// SnakeMapper implements IMapper and provides name transaltion between
@@ -29,18 +29,18 @@ type SnakeMapper struct {
}

func snakeCasedName(name string) string {
    newstr := make([]rune, 0)
    for idx, chr := range name {
        if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
            if idx > 0 {
                newstr = append(newstr, '_')
            }
            chr -= ('A' - 'a')
        }
        newstr = append(newstr, chr)
    }

    return string(newstr)
}

/*func pascal2Sql(s string) (d string) {
@@ -63,69 +63,69 @@ func snakeCasedName(name string) string {
}*/

func (mapper SnakeMapper) Obj2Table(name string) string {
    return snakeCasedName(name)
}

func titleCasedName(name string) string {
    newstr := make([]rune, 0)
    upNextChar := true

    name = strings.ToLower(name)

    for _, chr := range name {
        switch {
        case upNextChar:
            upNextChar = false
            if 'a' <= chr && chr <= 'z' {
                chr -= ('a' - 'A')
            }
        case chr == '_':
            upNextChar = true
            continue
        }

        newstr = append(newstr, chr)
    }

    return string(newstr)
}

func (mapper SnakeMapper) Table2Obj(name string) string {
    return titleCasedName(name)
}

// provide prefix table name support
type PrefixMapper struct {
    Mapper IMapper
    Prefix string
}

func (mapper PrefixMapper) Obj2Table(name string) string {
    return mapper.Prefix + mapper.Mapper.Obj2Table(name)
}

func (mapper PrefixMapper) Table2Obj(name string) string {
    return mapper.Mapper.Table2Obj(name[len(mapper.Prefix):])
}

func NewPrefixMapper(mapper IMapper, prefix string) PrefixMapper {
    return PrefixMapper{mapper, prefix}
}

// provide suffix table name support
type SuffixMapper struct {
    Mapper IMapper
    Suffix string
}

func (mapper SuffixMapper) Obj2Table(name string) string {
    return mapper.Suffix + mapper.Mapper.Obj2Table(name)
}

func (mapper SuffixMapper) Table2Obj(name string) string {
    return mapper.Mapper.Table2Obj(name[len(mapper.Suffix):])
}

func NewSuffixMapper(mapper IMapper, suffix string) SuffixMapper {
    return SuffixMapper{mapper, suffix}
}
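To make the name translation above concrete, a short sketch (not part of the diff; names are illustrative):

func exampleMappers() (string, string, string) { // hypothetical, for illustration only
    var m SnakeMapper
    table := m.Obj2Table("UserInfo")  // "user_info"
    obj := m.Table2Obj("user_info")   // "UserInfo"
    prefixed := NewPrefixMapper(SnakeMapper{}, "t_").Obj2Table("UserInfo") // "t_user_info"
    return table, obj, prefixed
}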
mymysql.go (98 changes)
@@ -1,67 +1,67 @@
package xorm

import (
    "errors"
    "strings"
    "time"
)

type mymysql struct {
    mysql
}

type mymysqlParser struct {
}

func (p *mymysqlParser) parse(driverName, dataSourceName string) (*uri, error) {
    db := &uri{dbType: MYSQL}

    pd := strings.SplitN(dataSourceName, "*", 2)
    if len(pd) == 2 {
        // Parse protocol part of URI
        p := strings.SplitN(pd[0], ":", 2)
        if len(p) != 2 {
            return nil, errors.New("Wrong protocol part of URI")
        }
        db.proto = p[0]
        options := strings.Split(p[1], ",")
        db.raddr = options[0]
        for _, o := range options[1:] {
            kv := strings.SplitN(o, "=", 2)
            var k, v string
            if len(kv) == 2 {
                k, v = kv[0], kv[1]
            } else {
                k, v = o, "true"
            }
            switch k {
            case "laddr":
                db.laddr = v
            case "timeout":
                to, err := time.ParseDuration(v)
                if err != nil {
                    return nil, err
                }
                db.timeout = to
            default:
                return nil, errors.New("Unknown option: " + k)
            }
        }
        // Remove protocol part
        pd = pd[1:]
    }
    // Parse database part of URI
    dup := strings.SplitN(pd[0], "/", 3)
    if len(dup) != 3 {
        return nil, errors.New("Wrong database part of URI")
    }
    db.dbName = dup[0]
    db.user = dup[1]
    db.passwd = dup[2]

    return db, nil
}

func (db *mymysql) Init(drivername, uri string) error {
    return db.mysql.base.init(&mymysqlParser{}, drivername, uri)
}
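The parser above accepts mymysql-style data source names of the form [proto:addr[,options]*]dbname/user/password; the tests below use "xorm_test/root/" (empty password). A sketch of both shapes (illustration only, address is a placeholder):

func exampleMymysqlDSN() { // hypothetical, for illustration only
    p := &mymysqlParser{}
    u1, _ := p.parse("mymysql", "xorm_test/root/")                     // dbName "xorm_test", user "root", empty password
    u2, _ := p.parse("mymysql", "tcp:127.0.0.1:3306*xorm_test/root/") // proto "tcp", raddr "127.0.0.1:3306"
    _, _ = u1, u2
}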
mymysql_test.go (208 changes)
@@ -1,10 +1,10 @@
package xorm

import (
    "database/sql"
    "testing"

    _ "github.com/ziutek/mymysql/godrv"
)

/*
@@ -15,153 +15,153 @@ utf8 COLLATE utf8_general_ci;
var showTestSql bool = true

func TestMyMysql(t *testing.T) {
    err := mymysqlDdlImport()
    if err != nil {
        t.Error(err)
        return
    }
    engine, err := NewEngine("mymysql", "xorm_test/root/")
    defer engine.Close()
    if err != nil {
        t.Error(err)
        return
    }
    engine.ShowSQL = showTestSql
    engine.ShowErr = showTestSql
    engine.ShowWarn = showTestSql
    engine.ShowDebug = showTestSql

    testAll(engine, t)
    testAll2(engine, t)
    testAll3(engine, t)
}

func TestMyMysqlWithCache(t *testing.T) {
    err := mymysqlDdlImport()
    if err != nil {
        t.Error(err)
        return
    }
    engine, err := NewEngine("mymysql", "xorm_test2/root/")
    defer engine.Close()
    if err != nil {
        t.Error(err)
        return
    }
    engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))
    engine.ShowSQL = showTestSql
    engine.ShowErr = showTestSql
    engine.ShowWarn = showTestSql
    engine.ShowDebug = showTestSql

    testAll(engine, t)
    testAll2(engine, t)
}

func newMyMysqlEngine() (*Engine, error) {
    return NewEngine("mymysql", "xorm_test2/root/")
}

func newMyMysqlDriverDB() (*sql.DB, error) {
    return sql.Open("mymysql", "xorm_test2/root/")
}

func BenchmarkMyMysqlDriverInsert(t *testing.B) {
    doBenchDriver(newMyMysqlDriverDB, createTableMySql, dropTableMySql,
        doBenchDriverInsert, t)
}

func BenchmarkMyMysqlDriverFind(t *testing.B) {
    doBenchDriver(newMyMysqlDriverDB, createTableMySql, dropTableMySql,
        doBenchDriverFind, t)
}

func mymysqlDdlImport() error {
    engine, err := NewEngine("mymysql", "/root/")
    if err != nil {
        return err
    }
    engine.ShowSQL = showTestSql
    engine.ShowErr = showTestSql
    engine.ShowWarn = showTestSql
    engine.ShowDebug = showTestSql

    sqlResults, _ := engine.Import("tests/mysql_ddl.sql")
    engine.LogDebug("sql results: %v", sqlResults)
    engine.Close()
    return nil
}

func BenchmarkMyMysqlNoCacheInsert(t *testing.B) {
    engine, err := newMyMysqlEngine()
    if err != nil {
        t.Error(err)
        return
    }
    defer engine.Close()

    doBenchInsert(engine, t)
}

func BenchmarkMyMysqlNoCacheFind(t *testing.B) {
    engine, err := newMyMysqlEngine()
    if err != nil {
        t.Error(err)
        return
    }
    defer engine.Close()

    //engine.ShowSQL = true
    doBenchFind(engine, t)
}

func BenchmarkMyMysqlNoCacheFindPtr(t *testing.B) {
    engine, err := newMyMysqlEngine()
    if err != nil {
        t.Error(err)
        return
    }
    defer engine.Close()

    //engine.ShowSQL = true
    doBenchFindPtr(engine, t)
}

func BenchmarkMyMysqlCacheInsert(t *testing.B) {
    engine, err := newMyMysqlEngine()
    if err != nil {
        t.Error(err)
        return
    }

    defer engine.Close()
    engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))

    doBenchInsert(engine, t)
}

func BenchmarkMyMysqlCacheFind(t *testing.B) {
    engine, err := newMyMysqlEngine()
    if err != nil {
        t.Error(err)
        return
    }

    defer engine.Close()
    engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))

    doBenchFind(engine, t)
}

func BenchmarkMyMysqlCacheFindPtr(t *testing.B) {
    engine, err := newMyMysqlEngine()
    if err != nil {
        t.Error(err)
        return
    }

    defer engine.Close()
    engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))

    doBenchFindPtr(engine, t)
}
500
mysql.go
500
mysql.go
|
@ -1,323 +1,323 @@
|
|||
package xorm
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"crypto/tls"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type uri struct {
|
||||
dbType string
|
||||
proto string
|
||||
host string
|
||||
port string
|
||||
dbName string
|
||||
user string
|
||||
passwd string
|
||||
charset string
|
||||
laddr string
|
||||
raddr string
|
||||
timeout time.Duration
|
||||
dbType string
|
||||
proto string
|
||||
host string
|
||||
port string
|
||||
dbName string
|
||||
user string
|
||||
passwd string
|
||||
charset string
|
||||
laddr string
|
||||
raddr string
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
type parser interface {
|
||||
parse(driverName, dataSourceName string) (*uri, error)
|
||||
parse(driverName, dataSourceName string) (*uri, error)
|
||||
}
|
||||
|
||||
type mysqlParser struct {
|
||||
}
|
||||
|
||||
func (p *mysqlParser) parse(driverName, dataSourceName string) (*uri, error) {
|
||||
//cfg.params = make(map[string]string)
|
||||
dsnPattern := regexp.MustCompile(
|
||||
`^(?:(?P<user>.*?)(?::(?P<passwd>.*))?@)?` + // [user[:password]@]
|
||||
`(?:(?P<net>[^\(]*)(?:\((?P<addr>[^\)]*)\))?)?` + // [net[(addr)]]
|
||||
`\/(?P<dbname>.*?)` + // /dbname
|
||||
`(?:\?(?P<params>[^\?]*))?$`) // [?param1=value1¶mN=valueN]
|
||||
matches := dsnPattern.FindStringSubmatch(dataSourceName)
|
||||
//tlsConfigRegister := make(map[string]*tls.Config)
|
||||
names := dsnPattern.SubexpNames()
|
||||
//cfg.params = make(map[string]string)
|
||||
dsnPattern := regexp.MustCompile(
|
||||
`^(?:(?P<user>.*?)(?::(?P<passwd>.*))?@)?` + // [user[:password]@]
|
||||
`(?:(?P<net>[^\(]*)(?:\((?P<addr>[^\)]*)\))?)?` + // [net[(addr)]]
|
||||
`\/(?P<dbname>.*?)` + // /dbname
|
||||
`(?:\?(?P<params>[^\?]*))?$`) // [?param1=value1¶mN=valueN]
|
||||
matches := dsnPattern.FindStringSubmatch(dataSourceName)
|
||||
//tlsConfigRegister := make(map[string]*tls.Config)
|
||||
names := dsnPattern.SubexpNames()
|
||||
|
||||
uri := &uri{dbType: MYSQL}
|
||||
uri := &uri{dbType: MYSQL}
|
||||
|
||||
for i, match := range matches {
|
||||
switch names[i] {
|
||||
case "dbname":
|
||||
uri.dbName = match
|
||||
}
|
||||
}
|
||||
return uri, nil
|
||||
for i, match := range matches {
|
||||
switch names[i] {
|
||||
case "dbname":
|
||||
uri.dbName = match
|
||||
}
|
||||
}
|
||||
return uri, nil
|
||||
}
|
||||
|
||||
type base struct {
|
||||
parser parser
|
||||
driverName string
|
||||
dataSourceName string
|
||||
*uri
|
||||
parser parser
|
||||
driverName string
|
||||
dataSourceName string
|
||||
*uri
|
||||
}
|
||||
|
||||
func (b *base) init(parser parser, drivername, dataSourceName string) (err error) {
|
||||
b.parser = parser
|
||||
b.driverName, b.dataSourceName = drivername, dataSourceName
|
||||
b.uri, err = b.parser.parse(b.driverName, b.dataSourceName)
|
||||
return
|
||||
b.parser = parser
|
||||
b.driverName, b.dataSourceName = drivername, dataSourceName
|
||||
b.uri, err = b.parser.parse(b.driverName, b.dataSourceName)
|
||||
return
|
||||
}
|
||||
|
||||
type mysql struct {
|
||||
base
|
||||
net string
|
||||
addr string
|
||||
params map[string]string
|
||||
loc *time.Location
|
||||
timeout time.Duration
|
||||
tls *tls.Config
|
||||
allowAllFiles bool
|
||||
allowOldPasswords bool
|
||||
clientFoundRows bool
|
||||
base
|
||||
net string
|
||||
addr string
|
||||
params map[string]string
|
||||
loc *time.Location
|
||||
timeout time.Duration
|
||||
tls *tls.Config
|
||||
allowAllFiles bool
|
||||
allowOldPasswords bool
|
||||
clientFoundRows bool
|
||||
}
|
||||
|
||||
func (db *mysql) Init(drivername, uri string) error {
|
||||
return db.base.init(&mysqlParser{}, drivername, uri)
|
||||
return db.base.init(&mysqlParser{}, drivername, uri)
|
||||
}
|
||||
|
||||
func (db *mysql) SqlType(c *Column) string {
|
||||
var res string
|
||||
switch t := c.SQLType.Name; t {
|
||||
case Bool:
|
||||
res = TinyInt
|
||||
case Serial:
|
||||
c.IsAutoIncrement = true
|
||||
c.IsPrimaryKey = true
|
||||
c.Nullable = false
|
||||
res = Int
|
||||
case BigSerial:
|
||||
c.IsAutoIncrement = true
|
||||
c.IsPrimaryKey = true
|
||||
c.Nullable = false
|
||||
res = BigInt
|
||||
case Bytea:
|
||||
res = Blob
|
||||
case TimeStampz:
|
||||
res = Char
|
||||
c.Length = 64
|
||||
default:
|
||||
res = t
|
||||
}
|
||||
var res string
|
||||
switch t := c.SQLType.Name; t {
|
||||
case Bool:
|
||||
res = TinyInt
|
||||
case Serial:
|
||||
c.IsAutoIncrement = true
|
||||
c.IsPrimaryKey = true
|
||||
c.Nullable = false
|
||||
res = Int
|
||||
case BigSerial:
|
||||
c.IsAutoIncrement = true
|
||||
c.IsPrimaryKey = true
|
||||
c.Nullable = false
|
||||
res = BigInt
|
||||
case Bytea:
|
||||
res = Blob
|
||||
case TimeStampz:
|
||||
res = Char
|
||||
c.Length = 64
|
||||
default:
|
||||
res = t
|
||||
}
|
||||
|
||||
var hasLen1 bool = (c.Length > 0)
|
||||
var hasLen2 bool = (c.Length2 > 0)
|
||||
if hasLen1 {
|
||||
res += "(" + strconv.Itoa(c.Length) + ")"
|
||||
} else if hasLen2 {
|
||||
res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")"
|
||||
}
|
||||
return res
|
||||
var hasLen1 bool = (c.Length > 0)
|
||||
var hasLen2 bool = (c.Length2 > 0)
|
||||
if hasLen1 {
|
||||
res += "(" + strconv.Itoa(c.Length) + ")"
|
||||
} else if hasLen2 {
|
||||
res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")"
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (db *mysql) SupportInsertMany() bool {
|
||||
return true
|
||||
return true
|
||||
}
|
||||
|
||||
func (db *mysql) QuoteStr() string {
|
||||
return "`"
|
||||
return "`"
|
||||
}
|
||||
|
||||
func (db *mysql) SupportEngine() bool {
|
||||
return true
|
||||
return true
|
||||
}
|
||||
|
||||
func (db *mysql) AutoIncrStr() string {
|
||||
return "AUTO_INCREMENT"
|
||||
return "AUTO_INCREMENT"
|
||||
}
|
||||
|
||||
func (db *mysql) SupportCharset() bool {
|
||||
return true
|
||||
return true
|
||||
}
|
||||
|
||||
func (db *mysql) IndexOnTable() bool {
|
||||
return true
|
||||
return true
|
||||
}
|
||||
|
||||
func (db *mysql) IndexCheckSql(tableName, idxName string) (string, []interface{}) {
	args := []interface{}{db.dbName, tableName, idxName}
	sql := "SELECT `INDEX_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS`"
	sql += " WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? AND `INDEX_NAME`=?"
	return sql, args
}

func (db *mysql) ColumnCheckSql(tableName, colName string) (string, []interface{}) {
	args := []interface{}{db.dbName, tableName, colName}
	sql := "SELECT `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? AND `COLUMN_NAME` = ?"
	return sql, args
}

func (db *mysql) TableCheckSql(tableName string) (string, []interface{}) {
	args := []interface{}{db.dbName, tableName}
	sql := "SELECT `TABLE_NAME` from `INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? and `TABLE_NAME`=?"
	return sql, args
}

func (db *mysql) GetColumns(tableName string) ([]string, map[string]*Column, error) {
	args := []interface{}{db.dbName, tableName}
	s := "SELECT `COLUMN_NAME`, `IS_NULLABLE`, `COLUMN_DEFAULT`, `COLUMN_TYPE`," +
		" `COLUMN_KEY`, `EXTRA` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?"
	cnn, err := sql.Open(db.driverName, db.dataSourceName)
	if err != nil {
		return nil, nil, err
	}
	defer cnn.Close()
	res, err := query(cnn, s, args...)
	if err != nil {
		return nil, nil, err
	}
	cols := make(map[string]*Column)
	colSeq := make([]string, 0)
	for _, record := range res {
		col := new(Column)
		col.Indexes = make(map[string]bool)
		for name, content := range record {
			switch name {
			case "COLUMN_NAME":
				col.Name = strings.Trim(string(content), "` ")
			case "IS_NULLABLE":
				if "YES" == string(content) {
					col.Nullable = true
				}
			case "COLUMN_DEFAULT":
				// add ''
				col.Default = string(content)
			case "COLUMN_TYPE":
				cts := strings.Split(string(content), "(")
				var len1, len2 int
				if len(cts) == 2 {
					idx := strings.Index(cts[1], ")")
					lens := strings.Split(cts[1][0:idx], ",")
					len1, err = strconv.Atoi(strings.TrimSpace(lens[0]))
					if err != nil {
						return nil, nil, err
					}
					if len(lens) == 2 {
						len2, err = strconv.Atoi(lens[1])
						if err != nil {
							return nil, nil, err
						}
					}
				}
				colName := cts[0]
				colType := strings.ToUpper(colName)
				col.Length = len1
				col.Length2 = len2
				if _, ok := sqlTypes[colType]; ok {
					col.SQLType = SQLType{colType, len1, len2}
				} else {
					return nil, nil, errors.New(fmt.Sprintf("unkonw colType %v", colType))
				}
			case "COLUMN_KEY":
				key := string(content)
				if key == "PRI" {
					col.IsPrimaryKey = true
				}
				if key == "UNI" {
					//col.is
				}
			case "EXTRA":
				extra := string(content)
				if extra == "auto_increment" {
					col.IsAutoIncrement = true
				}
			}
		}
		if col.SQLType.IsText() {
			if col.Default != "" {
				col.Default = "'" + col.Default + "'"
			}
		}
		cols[col.Name] = col
		colSeq = append(colSeq, col.Name)
	}
	return colSeq, cols, nil
}

func (db *mysql) GetTables() ([]*Table, error) {
	args := []interface{}{db.dbName}
	s := "SELECT `TABLE_NAME`, `ENGINE`, `TABLE_ROWS`, `AUTO_INCREMENT` from `INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=?"
	cnn, err := sql.Open(db.driverName, db.dataSourceName)
	if err != nil {
		return nil, err
	}
	defer cnn.Close()
	res, err := query(cnn, s, args...)
	if err != nil {
		return nil, err
	}

	tables := make([]*Table, 0)
	for _, record := range res {
		table := new(Table)
		for name, content := range record {
			switch name {
			case "TABLE_NAME":
				table.Name = strings.Trim(string(content), "` ")
			case "ENGINE":
			}
		}
		tables = append(tables, table)
	}
	return tables, nil
}

func (db *mysql) GetIndexes(tableName string) (map[string]*Index, error) {
	args := []interface{}{db.dbName, tableName}
	s := "SELECT `INDEX_NAME`, `NON_UNIQUE`, `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?"
	cnn, err := sql.Open(db.driverName, db.dataSourceName)
	if err != nil {
		return nil, err
	}
	defer cnn.Close()
	res, err := query(cnn, s, args...)
	if err != nil {
		return nil, err
	}

	indexes := make(map[string]*Index, 0)
	for _, record := range res {
		var indexType int
		var indexName, colName string
		for name, content := range record {
			switch name {
			case "NON_UNIQUE":
				if "YES" == string(content) || string(content) == "1" {
					indexType = IndexType
				} else {
					indexType = UniqueType
				}
			case "INDEX_NAME":
				indexName = string(content)
			case "COLUMN_NAME":
				colName = strings.Trim(string(content), "` ")
			}
		}
		if indexName == "PRIMARY" {
			continue
		}
		if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
			indexName = indexName[5+len(tableName) : len(indexName)]
		}

		var index *Index
		var ok bool
		if index, ok = indexes[indexName]; !ok {
			index = new(Index)
			index.Type = indexType
			index.Name = indexName
			indexes[indexName] = index
		}
		index.AddColumn(colName)
	}
	return indexes, nil
}
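GetIndexes above strips the engine's IDX_<table>_ / UQE_<table>_ prefix before reporting an index name. A standalone sketch of that slice arithmetic (the index name used here is only illustrative of the naming convention):

package main

import (
	"fmt"
	"strings"
)

func main() {
	tableName := "big_struct"
	indexName := "IDX_big_struct_name" // illustrative name following the IDX_<table>_<col> convention
	if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
		// 4 bytes for "IDX_"/"UQE_" plus the table name and the trailing "_"
		indexName = indexName[5+len(tableName):]
	}
	fmt.Println(indexName) // name
}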

214 mysql_test.go
@@ -1,10 +1,10 @@
package xorm

import (
	"database/sql"
	"testing"

	_ "github.com/go-sql-driver/mysql"
)

/*
@@ -15,155 +15,155 @@ utf8 COLLATE utf8_general_ci;
var mysqlShowTestSql bool = true

func TestMysql(t *testing.T) {
	err := mysqlDdlImport()
	if err != nil {
		t.Error(err)
		return
	}

	engine, err := NewEngine("mysql", "root:@/xorm_test?charset=utf8")
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	engine.ShowSQL = mysqlShowTestSql
	engine.ShowErr = mysqlShowTestSql
	engine.ShowWarn = mysqlShowTestSql
	engine.ShowDebug = mysqlShowTestSql

	testAll(engine, t)
	testAll2(engine, t)
	testAll3(engine, t)
}

func TestMysqlWithCache(t *testing.T) {
	err := mysqlDdlImport()
	if err != nil {
		t.Error(err)
		return
	}

	engine, err := NewEngine("mysql", "root:@/xorm_test?charset=utf8")
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))
	engine.ShowSQL = mysqlShowTestSql
	engine.ShowErr = mysqlShowTestSql
	engine.ShowWarn = mysqlShowTestSql
	engine.ShowDebug = mysqlShowTestSql

	testAll(engine, t)
	testAll2(engine, t)
}

func newMysqlEngine() (*Engine, error) {
	return NewEngine("mysql", "root:@/xorm_test?charset=utf8")
}

func mysqlDdlImport() error {
	engine, err := NewEngine("mysql", "root:@/?charset=utf8")
	if err != nil {
		return err
	}
	engine.ShowSQL = mysqlShowTestSql
	engine.ShowErr = mysqlShowTestSql
	engine.ShowWarn = mysqlShowTestSql
	engine.ShowDebug = mysqlShowTestSql

	sqlResults, _ := engine.Import("tests/mysql_ddl.sql")
	engine.LogDebug("sql results: %v", sqlResults)
	engine.Close()
	return nil
}

func newMysqlDriverDB() (*sql.DB, error) {
	return sql.Open("mysql", "root:@/xorm_test?charset=utf8")
}

const (
	createTableMySql = "CREATE TABLE IF NOT EXISTS `big_struct` (`id` BIGINT PRIMARY KEY AUTO_INCREMENT NOT NULL, `name` VARCHAR(255) NULL, `title` VARCHAR(255) NULL, `age` VARCHAR(255) NULL, `alias` VARCHAR(255) NULL, `nick_name` VARCHAR(255) NULL);"
	dropTableMySql   = "DROP TABLE IF EXISTS `big_struct`;"
)

func BenchmarkMysqlDriverInsert(t *testing.B) {
	doBenchDriver(newMysqlDriverDB, createTableMySql, dropTableMySql,
		doBenchDriverInsert, t)
}

func BenchmarkMysqlDriverFind(t *testing.B) {
	doBenchDriver(newMysqlDriverDB, createTableMySql, dropTableMySql,
		doBenchDriverFind, t)
}

func BenchmarkMysqlNoCacheInsert(t *testing.B) {
	engine, err := newMysqlEngine()
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	//engine.ShowSQL = true
	doBenchInsert(engine, t)
}

func BenchmarkMysqlNoCacheFind(t *testing.B) {
	engine, err := newMysqlEngine()
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	//engine.ShowSQL = true
	doBenchFind(engine, t)
}

func BenchmarkMysqlNoCacheFindPtr(t *testing.B) {
	engine, err := newMysqlEngine()
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	//engine.ShowSQL = true
	doBenchFindPtr(engine, t)
}

func BenchmarkMysqlCacheInsert(t *testing.B) {
	engine, err := newMysqlEngine()
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))

	doBenchInsert(engine, t)
}

func BenchmarkMysqlCacheFind(t *testing.B) {
	engine, err := newMysqlEngine()
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))

	doBenchFind(engine, t)
}

func BenchmarkMysqlCacheFindPtr(t *testing.B) {
	engine, err := newMysqlEngine()
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))

	doBenchFindPtr(engine, t)
}

290 pool.go
@@ -1,13 +1,13 @@
package xorm

import (
	"database/sql"
	//"fmt"
	"sync"
	//"sync/atomic"
	"container/list"
	"reflect"
	"time"
)

// Interface IConnectPool is a connection pool interface; all implementations should implement
@@ -17,14 +17,14 @@ import (
// ReleaseDB for releasing a db connection;
// Close for invoking when engine.Close
type IConnectPool interface {
	Init(engine *Engine) error
	RetrieveDB(engine *Engine) (*sql.DB, error)
	ReleaseDB(engine *Engine, db *sql.DB)
	Close(engine *Engine) error
	SetMaxIdleConns(conns int)
	MaxIdleConns() int
	SetMaxConns(conns int)
	MaxConns() int
}

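For illustration only (not part of this commit): assuming the surrounding xorm package, where Engine.OpenDB returns a *sql.DB as used by NoneConnectPool below, a minimal pass-through implementation of IConnectPool could look like this sketch.

// passThroughPool is an illustrative, minimal IConnectPool implementation;
// it opens a fresh connection on retrieve and closes it on release.
type passThroughPool struct{}

func (p *passThroughPool) Init(engine *Engine) error                  { return nil }
func (p *passThroughPool) RetrieveDB(engine *Engine) (*sql.DB, error) { return engine.OpenDB() }
func (p *passThroughPool) ReleaseDB(engine *Engine, db *sql.DB)       { db.Close() }
func (p *passThroughPool) Close(engine *Engine) error                 { return nil }
func (p *passThroughPool) SetMaxIdleConns(conns int)                  {}
func (p *passThroughPool) MaxIdleConns() int                          { return 0 }
func (p *passThroughPool) SetMaxConns(conns int)                      {}
func (p *passThroughPool) MaxConns() int                              { return -1 }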
// Struct NoneConnectPool is an implementation of IConnectPool. It provides direct invocation of the driver's
@@ -34,35 +34,35 @@ type NoneConnectPool struct {

// NewNoneConnectPool new a NoneConnectPool.
func NewNoneConnectPool() IConnectPool {
	return &NoneConnectPool{}
}

// Init do nothing
func (p *NoneConnectPool) Init(engine *Engine) error {
	return nil
}

// RetrieveDB directly open a connection
func (p *NoneConnectPool) RetrieveDB(engine *Engine) (db *sql.DB, err error) {
	db, err = engine.OpenDB()
	return
}

// ReleaseDB directly close a connection
func (p *NoneConnectPool) ReleaseDB(engine *Engine, db *sql.DB) {
	db.Close()
}

// Close do nothing
func (p *NoneConnectPool) Close(engine *Engine) error {
	return nil
}

func (p *NoneConnectPool) SetMaxIdleConns(conns int) {
}

func (p *NoneConnectPool) MaxIdleConns() int {
	return 0
}

// not implemented
@@ -71,133 +71,133 @@ func (p *NoneConnectPool) SetMaxConns(conns int) {

// not implemented
func (p *NoneConnectPool) MaxConns() int {
	return -1
}

// Struct SysConnectPool is a simple wrapper for using the system default connection pool.
// About the system connection pool, you can review the code in database/sql/sql.go.
// It is currently the default pool implementation.
type SysConnectPool struct {
	db           *sql.DB
	maxIdleConns int
	maxConns     int
	curConns     int
	mutex        *sync.Mutex
	queue        *list.List
}

// NewSysConnectPool new a SysConnectPool.
func NewSysConnectPool() IConnectPool {
	return &SysConnectPool{}
}

// Init create a db immediately and keep it until the engine is closed.
func (s *SysConnectPool) Init(engine *Engine) error {
	db, err := engine.OpenDB()
	if err != nil {
		return err
	}
	s.db = db
	s.maxIdleConns = 2
	s.maxConns = -1
	s.curConns = 0
	s.mutex = &sync.Mutex{}
	s.queue = list.New()
	return nil
}

type node struct {
	mutex sync.Mutex
	cond  *sync.Cond
}

func newCondNode() *node {
	n := &node{}
	n.cond = sync.NewCond(&n.mutex)
	return n
}

// RetrieveDB just return the only db
func (s *SysConnectPool) RetrieveDB(engine *Engine) (db *sql.DB, err error) {
	/*if s.maxConns > 0 {
		fmt.Println("before retrieve")
		s.mutex.Lock()
		for s.curConns >= s.maxConns {
			fmt.Println("before waiting...", s.curConns, s.queue.Len())
			s.mutex.Unlock()
			n := NewNode()
			n.cond.L.Lock()
			s.queue.PushBack(n)
			n.cond.Wait()
			n.cond.L.Unlock()
			s.mutex.Lock()
			fmt.Println("after waiting...", s.curConns, s.queue.Len())
		}
		s.curConns += 1
		s.mutex.Unlock()
		fmt.Println("after retrieve")
	}*/
	return s.db, nil
}

// ReleaseDB do nothing
func (s *SysConnectPool) ReleaseDB(engine *Engine, db *sql.DB) {
	/*if s.maxConns > 0 {
		s.mutex.Lock()
		fmt.Println("before release", s.queue.Len())
		s.curConns -= 1

		if e := s.queue.Front(); e != nil {
			n := e.Value.(*node)
			//n.cond.L.Lock()
			n.cond.Signal()
			fmt.Println("signaled...")
			s.queue.Remove(e)
			//n.cond.L.Unlock()
		}
		fmt.Println("after released", s.queue.Len())
		s.mutex.Unlock()
	}*/
}

// Close closes the only db
func (p *SysConnectPool) Close(engine *Engine) error {
	return p.db.Close()
}

func (p *SysConnectPool) SetMaxIdleConns(conns int) {
	p.db.SetMaxIdleConns(conns)
	p.maxIdleConns = conns
}

func (p *SysConnectPool) MaxIdleConns() int {
	return p.maxIdleConns
}

// not implemented
func (p *SysConnectPool) SetMaxConns(conns int) {
	p.maxConns = conns
	// if SetMaxOpenConns is supported (go 1.2+), then set it
	if reflect.ValueOf(p.db).MethodByName("SetMaxOpenConns").IsValid() {
		reflect.ValueOf(p.db).MethodByName("SetMaxOpenConns").Call([]reflect.Value{reflect.ValueOf(conns)})
	}
	//p.db.SetMaxOpenConns(conns)
}

// not implemented
func (p *SysConnectPool) MaxConns() int {
	return p.maxConns
}

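For illustration only (not part of this commit): a standalone sketch of the reflection probe SetMaxConns uses above to detect whether *sql.DB exposes SetMaxOpenConns, which was only added in Go 1.2; nothing beyond database/sql and reflect is assumed.

package main

import (
	"database/sql"
	"fmt"
	"reflect"
)

func main() {
	var db *sql.DB
	// Same probe as SysConnectPool.SetMaxConns: only call SetMaxOpenConns
	// when the method actually exists on *sql.DB.
	m := reflect.ValueOf(db).MethodByName("SetMaxOpenConns")
	fmt.Println("SetMaxOpenConns available:", m.IsValid())
}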
// NewSimpleConnectPool new a SimpleConnectPool
func NewSimpleConnectPool() IConnectPool {
	return &SimpleConnectPool{releasedConnects: make([]*sql.DB, 10),
		usingConnects:  map[*sql.DB]time.Time{},
		cur:            -1,
		maxWaitTimeOut: 14400,
		maxIdleConns:   10,
		mutex:          &sync.Mutex{},
	}
}

// Struct SimpleConnectPool is a simple implementation of IConnectPool.
@@ -205,75 +205,75 @@ func NewSimpleConnectPool() IConnectPool {
// Opening or closing a database connection must be done under a lock.
// This implementation will be improved in the future.
type SimpleConnectPool struct {
	releasedConnects []*sql.DB
	cur              int
	usingConnects    map[*sql.DB]time.Time
	maxWaitTimeOut   int
	mutex            *sync.Mutex
	maxIdleConns     int
}

func (s *SimpleConnectPool) Init(engine *Engine) error {
	return nil
}

// RetrieveDB get a connection from connection pool
func (p *SimpleConnectPool) RetrieveDB(engine *Engine) (*sql.DB, error) {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	var db *sql.DB = nil
	var err error = nil
	//fmt.Printf("%x, rbegin - released:%v, using:%v\n", &p, p.cur+1, len(p.usingConnects))
	if p.cur < 0 {
		db, err = engine.OpenDB()
		if err != nil {
			return nil, err
		}
		p.usingConnects[db] = time.Now()
	} else {
		db = p.releasedConnects[p.cur]
		p.usingConnects[db] = time.Now()
		p.releasedConnects[p.cur] = nil
		p.cur = p.cur - 1
	}

	//fmt.Printf("%x, rend - released:%v, using:%v\n", &p, p.cur+1, len(p.usingConnects))
	return db, nil
}

// ReleaseDB release a db from connection pool
func (p *SimpleConnectPool) ReleaseDB(engine *Engine, db *sql.DB) {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	//fmt.Printf("%x, lbegin - released:%v, using:%v\n", &p, p.cur+1, len(p.usingConnects))
	if p.cur >= p.maxIdleConns-1 {
		db.Close()
	} else {
		p.cur = p.cur + 1
		p.releasedConnects[p.cur] = db
	}
	delete(p.usingConnects, db)
	//fmt.Printf("%x, lend - released:%v, using:%v\n", &p, p.cur+1, len(p.usingConnects))
}

// Close release all db
func (p *SimpleConnectPool) Close(engine *Engine) error {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	for len(p.releasedConnects) > 0 {
		p.releasedConnects[0].Close()
		p.releasedConnects = p.releasedConnects[1:]
	}

	return nil
}

func (p *SimpleConnectPool) SetMaxIdleConns(conns int) {
	p.maxIdleConns = conns
}

func (p *SimpleConnectPool) MaxIdleConns() int {
	return p.maxIdleConns
}

// not implemented
@@ -282,5 +282,5 @@ func (p *SimpleConnectPool) SetMaxConns(conns int) {

// not implemented
func (p *SimpleConnectPool) MaxConns() int {
	return -1
}

450 postgres.go
@@ -1,305 +1,305 @@
package xorm

import (
	"database/sql"
	"errors"
	"fmt"
	"strconv"
	"strings"
)

type postgres struct {
	base
}

type values map[string]string

func (vs values) Set(k, v string) {
	vs[k] = v
}

func (vs values) Get(k string) (v string) {
	return vs[k]
}

func errorf(s string, args ...interface{}) {
	panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)))
}

func parseOpts(name string, o values) {
	if len(name) == 0 {
		return
	}

	name = strings.TrimSpace(name)

	ps := strings.Split(name, " ")
	for _, p := range ps {
		kv := strings.Split(p, "=")
		if len(kv) < 2 {
			errorf("invalid option: %q", p)
		}
		o.Set(kv[0], kv[1])
	}
}

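For illustration only (not part of this commit): a standalone sketch of the space-separated key=value format parseOpts handles, fed with the same DSN that postgres_test.go uses further down.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// The DSN used by newPostgresEngine in postgres_test.go.
	name := "dbname=xorm_test sslmode=disable"
	o := map[string]string{}
	for _, p := range strings.Split(strings.TrimSpace(name), " ") {
		kv := strings.Split(p, "=")
		if len(kv) < 2 {
			panic(fmt.Sprintf("invalid option: %q", p))
		}
		o[kv[0]] = kv[1]
	}
	fmt.Println(o["dbname"], o["sslmode"]) // xorm_test disable
}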
type postgresParser struct {
}

func (p *postgresParser) parse(driverName, dataSourceName string) (*uri, error) {
	db := &uri{dbType: POSTGRES}
	o := make(values)
	parseOpts(dataSourceName, o)

	db.dbName = o.Get("dbname")
	if db.dbName == "" {
		return nil, errors.New("dbname is empty")
	}
	return db, nil
}

func (db *postgres) Init(drivername, uri string) error {
	return db.base.init(&postgresParser{}, drivername, uri)
}

func (db *postgres) SqlType(c *Column) string {
	var res string
	switch t := c.SQLType.Name; t {
	case TinyInt:
		res = SmallInt
	case MediumInt, Int, Integer:
		return Integer
	case Serial, BigSerial:
		c.IsAutoIncrement = true
		c.Nullable = false
		res = t
	case Binary, VarBinary:
		return Bytea
	case DateTime:
		res = TimeStamp
	case TimeStampz:
		return "timestamp with time zone"
	case Float:
		res = Real
	case TinyText, MediumText, LongText:
		res = Text
	case Blob, TinyBlob, MediumBlob, LongBlob:
		return Bytea
	case Double:
		return "DOUBLE PRECISION"
	default:
		if c.IsAutoIncrement {
			return Serial
		}
		res = t
	}

	var hasLen1 bool = (c.Length > 0)
	var hasLen2 bool = (c.Length2 > 0)
	if hasLen1 {
		res += "(" + strconv.Itoa(c.Length) + ")"
	} else if hasLen2 {
		res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")"
	}
	return res
}

func (db *postgres) SupportInsertMany() bool {
	return true
}

func (db *postgres) QuoteStr() string {
	return "\""
}

func (db *postgres) AutoIncrStr() string {
	return ""
}

func (db *postgres) SupportEngine() bool {
	return false
}

func (db *postgres) SupportCharset() bool {
	return false
}

func (db *postgres) IndexOnTable() bool {
	return false
}

func (db *postgres) IndexCheckSql(tableName, idxName string) (string, []interface{}) {
	args := []interface{}{tableName, idxName}
	return `SELECT indexname FROM pg_indexes ` +
		`WHERE tablename = ? AND indexname = ?`, args
}

func (db *postgres) TableCheckSql(tableName string) (string, []interface{}) {
	args := []interface{}{tableName}
	return `SELECT tablename FROM pg_tables WHERE tablename = ?`, args
}

func (db *postgres) ColumnCheckSql(tableName, colName string) (string, []interface{}) {
	args := []interface{}{tableName, colName}
	return "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = ?" +
		" AND column_name = ?", args
}

func (db *postgres) GetColumns(tableName string) ([]string, map[string]*Column, error) {
	args := []interface{}{tableName}
	s := "SELECT column_name, column_default, is_nullable, data_type, character_maximum_length" +
		", numeric_precision, numeric_precision_radix FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = $1"

	cnn, err := sql.Open(db.driverName, db.dataSourceName)
	if err != nil {
		return nil, nil, err
	}
	defer cnn.Close()
	res, err := query(cnn, s, args...)
	if err != nil {
		return nil, nil, err
	}
	cols := make(map[string]*Column)
	colSeq := make([]string, 0)
	for _, record := range res {
		col := new(Column)
		col.Indexes = make(map[string]bool)
		for name, content := range record {
			switch name {
			case "column_name":
				col.Name = strings.Trim(string(content), `" `)
			case "column_default":
				if strings.HasPrefix(string(content), "nextval") {
					col.IsPrimaryKey = true
				} else {
					col.Default = string(content)
				}
			case "is_nullable":
				if string(content) == "YES" {
					col.Nullable = true
				} else {
					col.Nullable = false
				}
			case "data_type":
				ct := string(content)
				switch ct {
				case "character varying", "character":
					col.SQLType = SQLType{Varchar, 0, 0}
				case "timestamp without time zone":
					col.SQLType = SQLType{DateTime, 0, 0}
				case "timestamp with time zone":
					col.SQLType = SQLType{TimeStampz, 0, 0}
				case "double precision":
					col.SQLType = SQLType{Double, 0, 0}
				case "boolean":
					col.SQLType = SQLType{Bool, 0, 0}
				case "time without time zone":
					col.SQLType = SQLType{Time, 0, 0}
				default:
					col.SQLType = SQLType{strings.ToUpper(ct), 0, 0}
				}
				if _, ok := sqlTypes[col.SQLType.Name]; !ok {
					return nil, nil, errors.New(fmt.Sprintf("unkonw colType %v", ct))
				}
			case "character_maximum_length":
				i, err := strconv.Atoi(string(content))
				if err != nil {
					return nil, nil, errors.New("retrieve length error")
				}
				col.Length = i
			case "numeric_precision":
			case "numeric_precision_radix":
			}
		}
		if col.SQLType.IsText() {
			if col.Default != "" {
				col.Default = "'" + col.Default + "'"
			}
		}
		cols[col.Name] = col
		colSeq = append(colSeq, col.Name)
	}

	return colSeq, cols, nil
}

func (db *postgres) GetTables() ([]*Table, error) {
	args := []interface{}{}
	s := "SELECT tablename FROM pg_tables where schemaname = 'public'"
	cnn, err := sql.Open(db.driverName, db.dataSourceName)
	if err != nil {
		return nil, err
	}
	defer cnn.Close()
	res, err := query(cnn, s, args...)
	if err != nil {
		return nil, err
	}

	tables := make([]*Table, 0)
	for _, record := range res {
		table := new(Table)
		for name, content := range record {
			switch name {
			case "tablename":
				table.Name = string(content)
			}
		}
		tables = append(tables, table)
	}
	return tables, nil
}

func (db *postgres) GetIndexes(tableName string) (map[string]*Index, error) {
	args := []interface{}{tableName}
	s := "SELECT tablename, indexname, indexdef FROM pg_indexes WHERE schemaname = 'public' and tablename = $1"

	cnn, err := sql.Open(db.driverName, db.dataSourceName)
	if err != nil {
		return nil, err
	}
	defer cnn.Close()
	res, err := query(cnn, s, args...)
	if err != nil {
		return nil, err
	}

	indexes := make(map[string]*Index, 0)
	for _, record := range res {
		var indexType int
		var indexName string
		var colNames []string

		for name, content := range record {
			switch name {
			case "indexname":
				indexName = strings.Trim(string(content), `" `)
			case "indexdef":
				c := string(content)
				if strings.HasPrefix(c, "CREATE UNIQUE INDEX") {
					indexType = UniqueType
				} else {
					indexType = IndexType
				}
				cs := strings.Split(c, "(")
				colNames = strings.Split(cs[1][0:len(cs[1])-1], ",")
			}
		}
		if strings.HasSuffix(indexName, "_pkey") {
			continue
		}
		if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
			newIdxName := indexName[5+len(tableName) : len(indexName)]
			if newIdxName != "" {
				indexName = newIdxName
			}
		}

		index := &Index{Name: indexName, Type: indexType, Cols: make([]string, 0)}
		for _, colName := range colNames {
			index.Cols = append(index.Cols, strings.Trim(colName, `" `))
		}
		indexes[index.Name] = index
	}
	return indexes, nil
}
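For illustration only (not part of this commit): a standalone sketch of how GetIndexes above splits a pg_indexes.indexdef value into a uniqueness flag and column names; the indexdef string here is only an illustrative example of that shape.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative indexdef of the shape GetIndexes expects.
	c := "CREATE UNIQUE INDEX UQE_big_struct_name ON big_struct USING btree (name)"
	unique := strings.HasPrefix(c, "CREATE UNIQUE INDEX")
	cs := strings.Split(c, "(")
	cols := strings.Split(cs[1][0:len(cs[1])-1], ",")
	fmt.Println(unique, cols) // true [name]
}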

170 postgres_test.go
@@ -1,52 +1,52 @@
package xorm

import (
	"database/sql"
	"testing"

	_ "github.com/lib/pq"
)

func newPostgresEngine() (*Engine, error) {
	return NewEngine("postgres", "dbname=xorm_test sslmode=disable")
}

func newPostgresDriverDB() (*sql.DB, error) {
	return sql.Open("postgres", "dbname=xorm_test sslmode=disable")
}

func TestPostgres(t *testing.T) {
	engine, err := newPostgresEngine()
	if err != nil {
		t.Error(err)
		return
	}
	defer engine.Close()
	engine.ShowSQL = showTestSql
	engine.ShowErr = showTestSql
	engine.ShowWarn = showTestSql
	engine.ShowDebug = showTestSql

	testAll(engine, t)
	testAll2(engine, t)
	testAll3(engine, t)
}

func TestPostgresWithCache(t *testing.T) {
	engine, err := newPostgresEngine()
	if err != nil {
		t.Error(err)
		return
	}
	engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))
	defer engine.Close()
	engine.ShowSQL = showTestSql
	engine.ShowErr = showTestSql
	engine.ShowWarn = showTestSql
	engine.ShowDebug = showTestSql

	testAll(engine, t)
	testAll2(engine, t)
}

/*
@@ -147,91 +147,91 @@ func TestPostgres2(t *testing.T) {
}*/

const (
	createTablePostgres = `CREATE TABLE IF NOT EXISTS "big_struct" ("id" SERIAL PRIMARY KEY NOT NULL, "name" VARCHAR(255) NULL, "title" VARCHAR(255) NULL, "age" VARCHAR(255) NULL, "alias" VARCHAR(255) NULL, "nick_name" VARCHAR(255) NULL);`
	dropTablePostgres   = `DROP TABLE IF EXISTS "big_struct";`
)

func BenchmarkPostgresDriverInsert(t *testing.B) {
	doBenchDriver(newPostgresDriverDB, createTablePostgres, dropTablePostgres,
		doBenchDriverInsert, t)
}

func BenchmarkPostgresDriverFind(t *testing.B) {
	doBenchDriver(newPostgresDriverDB, createTablePostgres, dropTablePostgres,
		doBenchDriverFind, t)
}

func BenchmarkPostgresNoCacheInsert(t *testing.B) {
	engine, err := newPostgresEngine()

	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	//engine.ShowSQL = true
	doBenchInsert(engine, t)
}

func BenchmarkPostgresNoCacheFind(t *testing.B) {
	engine, err := newPostgresEngine()

	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	//engine.ShowSQL = true
	doBenchFind(engine, t)
}

func BenchmarkPostgresNoCacheFindPtr(t *testing.B) {
	engine, err := newPostgresEngine()

	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	//engine.ShowSQL = true
	doBenchFindPtr(engine, t)
}

func BenchmarkPostgresCacheInsert(t *testing.B) {
	engine, err := newPostgresEngine()

	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))

	doBenchInsert(engine, t)
}

func BenchmarkPostgresCacheFind(t *testing.B) {
	engine, err := newPostgresEngine()

	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))

	doBenchFind(engine, t)
}

func BenchmarkPostgresCacheFindPtr(t *testing.B) {
	engine, err := newPostgresEngine()

	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))

	doBenchFindPtr(engine, t)
}

@@ -2,17 +2,17 @@ package xorm

// Executed before an object is initially persisted to the database
type BeforeInsertProcessor interface {
	BeforeInsert()
}

// Executed before an object is updated
type BeforeUpdateProcessor interface {
	BeforeUpdate()
}

// Executed before an object is deleted
type BeforeDeleteProcessor interface {
	BeforeDelete()
}

// !nashtsai! TODO enable BeforeValidateProcessor when xorm starts to support validations
@@ -24,16 +24,15 @@ type BeforeDeleteProcessor interface {

// Executed after an object is persisted to the database
type AfterInsertProcessor interface {
	AfterInsert()
}

// Executed after an object has been updated
type AfterUpdateProcessor interface {
	AfterUpdate()
}

// Executed after an object has been deleted
type AfterDeleteProcessor interface {
	AfterDelete()
}
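For illustration only (not part of this commit): a standalone sketch of how a mapped struct opts into the processor hooks above by implementing the matching methods, and how an engine can probe for them with type assertions. The Account struct and field names are illustrative.

package main

import "fmt"

// Local copies of two of the hook interfaces shown above, for a standalone demo.
type BeforeInsertProcessor interface{ BeforeInsert() }
type AfterInsertProcessor interface{ AfterInsert() }

type Account struct {
	Id      int64
	Created string
}

// Implementing the methods is all that is needed; the hooks are run
// before and after the object is persisted.
func (a *Account) BeforeInsert() { a.Created = "now" }
func (a *Account) AfterInsert()  { fmt.Println("inserted account", a.Id) }

func main() {
	var v interface{} = &Account{Id: 1}
	if p, ok := v.(BeforeInsertProcessor); ok {
		p.BeforeInsert()
	}
	if p, ok := v.(AfterInsertProcessor); ok {
		p.AfterInsert()
	}
}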

4523 session.go
File diff suppressed because it is too large

328 sqlite3.go
@@ -1,229 +1,229 @@
package xorm

import (
	"database/sql"
	"strings"
)

type sqlite3 struct {
	base
}

type sqlite3Parser struct {
}

func (p *sqlite3Parser) parse(driverName, dataSourceName string) (*uri, error) {
	return &uri{dbType: SQLITE, dbName: dataSourceName}, nil
}

func (db *sqlite3) Init(drivername, dataSourceName string) error {
	return db.base.init(&sqlite3Parser{}, drivername, dataSourceName)
}

func (db *sqlite3) SqlType(c *Column) string {
	switch t := c.SQLType.Name; t {
	case Date, DateTime, TimeStamp, Time:
		return Numeric
	case TimeStampz:
		return Text
	case Char, Varchar, TinyText, Text, MediumText, LongText:
		return Text
	case Bit, TinyInt, SmallInt, MediumInt, Int, Integer, BigInt, Bool:
		return Integer
	case Float, Double, Real:
		return Real
	case Decimal, Numeric:
		return Numeric
	case TinyBlob, Blob, MediumBlob, LongBlob, Bytea, Binary, VarBinary:
		return Blob
	case Serial, BigSerial:
		c.IsPrimaryKey = true
		c.IsAutoIncrement = true
		c.Nullable = false
		return Integer
	default:
		return t
	}
}

func (db *sqlite3) SupportInsertMany() bool {
|
||||
return true
|
||||
return true
|
||||
}
|
||||
|
||||
func (db *sqlite3) QuoteStr() string {
|
||||
return "`"
|
||||
return "`"
|
||||
}
|
||||
|
||||
func (db *sqlite3) AutoIncrStr() string {
|
||||
return "AUTOINCREMENT"
|
||||
return "AUTOINCREMENT"
|
||||
}
|
||||
|
||||
func (db *sqlite3) SupportEngine() bool {
|
||||
return false
|
||||
return false
|
||||
}
|
||||
|
||||
func (db *sqlite3) SupportCharset() bool {
|
||||
return false
|
||||
return false
|
||||
}
|
||||
|
||||
func (db *sqlite3) IndexOnTable() bool {
|
||||
return false
|
||||
return false
|
||||
}
|
||||
|
||||
func (db *sqlite3) IndexCheckSql(tableName, idxName string) (string, []interface{}) {
|
||||
args := []interface{}{idxName}
|
||||
return "SELECT name FROM sqlite_master WHERE type='index' and name = ?", args
|
||||
args := []interface{}{idxName}
|
||||
return "SELECT name FROM sqlite_master WHERE type='index' and name = ?", args
|
||||
}
|
||||
|
||||
func (db *sqlite3) TableCheckSql(tableName string) (string, []interface{}) {
|
||||
args := []interface{}{tableName}
|
||||
return "SELECT name FROM sqlite_master WHERE type='table' and name = ?", args
|
||||
args := []interface{}{tableName}
|
||||
return "SELECT name FROM sqlite_master WHERE type='table' and name = ?", args
|
||||
}
|
||||
|
||||
func (db *sqlite3) ColumnCheckSql(tableName, colName string) (string, []interface{}) {
|
||||
args := []interface{}{tableName}
|
||||
sql := "SELECT name FROM sqlite_master WHERE type='table' and name = ? and ((sql like '%`" + colName + "`%') or (sql like '%[" + colName + "]%'))"
|
||||
return sql, args
|
||||
args := []interface{}{tableName}
|
||||
sql := "SELECT name FROM sqlite_master WHERE type='table' and name = ? and ((sql like '%`" + colName + "`%') or (sql like '%[" + colName + "]%'))"
|
||||
return sql, args
|
||||
}
|
||||
|
||||
func (db *sqlite3) GetColumns(tableName string) ([]string, map[string]*Column, error) {
|
||||
args := []interface{}{tableName}
|
||||
s := "SELECT sql FROM sqlite_master WHERE type='table' and name = ?"
|
||||
cnn, err := sql.Open(db.driverName, db.dataSourceName)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer cnn.Close()
|
||||
res, err := query(cnn, s, args...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
args := []interface{}{tableName}
|
||||
s := "SELECT sql FROM sqlite_master WHERE type='table' and name = ?"
|
||||
cnn, err := sql.Open(db.driverName, db.dataSourceName)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer cnn.Close()
|
||||
res, err := query(cnn, s, args...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var sql string
|
||||
for _, record := range res {
|
||||
for name, content := range record {
|
||||
if name == "sql" {
|
||||
sql = string(content)
|
||||
}
|
||||
}
|
||||
}
|
||||
var sql string
|
||||
for _, record := range res {
|
||||
for name, content := range record {
|
||||
if name == "sql" {
|
||||
sql = string(content)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nStart := strings.Index(sql, "(")
|
||||
nEnd := strings.Index(sql, ")")
|
||||
colCreates := strings.Split(sql[nStart+1:nEnd], ",")
|
||||
cols := make(map[string]*Column)
|
||||
colSeq := make([]string, 0)
|
||||
for _, colStr := range colCreates {
|
||||
fields := strings.Fields(strings.TrimSpace(colStr))
|
||||
col := new(Column)
|
||||
col.Indexes = make(map[string]bool)
|
||||
col.Nullable = true
|
||||
for idx, field := range fields {
|
||||
if idx == 0 {
|
||||
col.Name = strings.Trim(field, "`[] ")
|
||||
continue
|
||||
} else if idx == 1 {
|
||||
col.SQLType = SQLType{field, 0, 0}
|
||||
}
|
||||
switch field {
|
||||
case "PRIMARY":
|
||||
col.IsPrimaryKey = true
|
||||
case "AUTOINCREMENT":
|
||||
col.IsAutoIncrement = true
|
||||
case "NULL":
|
||||
if fields[idx-1] == "NOT" {
|
||||
col.Nullable = false
|
||||
} else {
|
||||
col.Nullable = true
|
||||
}
|
||||
}
|
||||
}
|
||||
cols[col.Name] = col
|
||||
colSeq = append(colSeq, col.Name)
|
||||
}
|
||||
return colSeq, cols, nil
|
||||
nStart := strings.Index(sql, "(")
|
||||
nEnd := strings.Index(sql, ")")
|
||||
colCreates := strings.Split(sql[nStart+1:nEnd], ",")
|
||||
cols := make(map[string]*Column)
|
||||
colSeq := make([]string, 0)
|
||||
for _, colStr := range colCreates {
|
||||
fields := strings.Fields(strings.TrimSpace(colStr))
|
||||
col := new(Column)
|
||||
col.Indexes = make(map[string]bool)
|
||||
col.Nullable = true
|
||||
for idx, field := range fields {
|
||||
if idx == 0 {
|
||||
col.Name = strings.Trim(field, "`[] ")
|
||||
continue
|
||||
} else if idx == 1 {
|
||||
col.SQLType = SQLType{field, 0, 0}
|
||||
}
|
||||
switch field {
|
||||
case "PRIMARY":
|
||||
col.IsPrimaryKey = true
|
||||
case "AUTOINCREMENT":
|
||||
col.IsAutoIncrement = true
|
||||
case "NULL":
|
||||
if fields[idx-1] == "NOT" {
|
||||
col.Nullable = false
|
||||
} else {
|
||||
col.Nullable = true
|
||||
}
|
||||
}
|
||||
}
|
||||
cols[col.Name] = col
|
||||
colSeq = append(colSeq, col.Name)
|
||||
}
|
||||
return colSeq, cols, nil
|
||||
}
|
||||
|
||||
func (db *sqlite3) GetTables() ([]*Table, error) {
|
||||
args := []interface{}{}
|
||||
s := "SELECT name FROM sqlite_master WHERE type='table'"
|
||||
args := []interface{}{}
|
||||
s := "SELECT name FROM sqlite_master WHERE type='table'"
|
||||
|
||||
cnn, err := sql.Open(db.driverName, db.dataSourceName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cnn.Close()
|
||||
res, err := query(cnn, s, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cnn, err := sql.Open(db.driverName, db.dataSourceName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cnn.Close()
|
||||
res, err := query(cnn, s, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tables := make([]*Table, 0)
|
||||
for _, record := range res {
|
||||
table := new(Table)
|
||||
for name, content := range record {
|
||||
switch name {
|
||||
case "name":
|
||||
table.Name = string(content)
|
||||
}
|
||||
}
|
||||
if table.Name == "sqlite_sequence" {
|
||||
continue
|
||||
}
|
||||
tables = append(tables, table)
|
||||
}
|
||||
return tables, nil
|
||||
tables := make([]*Table, 0)
|
||||
for _, record := range res {
|
||||
table := new(Table)
|
||||
for name, content := range record {
|
||||
switch name {
|
||||
case "name":
|
||||
table.Name = string(content)
|
||||
}
|
||||
}
|
||||
if table.Name == "sqlite_sequence" {
|
||||
continue
|
||||
}
|
||||
tables = append(tables, table)
|
||||
}
|
||||
return tables, nil
|
||||
}
|
||||
|
||||
func (db *sqlite3) GetIndexes(tableName string) (map[string]*Index, error) {
|
||||
args := []interface{}{tableName}
|
||||
s := "SELECT sql FROM sqlite_master WHERE type='index' and tbl_name = ?"
|
||||
cnn, err := sql.Open(db.driverName, db.dataSourceName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cnn.Close()
|
||||
res, err := query(cnn, s, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
args := []interface{}{tableName}
|
||||
s := "SELECT sql FROM sqlite_master WHERE type='index' and tbl_name = ?"
|
||||
cnn, err := sql.Open(db.driverName, db.dataSourceName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cnn.Close()
|
||||
res, err := query(cnn, s, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
indexes := make(map[string]*Index, 0)
|
||||
for _, record := range res {
|
||||
var sql string
|
||||
index := new(Index)
|
||||
for name, content := range record {
|
||||
if name == "sql" {
|
||||
sql = string(content)
|
||||
}
|
||||
}
|
||||
indexes := make(map[string]*Index, 0)
|
||||
for _, record := range res {
|
||||
var sql string
|
||||
index := new(Index)
|
||||
for name, content := range record {
|
||||
if name == "sql" {
|
||||
sql = string(content)
|
||||
}
|
||||
}
|
||||
|
||||
nNStart := strings.Index(sql, "INDEX")
|
||||
nNEnd := strings.Index(sql, "ON")
|
||||
indexName := strings.Trim(sql[nNStart+6:nNEnd], "` []")
|
||||
//fmt.Println(indexName)
|
||||
if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
|
||||
index.Name = indexName[5+len(tableName) : len(indexName)]
|
||||
} else {
|
||||
index.Name = indexName
|
||||
}
|
||||
nNStart := strings.Index(sql, "INDEX")
|
||||
nNEnd := strings.Index(sql, "ON")
|
||||
indexName := strings.Trim(sql[nNStart+6:nNEnd], "` []")
|
||||
//fmt.Println(indexName)
|
||||
if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
|
||||
index.Name = indexName[5+len(tableName) : len(indexName)]
|
||||
} else {
|
||||
index.Name = indexName
|
||||
}
|
||||
|
||||
if strings.HasPrefix(sql, "CREATE UNIQUE INDEX") {
|
||||
index.Type = UniqueType
|
||||
} else {
|
||||
index.Type = IndexType
|
||||
}
|
||||
if strings.HasPrefix(sql, "CREATE UNIQUE INDEX") {
|
||||
index.Type = UniqueType
|
||||
} else {
|
||||
index.Type = IndexType
|
||||
}
|
||||
|
||||
nStart := strings.Index(sql, "(")
|
||||
nEnd := strings.Index(sql, ")")
|
||||
colIndexes := strings.Split(sql[nStart+1:nEnd], ",")
|
||||
nStart := strings.Index(sql, "(")
|
||||
nEnd := strings.Index(sql, ")")
|
||||
colIndexes := strings.Split(sql[nStart+1:nEnd], ",")
|
||||
|
||||
index.Cols = make([]string, 0)
|
||||
for _, col := range colIndexes {
|
||||
index.Cols = append(index.Cols, strings.Trim(col, "` []"))
|
||||
}
|
||||
indexes[index.Name] = index
|
||||
}
|
||||
index.Cols = make([]string, 0)
|
||||
for _, col := range colIndexes {
|
||||
index.Cols = append(index.Cols, strings.Trim(col, "` []"))
|
||||
}
|
||||
indexes[index.Name] = index
|
||||
}
|
||||
|
||||
return indexes, nil
|
||||
return indexes, nil
|
||||
}
|
||||
|
|
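GetIndexes above recovers index metadata by string-slicing the stored CREATE INDEX statement; names following the IDX_<table>_<col> / UQE_<table>_<col> convention are trimmed back to the bare column part. A standalone sketch of just that trimming rule, with a made-up sample statement:

// Standalone illustration of the index-name trimming used in GetIndexes above.
package main

import (
	"fmt"
	"strings"
)

func indexNameFromSql(createSql, tableName string) string {
	nStart := strings.Index(createSql, "INDEX")
	nEnd := strings.Index(createSql, "ON")
	indexName := strings.Trim(createSql[nStart+6:nEnd], "` []")
	// Generated names like IDX_<table>_<col> or UQE_<table>_<col> are cut back to <col>.
	if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
		return indexName[5+len(tableName):]
	}
	return indexName
}

func main() {
	sql := "CREATE UNIQUE INDEX `UQE_big_struct_name` ON `big_struct` (`name`)"
	fmt.Println(indexNameFromSql(sql, "big_struct")) // prints: name
}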
188  sqlite3_test.go
@@ -1,140 +1,140 @@
package xorm

import (
	"database/sql"
	"os"
	"testing"

	_ "github.com/mattn/go-sqlite3"
)

func newSqlite3Engine() (*Engine, error) {
	os.Remove("./test.db")
	return NewEngine("sqlite3", "./test.db")
}

func newSqlite3DriverDB() (*sql.DB, error) {
	os.Remove("./test.db")
	return sql.Open("sqlite3", "./test.db")
}

func TestSqlite3(t *testing.T) {
	engine, err := newSqlite3Engine()
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	engine.ShowSQL = showTestSql
	engine.ShowErr = showTestSql
	engine.ShowWarn = showTestSql
	engine.ShowDebug = showTestSql

	testAll(engine, t)
	testAll2(engine, t)
	testAll3(engine, t)
}

func TestSqlite3WithCache(t *testing.T) {
	engine, err := newSqlite3Engine()
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))
	engine.ShowSQL = showTestSql
	engine.ShowErr = showTestSql
	engine.ShowWarn = showTestSql
	engine.ShowDebug = showTestSql

	testAll(engine, t)
	testAll2(engine, t)
}

const (
	createTableSqlite3 = "CREATE TABLE IF NOT EXISTS `big_struct` (`id` INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, `name` TEXT NULL, `title` TEXT NULL, `age` TEXT NULL, `alias` TEXT NULL, `nick_name` TEXT NULL);"
	dropTableSqlite3   = "DROP TABLE IF EXISTS `big_struct`;"
)

func BenchmarkSqlite3DriverInsert(t *testing.B) {
	doBenchDriver(newSqlite3DriverDB, createTableSqlite3, dropTableSqlite3,
		doBenchDriverInsert, t)
}

func BenchmarkSqlite3DriverFind(t *testing.B) {
	doBenchDriver(newSqlite3DriverDB, createTableSqlite3, dropTableSqlite3,
		doBenchDriverFind, t)
}

func BenchmarkSqlite3NoCacheInsert(t *testing.B) {
	t.StopTimer()
	engine, err := newSqlite3Engine()
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	//engine.ShowSQL = true
	doBenchInsert(engine, t)
}

func BenchmarkSqlite3NoCacheFind(t *testing.B) {
	t.StopTimer()
	engine, err := newSqlite3Engine()
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	//engine.ShowSQL = true
	doBenchFind(engine, t)
}

func BenchmarkSqlite3NoCacheFindPtr(t *testing.B) {
	t.StopTimer()
	engine, err := newSqlite3Engine()
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	//engine.ShowSQL = true
	doBenchFindPtr(engine, t)
}

func BenchmarkSqlite3CacheInsert(t *testing.B) {
	t.StopTimer()
	engine, err := newSqlite3Engine()
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))
	doBenchInsert(engine, t)
}

func BenchmarkSqlite3CacheFind(t *testing.B) {
	t.StopTimer()
	engine, err := newSqlite3Engine()
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))
	doBenchFind(engine, t)
}

func BenchmarkSqlite3CacheFindPtr(t *testing.B) {
	t.StopTimer()
	engine, err := newSqlite3Engine()
	defer engine.Close()
	if err != nil {
		t.Error(err)
		return
	}
	engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))
	doBenchFindPtr(engine, t)
}
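All of the benchmarks above call t.StopTimer() before opening the engine so that setup cost is excluded from the measurement. A generic sketch of that pattern, unrelated to xorm; the file and function names are illustrative (save it as a _test.go file and run "go test -bench=."):

// Minimal StopTimer/StartTimer sketch for Go benchmarks.
package demo

import "testing"

func expensiveSetup() []int {
	data := make([]int, 1024)
	for i := range data {
		data[i] = i
	}
	return data
}

func BenchmarkSumExcludingSetup(b *testing.B) {
	b.StopTimer() // setup work below is not measured
	data := expensiveSetup()
	b.StartTimer() // only the loop that follows counts

	for i := 0; i < b.N; i++ {
		sum := 0
		for _, v := range data {
			sum += v
		}
		_ = sum
	}
}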
1090  statement.go — File diff suppressed because it is too large
600  table.go
@@ -1,335 +1,335 @@
package xorm

import (
	"reflect"
	"sort"
	"strings"
	"time"
)

// xorm SQL types
type SQLType struct {
	Name           string
	DefaultLength  int
	DefaultLength2 int
}

func (s *SQLType) IsText() bool {
	return s.Name == Char || s.Name == Varchar || s.Name == TinyText ||
		s.Name == Text || s.Name == MediumText || s.Name == LongText
}

func (s *SQLType) IsBlob() bool {
	return (s.Name == TinyBlob) || (s.Name == Blob) ||
		s.Name == MediumBlob || s.Name == LongBlob ||
		s.Name == Binary || s.Name == VarBinary || s.Name == Bytea
}

const ()

var (
	Bit       = "BIT"
	TinyInt   = "TINYINT"
	SmallInt  = "SMALLINT"
	MediumInt = "MEDIUMINT"
	Int       = "INT"
	Integer   = "INTEGER"
	BigInt    = "BIGINT"

	Char       = "CHAR"
	Varchar    = "VARCHAR"
	TinyText   = "TINYTEXT"
	Text       = "TEXT"
	MediumText = "MEDIUMTEXT"
	LongText   = "LONGTEXT"

	Date       = "DATE"
	DateTime   = "DATETIME"
	Time       = "TIME"
	TimeStamp  = "TIMESTAMP"
	TimeStampz = "TIMESTAMPZ"

	Decimal = "DECIMAL"
	Numeric = "NUMERIC"

	Real   = "REAL"
	Float  = "FLOAT"
	Double = "DOUBLE"

	Binary     = "BINARY"
	VarBinary  = "VARBINARY"
	TinyBlob   = "TINYBLOB"
	Blob       = "BLOB"
	MediumBlob = "MEDIUMBLOB"
	LongBlob   = "LONGBLOB"
	Bytea      = "BYTEA"

	Bool = "BOOL"

	Serial    = "SERIAL"
	BigSerial = "BIGSERIAL"

	sqlTypes = map[string]bool{
		Bit:       true,
		TinyInt:   true,
		SmallInt:  true,
		MediumInt: true,
		Int:       true,
		Integer:   true,
		BigInt:    true,

		Char:       true,
		Varchar:    true,
		TinyText:   true,
		Text:       true,
		MediumText: true,
		LongText:   true,

		Date:       true,
		DateTime:   true,
		Time:       true,
		TimeStamp:  true,
		TimeStampz: true,

		Decimal: true,
		Numeric: true,

		Binary:     true,
		VarBinary:  true,
		Real:       true,
		Float:      true,
		Double:     true,
		TinyBlob:   true,
		Blob:       true,
		MediumBlob: true,
		LongBlob:   true,
		Bytea:      true,

		Bool: true,

		Serial:    true,
		BigSerial: true,
	}

	intTypes  = sort.StringSlice{"*int", "*int16", "*int32", "*int8"}
	uintTypes = sort.StringSlice{"*uint", "*uint16", "*uint32", "*uint8"}
)

var b byte
var tm time.Time

func Type2SQLType(t reflect.Type) (st SQLType) {
	switch k := t.Kind(); k {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
		st = SQLType{Int, 0, 0}
	case reflect.Int64, reflect.Uint64:
		st = SQLType{BigInt, 0, 0}
	case reflect.Float32:
		st = SQLType{Float, 0, 0}
	case reflect.Float64:
		st = SQLType{Double, 0, 0}
	case reflect.Complex64, reflect.Complex128:
		st = SQLType{Varchar, 64, 0}
	case reflect.Array, reflect.Slice, reflect.Map:
		if t.Elem() == reflect.TypeOf(b) {
			st = SQLType{Blob, 0, 0}
		} else {
			st = SQLType{Text, 0, 0}
		}
	case reflect.Bool:
		st = SQLType{Bool, 0, 0}
	case reflect.String:
		st = SQLType{Varchar, 255, 0}
	case reflect.Struct:
		if t == reflect.TypeOf(tm) {
			st = SQLType{DateTime, 0, 0}
		} else {
			st = SQLType{Text, 0, 0}
		}
	case reflect.Ptr:
		st, _ = ptrType2SQLType(t)
	default:
		st = SQLType{Text, 0, 0}
	}
	return
}

func ptrType2SQLType(t reflect.Type) (st SQLType, has bool) {
	has = true

	switch t {
	case reflect.TypeOf(&c_EMPTY_STRING):
		st = SQLType{Varchar, 255, 0}
		return
	case reflect.TypeOf(&c_BOOL_DEFAULT):
		st = SQLType{Bool, 0, 0}
	case reflect.TypeOf(&c_COMPLEX64_DEFAULT), reflect.TypeOf(&c_COMPLEX128_DEFAULT):
		st = SQLType{Varchar, 64, 0}
	case reflect.TypeOf(&c_FLOAT32_DEFAULT):
		st = SQLType{Float, 0, 0}
	case reflect.TypeOf(&c_FLOAT64_DEFAULT):
		st = SQLType{Double, 0, 0}
	case reflect.TypeOf(&c_INT64_DEFAULT), reflect.TypeOf(&c_UINT64_DEFAULT):
		st = SQLType{BigInt, 0, 0}
	case reflect.TypeOf(&c_TIME_DEFAULT):
		st = SQLType{DateTime, 0, 0}
	case reflect.TypeOf(&c_INT_DEFAULT), reflect.TypeOf(&c_INT32_DEFAULT), reflect.TypeOf(&c_INT8_DEFAULT), reflect.TypeOf(&c_INT16_DEFAULT), reflect.TypeOf(&c_UINT_DEFAULT), reflect.TypeOf(&c_UINT32_DEFAULT), reflect.TypeOf(&c_UINT8_DEFAULT), reflect.TypeOf(&c_UINT16_DEFAULT):
		st = SQLType{Int, 0, 0}
	default:
		has = false
	}
	return
}

// default sql type change to go types
func SQLType2Type(st SQLType) reflect.Type {
	name := strings.ToUpper(st.Name)
	switch name {
	case Bit, TinyInt, SmallInt, MediumInt, Int, Integer, Serial:
		return reflect.TypeOf(1)
	case BigInt, BigSerial:
		return reflect.TypeOf(int64(1))
	case Float, Real:
		return reflect.TypeOf(float32(1))
	case Double:
		return reflect.TypeOf(float64(1))
	case Char, Varchar, TinyText, Text, MediumText, LongText:
		return reflect.TypeOf("")
	case TinyBlob, Blob, LongBlob, Bytea, Binary, MediumBlob, VarBinary:
		return reflect.TypeOf([]byte{})
	case Bool:
		return reflect.TypeOf(true)
	case DateTime, Date, Time, TimeStamp, TimeStampz:
		return reflect.TypeOf(tm)
	case Decimal, Numeric:
		return reflect.TypeOf("")
	default:
		return reflect.TypeOf("")
	}
}

const (
	IndexType = iota + 1
	UniqueType
)

// database index
type Index struct {
	Name string
	Type int
	Cols []string
}

// add columns which will be composite index
func (index *Index) AddColumn(cols ...string) {
	for _, col := range cols {
		index.Cols = append(index.Cols, col)
	}
}

// new an index
func NewIndex(name string, indexType int) *Index {
	return &Index{name, indexType, make([]string, 0)}
}

const (
	TWOSIDES = iota + 1
	ONLYTODB
	ONLYFROMDB
)

// database column
type Column struct {
	Name            string
	FieldName       string
	SQLType         SQLType
	Length          int
	Length2         int
	Nullable        bool
	Default         string
	Indexes         map[string]bool
	IsPrimaryKey    bool
	IsAutoIncrement bool
	MapType         int
	IsCreated       bool
	IsUpdated       bool
	IsCascade       bool
	IsVersion       bool
}

// generate column description string according dialect
func (col *Column) String(d dialect) string {
	sql := d.QuoteStr() + col.Name + d.QuoteStr() + " "

	sql += d.SqlType(col) + " "

	if col.IsPrimaryKey {
		sql += "PRIMARY KEY "
		if col.IsAutoIncrement {
			sql += d.AutoIncrStr() + " "
		}
	}

	if col.Nullable {
		sql += "NULL "
	} else {
		sql += "NOT NULL "
	}

	if col.Default != "" {
		sql += "DEFAULT " + col.Default + " "
	}

	return sql
}

func (col *Column) stringNoPk(d dialect) string {
	sql := d.QuoteStr() + col.Name + d.QuoteStr() + " "

	sql += d.SqlType(col) + " "

	if col.Nullable {
		sql += "NULL "
	} else {
		sql += "NOT NULL "
	}

	if col.Default != "" {
		sql += "DEFAULT " + col.Default + " "
	}

	return sql
}

// return col's filed of struct's value
func (col *Column) ValueOf(bean interface{}) reflect.Value {
	var fieldValue reflect.Value
	if strings.Contains(col.FieldName, ".") {
		fields := strings.Split(col.FieldName, ".")
		if len(fields) > 2 {
			return reflect.ValueOf(nil)
		}

		fieldValue = reflect.Indirect(reflect.ValueOf(bean)).FieldByName(fields[0])
		fieldValue = fieldValue.FieldByName(fields[1])
	} else {
		fieldValue = reflect.Indirect(reflect.ValueOf(bean)).FieldByName(col.FieldName)
	}
	return fieldValue
}

// database table
type Table struct {
	Name       string
	Type       reflect.Type
	ColumnsSeq []string
	Columns    map[string]*Column
	Indexes    map[string]*Index
	PrimaryKey string
	Created    map[string]bool
	Updated    string
	Version    string
	Cacher     Cacher
}

/*
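Type2SQLType above is the default mapping from Go field kinds to SQL column types. A trimmed, standalone illustration of a few of its branches; the type names are inlined as string literals and this is not the full table:

// Partial sketch of the Go-type -> SQL-type mapping implemented above.
package main

import (
	"fmt"
	"reflect"
	"time"
)

func sqlTypeName(t reflect.Type) string {
	switch t.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
		return "INT"
	case reflect.Int64, reflect.Uint64:
		return "BIGINT"
	case reflect.Bool:
		return "BOOL"
	case reflect.String:
		return "VARCHAR" // default length 255 in the real table.go
	case reflect.Struct:
		if t == reflect.TypeOf(time.Time{}) {
			return "DATETIME"
		}
		return "TEXT"
	default:
		return "TEXT"
	}
}

func main() {
	fmt.Println(sqlTypeName(reflect.TypeOf("")))          // VARCHAR
	fmt.Println(sqlTypeName(reflect.TypeOf(int64(0))))    // BIGINT
	fmt.Println(sqlTypeName(reflect.TypeOf(time.Time{}))) // DATETIME
}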
@@ -344,90 +344,90 @@ func NewTable(name string, t reflect.Type) *Table {

// if has primary key, return column
func (table *Table) PKColumn() *Column {
	return table.Columns[table.PrimaryKey]
}

func (table *Table) VersionColumn() *Column {
	return table.Columns[table.Version]
}

// add a column to table
func (table *Table) AddColumn(col *Column) {
	table.ColumnsSeq = append(table.ColumnsSeq, col.Name)
	table.Columns[col.Name] = col
	if col.IsPrimaryKey {
		table.PrimaryKey = col.Name
	}
	if col.IsCreated {
		table.Created[col.Name] = true
	}
	if col.IsUpdated {
		table.Updated = col.Name
	}
	if col.IsVersion {
		table.Version = col.Name
	}
}

// add an index or an unique to table
func (table *Table) AddIndex(index *Index) {
	table.Indexes[index.Name] = index
}

func (table *Table) genCols(session *Session, bean interface{}, useCol bool, includeQuote bool) ([]string, []interface{}, error) {
	colNames := make([]string, 0)
	args := make([]interface{}, 0)

	for _, col := range table.Columns {
		if useCol && !col.IsVersion && !col.IsCreated && !col.IsUpdated {
			if _, ok := session.Statement.columnMap[col.Name]; !ok {
				continue
			}
		}
		if col.MapType == ONLYFROMDB {
			continue
		}

		fieldValue := col.ValueOf(bean)
		if col.IsAutoIncrement && fieldValue.Int() == 0 {
			continue
		}

		if session.Statement.ColumnStr != "" {
			if _, ok := session.Statement.columnMap[col.Name]; !ok {
				continue
			}
		}
		if session.Statement.OmitStr != "" {
			if _, ok := session.Statement.columnMap[col.Name]; ok {
				continue
			}
		}

		if (col.IsCreated || col.IsUpdated) && session.Statement.UseAutoTime {
			args = append(args, time.Now())
		} else if col.IsVersion && session.Statement.checkVersion {
			args = append(args, 1)
		} else {
			arg, err := session.value2Interface(col, fieldValue)
			if err != nil {
				return colNames, args, err
			}
			args = append(args, arg)
		}

		if includeQuote {
			colNames = append(colNames, session.Engine.Quote(col.Name)+" = ?")
		} else {
			colNames = append(colNames, col.Name)
		}
	}
	return colNames, args, nil
}

// Conversion is an interface. A type implements Conversion will according
// the custom method to fill into database and retrieve from database.
type Conversion interface {
	FromDB([]byte) error
	ToDB() ([]byte, error)
}
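The Conversion interface above lets a field control its own serialization: ToDB is called when the value is written and FromDB when it is read back. A standalone sketch of one possible implementation; the Settings type and the choice of JSON encoding are illustrative, not part of this commit:

// Example type satisfying the Conversion method set declared above.
package main

import (
	"encoding/json"
	"fmt"
)

type Settings struct {
	Theme string `json:"theme"`
	Pages int    `json:"pages"`
}

// FromDB decodes the raw column bytes into the struct.
func (s *Settings) FromDB(data []byte) error {
	return json.Unmarshal(data, s)
}

// ToDB encodes the struct into the bytes stored in the column.
func (s *Settings) ToDB() ([]byte, error) {
	return json.Marshal(s)
}

func main() {
	s := &Settings{Theme: "dark", Pages: 20}
	raw, _ := s.ToDB()
	fmt.Println(string(raw)) // {"theme":"dark","pages":20}

	var back Settings
	_ = back.FromDB(raw)
	fmt.Println(back.Theme, back.Pages) // dark 20
}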
78  xorm.go
@@ -1,58 +1,58 @@
package xorm

import (
	"errors"
	"fmt"
	"os"
	"reflect"
	"runtime"
	"sync"
)

const (
	version string = "0.2.3"
)

func close(engine *Engine) {
	engine.Close()
}

// new a db manager according to the parameter. Currently support four
// drivers
func NewEngine(driverName string, dataSourceName string) (*Engine, error) {
	engine := &Engine{DriverName: driverName,
		DataSourceName: dataSourceName, Filters: make([]Filter, 0)}
	engine.SetMapper(SnakeMapper{})

	if driverName == SQLITE {
		engine.dialect = &sqlite3{}
	} else if driverName == MYSQL {
		engine.dialect = &mysql{}
	} else if driverName == POSTGRES {
		engine.dialect = &postgres{}
		engine.Filters = append(engine.Filters, &PgSeqFilter{})
		engine.Filters = append(engine.Filters, &QuoteFilter{})
	} else if driverName == MYMYSQL {
		engine.dialect = &mymysql{}
	} else {
		return nil, errors.New(fmt.Sprintf("Unsupported driver name: %v", driverName))
	}
	err := engine.dialect.Init(driverName, dataSourceName)
	if err != nil {
		return nil, err
	}

	engine.Tables = make(map[reflect.Type]*Table)
	engine.mutex = &sync.Mutex{}
	engine.TagIdentifier = "xorm"

	engine.Filters = append(engine.Filters, &IdFilter{})
	engine.Logger = os.Stdout

	//engine.Pool = NewSimpleConnectPool()
	//engine.Pool = NewNoneConnectPool()
	//engine.Cacher = NewLRUCacher()
	err = engine.SetPool(NewSysConnectPool())
	runtime.SetFinalizer(engine, close)
	return engine, err
}
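NewEngine above wires the dialect, filters, tag identifier and connection pool together from just a driver name and a data source string. A minimal usage sketch; the import path is an assumption (this vintage of the library lived at github.com/lunny/xorm), so adjust it to wherever the package actually resides:

// Hypothetical caller of NewEngine as defined above.
package main

import (
	"fmt"

	"github.com/lunny/xorm" // assumed import path for this era of xorm
	_ "github.com/mattn/go-sqlite3"
)

func main() {
	engine, err := xorm.NewEngine("sqlite3", "./test.db")
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	// The finalizer registered in NewEngine would eventually close the engine,
	// but closing explicitly keeps the lifetime obvious.
	defer engine.Close()
}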