Merge pull request #37 from nashtsai/scrolling-rs

Scrolling rs
lunny 2013-12-25 23:12:08 -08:00
commit 17d74617ea
5 changed files with 246 additions and 102 deletions


@@ -1873,6 +1873,27 @@ func testIterate(engine *Engine, t *testing.T) {
 	}
 }
 
+func testRows(engine *Engine, t *testing.T) {
+	rows, err := engine.Omit("is_man").Rows(new(Userinfo))
+	if err != nil {
+		t.Error(err)
+		panic(err)
+	}
+	defer rows.Close()
+
+	idx := 0
+	user := new(Userinfo)
+	for rows.Next() {
+		err = rows.Scan(user)
+		if err != nil {
+			t.Error(err)
+			panic(err)
+		}
+		fmt.Println(idx, "--", user)
+		idx++
+	}
+}
+
 type StrangeName struct {
 	Id_t int64 `xorm:"pk autoincr"`
 	Name string
@@ -3500,29 +3521,35 @@ func testNullValue(engine *Engine, t *testing.T) {
 	// t.Error(errors.New(fmt.Sprintf("inserted value unmatch: [%v]", *nullDataGet.Complex128Ptr)))
 	// }
 
-	/*if (*nullDataGet.TimePtr).Unix() != (*nullDataUpdate.TimePtr).Unix() {
+	// !nashtsai! skipped the mymysql test because the driver rounds the time up, causing an inaccurate comparison;
+	// skipped the postgres test because the postgres driver doesn't read time.Time's timezone info when stored in the db.
+	// mysql and sqlite3 seem to have done this correctly by storing the datetime in the UTC timezone; I think the postgres
+	// driver would do better to use timestamp with timezone to solve the issue
+	if engine.DriverName != POSTGRES && engine.DriverName != MYMYSQL {
+		if (*nullDataGet.TimePtr).Unix() != (*nullDataUpdate.TimePtr).Unix() {
 			t.Error(errors.New(fmt.Sprintf("inserted value unmatch: [%v]:[%v]", *nullDataGet.TimePtr, *nullDataUpdate.TimePtr)))
 		} else {
 			// !nashtsai! the mymysql driver fails this test case because the time is rounded up to the nearest second; I would consider this a bug in the mymysql driver
+			// inserted value unmatch: [2013-12-25 12:12:45 +0800 CST]:[2013-12-25 12:12:44.878903653 +0800 CST]
 			fmt.Printf("time value: [%v]:[%v]", *nullDataGet.TimePtr, *nullDataUpdate.TimePtr)
 			fmt.Println()
-	}*/
-	// --
+		}
+	}
 
 	// update to null values
-	/*nullDataUpdate = NullData{}
-	cnt, err = engine.Id(nullData.Id).Update(&nullDataUpdate)
+	nullDataUpdate = NullData{}
+	cnt, err = engine.Id(nullData.Id).Cols("string_ptr").Update(&nullDataUpdate)
 	if err != nil {
 		t.Error(err)
 		panic(err)
 	} else if cnt != 1 {
 		t.Error(errors.New("update count == 0, how can this happen!?"))
 		return
-	}*/
+	}
 
 	// verify get values
-	/*nullDataGet = NullData{}
+	nullDataGet = NullData{}
 	has, err = engine.Id(nullData.Id).Get(&nullDataGet)
 	if err != nil {
 		t.Error(err)
@@ -3538,7 +3565,7 @@ func testNullValue(engine *Engine, t *testing.T) {
 	if nullDataGet.StringPtr != nil {
 		t.Error(errors.New(fmt.Sprintf("not null value: [%v]", *nullDataGet.StringPtr)))
 	}
-	/*
+
 	if nullDataGet.StringPtr2 != nil {
 		t.Error(errors.New(fmt.Sprintf("not null value: [%v]", *nullDataGet.StringPtr2)))
 	}
@@ -3829,6 +3856,8 @@ func testAll2(engine *Engine, t *testing.T) {
 	testMetaInfo(engine, t)
 	fmt.Println("-------------- testIterate --------------")
 	testIterate(engine, t)
+	fmt.Println("-------------- testRows --------------")
+	testRows(engine, t)
 	fmt.Println("-------------- testStrangeName --------------")
 	testStrangeName(engine, t)
 	fmt.Println("-------------- testVersion --------------")


@@ -934,6 +934,13 @@ func (engine *Engine) Iterate(bean interface{}, fun IterFunc) error {
 	return session.Iterate(bean, fun)
 }
 
+// Rows returns a sql.Rows-compatible Rows object, a forward-only iterator for
+// reading records one by one; bean's non-empty fields are used as conditions.
+func (engine *Engine) Rows(bean interface{}) (*Rows, error) {
+	session := engine.NewSession()
+	return session.Rows(bean)
+}
+
 // Count counts the records. bean's non-empty fields
 // are conditions.
 func (engine *Engine) Count(bean interface{}) (int64, error) {
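To illustrate the call pattern the new Engine.Rows method enables, here is a minimal sketch written in the style of the test suite above; Userinfo is the bean the tests already use, and streamUserinfo is a hypothetical helper, not part of this change:

// streamUserinfo walks matching records one at a time instead of loading the
// whole result set into memory with Find.
func streamUserinfo(engine *Engine) error {
	rows, err := engine.Omit("is_man").Rows(new(Userinfo))
	if err != nil {
		return err
	}
	defer rows.Close()

	user := new(Userinfo)
	for rows.Next() {
		if err := rows.Scan(user); err != nil {
			return err
		}
		fmt.Println(user) // the same bean is reused for every row
	}
	return nil
}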


@@ -12,8 +12,6 @@ CREATE DATABASE IF NOT EXISTS xorm_test CHARACTER SET
 utf8 COLLATE utf8_general_ci;
 */
 
-var mysqlShowTestSql bool = true
-
 func TestMysql(t *testing.T) {
 	err := mysqlDdlImport()
 	if err != nil {
@@ -27,10 +25,10 @@ func TestMysql(t *testing.T) {
 		t.Error(err)
 		return
 	}
-	engine.ShowSQL = mysqlShowTestSql
-	engine.ShowErr = mysqlShowTestSql
-	engine.ShowWarn = mysqlShowTestSql
-	engine.ShowDebug = mysqlShowTestSql
+	engine.ShowSQL = showTestSql
+	engine.ShowErr = showTestSql
+	engine.ShowWarn = showTestSql
+	engine.ShowDebug = showTestSql
 
 	testAll(engine, t)
 	testAll2(engine, t)
@@ -51,10 +49,10 @@ func TestMysqlWithCache(t *testing.T) {
 		return
 	}
 	engine.SetDefaultCacher(NewLRUCacher(NewMemoryStore(), 1000))
-	engine.ShowSQL = mysqlShowTestSql
-	engine.ShowErr = mysqlShowTestSql
-	engine.ShowWarn = mysqlShowTestSql
-	engine.ShowDebug = mysqlShowTestSql
+	engine.ShowSQL = showTestSql
+	engine.ShowErr = showTestSql
+	engine.ShowWarn = showTestSql
+	engine.ShowDebug = showTestSql
 
 	testAll(engine, t)
 	testAll2(engine, t)
@@ -69,10 +67,10 @@ func mysqlDdlImport() error {
 	if err != nil {
 		return err
 	}
-	engine.ShowSQL = mysqlShowTestSql
-	engine.ShowErr = mysqlShowTestSql
-	engine.ShowWarn = mysqlShowTestSql
-	engine.ShowDebug = mysqlShowTestSql
+	engine.ShowSQL = showTestSql
+	engine.ShowErr = showTestSql
+	engine.ShowWarn = showTestSql
+	engine.ShowDebug = showTestSql
 
 	sqlResults, _ := engine.Import("tests/mysql_ddl.sql")
 	engine.LogDebug("sql results: %v", sqlResults)

rows.go (new file, 141 lines)

@@ -0,0 +1,141 @@
package xorm

import (
	"database/sql"
	"fmt"
	"reflect"
)

// Rows is a forward-only, sql.Rows-compatible iterator over query results.
type Rows struct {
	NoTypeCheck bool

	session   *Session
	stmt      *sql.Stmt
	rows      *sql.Rows
	fields    []string
	beanType  reflect.Type
	lastError error
}

func newRows(session *Session, bean interface{}) (*Rows, error) {
	rows := new(Rows)
	rows.session = session
	rows.beanType = reflect.Indirect(reflect.ValueOf(bean)).Type()

	err := rows.session.newDb()
	if err != nil {
		return nil, err
	}
	defer rows.session.Statement.Init()

	var sql string
	var args []interface{}
	rows.session.Statement.RefTable = rows.session.Engine.autoMap(bean)
	if rows.session.Statement.RawSQL == "" {
		sql, args = rows.session.Statement.genGetSql(bean)
	} else {
		sql = rows.session.Statement.RawSQL
		args = rows.session.Statement.RawParams
	}

	for _, filter := range rows.session.Engine.Filters {
		sql = filter.Do(sql, session)
	}

	rows.session.Engine.LogSQL(sql)
	rows.session.Engine.LogSQL(args)

	rows.stmt, err = rows.session.Db.Prepare(sql)
	if err != nil {
		rows.lastError = err
		defer rows.Close()
		return nil, err
	}

	rows.rows, err = rows.stmt.Query(args...)
	if err != nil {
		rows.lastError = err
		defer rows.Close()
		return nil, err
	}

	rows.fields, err = rows.rows.Columns()
	if err != nil {
		rows.lastError = err
		defer rows.Close()
		return nil, err
	}

	return rows, nil
}

// Next moves the cursor to the next record; it returns false once the end has
// been reached.
func (rows *Rows) Next() bool {
	if rows.lastError == nil && rows.rows != nil {
		hasNext := rows.rows.Next()
		if !hasNext {
			rows.lastError = sql.ErrNoRows
		}
		return hasNext
	}
	return false
}

// Err returns the error, if any, that was encountered during iteration.
// Err may be called after an explicit or implicit Close.
func (rows *Rows) Err() error {
	return rows.lastError
}

// Scan copies the current row into the given bean's fields.
func (rows *Rows) Scan(bean interface{}) error {
	if rows.lastError != nil {
		return rows.lastError
	}

	if !rows.NoTypeCheck && reflect.Indirect(reflect.ValueOf(bean)).Type() != rows.beanType {
		return fmt.Errorf("scan arg is incompatible type to [%v]", rows.beanType)
	}

	result, err := row2map(rows.rows, rows.fields) // !nashtsai! TODO: remove the row2map-then-scanMapIntoStruct conversion for better performance
	if err == nil {
		err = rows.session.scanMapIntoStruct(bean, result)
	}
	return err
}

// // Columns returns the column names. Columns returns an error if the rows
// // are closed, or if the rows are from QueryRow and there was a deferred error.
// func (rows *Rows) Columns() ([]string, error) {
// 	if rows.lastError == nil && rows.rows != nil {
// 		return rows.rows.Columns()
// 	}
// 	return nil, rows.lastError
// }

// Close releases any opened resources and, if session.IsAutoClose is true,
// closes the session as well.
func (rows *Rows) Close() error {
	if rows.session.IsAutoClose {
		defer rows.session.Close()
	}

	if rows.lastError == nil {
		if rows.rows != nil {
			rows.lastError = rows.rows.Close()
			if rows.lastError != nil {
				defer rows.stmt.Close()
				return rows.lastError
			}
		}
		if rows.stmt != nil {
			rows.lastError = rows.stmt.Close()
		}
	} else {
		if rows.stmt != nil {
			defer rows.stmt.Close()
		}
		if rows.rows != nil {
			defer rows.rows.Close()
		}
	}
	return rows.lastError
}
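One behavioural detail follows from the code above: Next records sql.ErrNoRows in lastError once the cursor is exhausted, so unlike database/sql, Err reports a non-nil error even after a clean, complete iteration. A caller that only wants genuine failures has to filter out that sentinel; a minimal sketch in the same package (drain is a hypothetical helper, not part of this change):

// drain walks the iterator to the end and reports only genuine errors,
// treating the sql.ErrNoRows sentinel set by Next on exhaustion as the
// normal stop condition.
func drain(rows *Rows, bean interface{}) error {
	defer rows.Close()
	for rows.Next() {
		if err := rows.Scan(bean); err != nil {
			return err
		}
	}
	if err := rows.Err(); err != nil && err != sql.ErrNoRows {
		return err
	}
	return nil
}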


@@ -24,9 +24,6 @@ type Session struct {
 	IsAutoClose bool
 
 	// !nashtsai! storing these beans because of a not-yet-committed tx
-	// afterInsertBeans []interface{}
-	// afterUpdateBeans []interface{}
-	// afterDeleteBeans []interface{}
 	afterInsertBeans map[interface{}]*[]func(interface{})
 	afterUpdateBeans map[interface{}]*[]func(interface{})
 	afterDeleteBeans map[interface{}]*[]func(interface{})
@@ -835,69 +832,38 @@ func (session *Session) cacheFind(t reflect.Type, sql string, rowsSlicePtr inter
 // IterFunc is only used by Iterate
 type IterFunc func(idx int, bean interface{}) error
 
+// Rows returns a sql.Rows-compatible Rows object, a forward-only iterator for
+// reading records one by one; bean's non-empty fields are used as conditions.
+func (session *Session) Rows(bean interface{}) (*Rows, error) {
+	return newRows(session, bean)
+}
+
 // Iterate handles records from the table record by record; condiBeans's non-empty fields
 // are conditions. beans could be []Struct, []*Struct, map[int64]Struct or
 // map[int64]*Struct
 func (session *Session) Iterate(bean interface{}, fun IterFunc) error {
-	err := session.newDb()
+	rows, err := session.Rows(bean)
 	if err != nil {
 		return err
-	}
-	defer session.Statement.Init()
-	if session.IsAutoClose {
-		defer session.Close()
-	}
-
-	var sql string
-	var args []interface{}
-	session.Statement.RefTable = session.Engine.autoMap(bean)
-	if session.Statement.RawSQL == "" {
-		sql, args = session.Statement.genGetSql(bean)
-	} else {
-		sql = session.Statement.RawSQL
-		args = session.Statement.RawParams
-	}
-
-	for _, filter := range session.Engine.Filters {
-		sql = filter.Do(sql, session)
-	}
-
-	session.Engine.LogSQL(sql)
-	session.Engine.LogSQL(args)
-
-	s, err := session.Db.Prepare(sql)
-	if err != nil {
-		return err
-	}
-	defer s.Close()
-	rows, err := s.Query(args...)
-	if err != nil {
-		return err
-	}
-	defer rows.Close()
-	//b := reflect.New(iterator.beanType).Interface()
-	fields, err := rows.Columns()
-	if err != nil {
-		return err
-	}
-	t := reflect.Indirect(reflect.ValueOf(bean)).Type()
-	b := reflect.New(t).Interface()
-	i := 0
-	for rows.Next() {
-		result, err := row2map(rows, fields)
-		if err == nil {
-			err = session.scanMapIntoStruct(b, result)
-		}
-		if err == nil {
-			err = fun(i, b)
-			i = i + 1
-		}
-		if err != nil {
-			return err
-		}
+	} else {
+		defer rows.Close()
+		i := 0
+		for rows.Next() {
+			b := reflect.New(rows.beanType).Interface()
+			err = rows.Scan(b)
+			if err != nil {
+				return err
+			}
+			err = fun(i, b)
+			if err != nil {
+				return err
+			}
+			i++
+		}
+		return err
 	}
 	return nil
 }
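With this change Iterate becomes a thin wrapper over the new Rows iterator while its external contract stays the same: each bean handed to the IterFunc is freshly allocated per row. A minimal usage sketch (countUserinfo is a hypothetical helper, Userinfo the bean from the tests above):

// countUserinfo counts matching records by walking them one at a time with an
// IterFunc instead of materialising the whole result set.
func countUserinfo(session *Session) (int, error) {
	n := 0
	err := session.Iterate(new(Userinfo), func(idx int, bean interface{}) error {
		n++ // bean is a freshly scanned *Userinfo for every call
		return nil
	})
	return n, err
}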
@@ -1770,6 +1736,9 @@ func (session *Session) bytes2Value(col *Column, fieldValue *reflect.Value, data
 		return errors.New("arg " + key + " as int: " + err.Error())
 	}
 	if x != 0 {
+		// !nashtsai! TODO: for a hasOne relationship it is preferable to use a join query for eager
+		// fetching; also consider adding a 'lazy' attribute to the xorm tag so that a hasOne
+		// property can be fetched lazily
 		structInter := reflect.New(fieldValue.Type())
 		newsession := session.Engine.NewSession()
 		defer newsession.Close()