Merge branch 'master' into master

raizen666 2021-07-15 04:56:52 +08:00
commit 7be7d168c1
25 changed files with 382 additions and 199 deletions

View File

@ -138,7 +138,7 @@ test: go-check
test-cockroach: go-check
$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=postgres -schema='$(TEST_COCKROACH_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \
-conn_str="postgres://$(TEST_COCKROACH_USERNAME):$(TEST_COCKROACH_PASSWORD)@$(TEST_COCKROACH_HOST)/$(TEST_COCKROACH_DBNAME)?sslmode=disable&experimental_serial_normalization=sql_sequence" \
-ignore_update_limit=true -coverprofile=cockroach.$(TEST_COCKROACH_SCHEMA).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
-ignore_update_limit=true -coverprofile=cockroach.$(TEST_COCKROACH_SCHEMA).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
.PHONY: test-cockroach\#%
test-cockroach\#%: go-check
@ -152,7 +152,7 @@ test-mssql: go-check
-conn_str="server=$(TEST_MSSQL_HOST);user id=$(TEST_MSSQL_USERNAME);password=$(TEST_MSSQL_PASSWORD);database=$(TEST_MSSQL_DBNAME)" \
-default_varchar=$(TEST_MSSQL_DEFAULT_VARCHAR) -default_char=$(TEST_MSSQL_DEFAULT_CHAR) \
-do_nvarchar_override_test=$(TEST_MSSQL_DO_NVARCHAR_OVERRIDE_TEST) \
-coverprofile=mssql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
-coverprofile=mssql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
.PHONY: test-mssql\#%
test-mssql\#%: go-check
@ -166,7 +166,7 @@ test-mssql\#%: go-check
test-mymysql: go-check
$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mymysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \
-conn_str="tcp:$(TEST_MYSQL_HOST)*$(TEST_MYSQL_DBNAME)/$(TEST_MYSQL_USERNAME)/$(TEST_MYSQL_PASSWORD)" \
-coverprofile=mymysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
-coverprofile=mymysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
.PHONY: test-mymysql\#%
test-mymysql\#%: go-check
@ -178,7 +178,7 @@ test-mymysql\#%: go-check
test-mysql: go-check
$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \
-conn_str="$(TEST_MYSQL_USERNAME):$(TEST_MYSQL_PASSWORD)@tcp($(TEST_MYSQL_HOST))/$(TEST_MYSQL_DBNAME)?charset=$(TEST_MYSQL_CHARSET)" \
-coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
-coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
.PHONY: test-mysql\#%
test-mysql\#%: go-check
@ -190,7 +190,7 @@ test-mysql\#%: go-check
test-postgres: go-check
$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=postgres -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \
-conn_str="postgres://$(TEST_PGSQL_USERNAME):$(TEST_PGSQL_PASSWORD)@$(TEST_PGSQL_HOST)/$(TEST_PGSQL_DBNAME)?sslmode=disable" \
-quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
-quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
.PHONY: test-postgres\#%
test-postgres\#%: go-check
@ -201,12 +201,12 @@ test-postgres\#%: go-check
.PHONY: test-sqlite3
test-sqlite3: go-check
$(GO) test $(INTEGRATION_PACKAGES) -v -race -cache=$(TEST_CACHE_ENABLE) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \
-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite3.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite3.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
.PHONY: test-sqlite3-schema
test-sqlite3-schema: go-check
$(GO) test $(INTEGRATION_PACKAGES) -v -race -schema=xorm -cache=$(TEST_CACHE_ENABLE) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \
-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite3.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite3.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
.PHONY: test-sqlite3\#%
test-sqlite3\#%: go-check
@ -216,12 +216,12 @@ test-sqlite3\#%: go-check
.PHONY: test-sqlite
test-sqlite: go-check
$(GO) test $(INTEGRATION_PACKAGES) -v -race -cache=$(TEST_CACHE_ENABLE) -db=sqlite -conn_str="./test.db?cache=shared&mode=rwc" \
-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
.PHONY: test-sqlite-schema
test-sqlite-schema: go-check
$(GO) test $(INTEGRATION_PACKAGES) -v -race -schema=xorm -cache=$(TEST_CACHE_ENABLE) -db=sqlite -conn_str="./test.db?cache=shared&mode=rwc" \
-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
-quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
.PHONY: test-sqlite\#%
test-sqlite\#%: go-check
@ -233,7 +233,7 @@ test-sqlite\#%: go-check
test-tidb: go-check
$(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mysql -cache=$(TEST_CACHE_ENABLE) -ignore_select_update=true \
-conn_str="$(TEST_TIDB_USERNAME):$(TEST_TIDB_PASSWORD)@tcp($(TEST_TIDB_HOST))/$(TEST_TIDB_DBNAME)" \
-quote=$(TEST_QUOTE_POLICY) -coverprofile=tidb.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic
-quote=$(TEST_QUOTE_POLICY) -coverprofile=tidb.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic -timeout=20m
.PHONY: test-tidb\#%
test-tidb\#%: go-check
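
The -timeout=20m added to each target raises the go test per-binary deadline above the default 10 minutes, so the slower database suites are no longer killed mid-run. A minimal sketch, not part of this change, of how a test in the integrations package can observe that budget via the Go 1.15+ t.Deadline() API:

// Minimal sketch, not part of this change: a test can inspect the deadline
// that -timeout=20m imposes via t.Deadline() (Go 1.15+).
package integrations

import (
	"testing"
	"time"
)

func TestDeadlineBudget(t *testing.T) {
	deadline, ok := t.Deadline() // set from the -timeout flag
	if !ok {
		t.Skip("no -timeout given; the binary runs without a deadline")
	}
	// With -timeout=20m the remaining budget starts close to 20 minutes.
	if time.Until(deadline) < time.Minute {
		t.Fatal("less than a minute left for the long integration tests")
	}
}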

View File

@ -348,7 +348,6 @@ func convertAssign(dest, src interface{}, originalLocation *time.Location, conve
*d = cloneBytes(s)
return nil
}
case time.Time:
switch d := dest.(type) {
case *string:

View File

@ -19,7 +19,14 @@ func String2Time(s string, originalLocation *time.Location, convertedLocation *t
dt = dt.In(convertedLocation)
return &dt, nil
} else if len(s) == 20 && s[10] == 'T' && s[19] == 'Z' {
dt, err := time.ParseInLocation("2006-01-02T15:04:05Z", s, originalLocation)
dt, err := time.ParseInLocation(time.RFC3339, s, originalLocation)
if err != nil {
return nil, err
}
dt = dt.In(convertedLocation)
return &dt, nil
} else if len(s) == 25 && s[10] == 'T' && s[19] == '+' && s[22] == ':' {
dt, err := time.Parse(time.RFC3339, s)
if err != nil {
return nil, err
}
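
Using time.RFC3339 as the layout is equivalent for this well-formed input; note that ParseInLocation only falls back to the supplied location when the layout carries no zone, so an explicit Z or numeric offset in the string wins. A minimal sketch of that behaviour:

// Minimal sketch: an explicit zone in the input overrides the location
// argument; the location only applies to zone-less layouts.
package main

import (
	"fmt"
	"time"
)

func main() {
	shanghai, _ := time.LoadLocation("Asia/Shanghai")

	// Zoned input: the trailing "Z" pins the result to UTC regardless of shanghai.
	zoned, _ := time.ParseInLocation(time.RFC3339, "2021-08-10T10:33:04Z", shanghai)
	fmt.Println(zoned) // 2021-08-10 10:33:04 +0000 UTC

	// Zone-less input: only here does the location argument apply.
	local, _ := time.ParseInLocation("2006-01-02 15:04:05", "2021-08-10 10:33:04", shanghai)
	fmt.Println(local) // 2021-08-10 10:33:04 +0800 CST
}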

convert/time_test.go (new file, 30 lines)
View File

@ -0,0 +1,30 @@
// Copyright 2021 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package convert
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestString2Time(t *testing.T) {
expectedLoc, err := time.LoadLocation("Asia/Shanghai")
assert.NoError(t, err)
var kases = map[string]time.Time{
"2021-06-06T22:58:20+08:00": time.Date(2021, 6, 6, 22, 58, 20, 0, expectedLoc),
"2021-07-11 10:44:00": time.Date(2021, 7, 11, 18, 44, 0, 0, expectedLoc),
"2021-08-10T10:33:04Z": time.Date(2021, 8, 10, 18, 33, 04, 0, expectedLoc),
}
for layout, tm := range kases {
t.Run(layout, func(t *testing.T) {
target, err := String2Time(layout, time.UTC, expectedLoc)
assert.NoError(t, err)
assert.EqualValues(t, tm, *target)
})
}
}

View File

@ -42,10 +42,11 @@ func (uri *URI) SetSchema(schema string) {
type Dialect interface {
Init(*URI) error
URI() *URI
Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error)
SQLType(*schemas.Column) string
Alias(string) string // return what a sql type's alias of
FormatBytes(b []byte) string
Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error)
ColumnTypeKind(string) int // database column type kind
IsReserved(string) bool
Quoter() schemas.Quoter
@ -102,11 +103,6 @@ func (db *Base) URI() *URI {
return db.uri
}
// FormatBytes formats bytes
func (db *Base) FormatBytes(bs []byte) string {
return fmt.Sprintf("0x%x", bs)
}
// DropTableSQL returns drop table SQL
func (db *Base) DropTableSQL(tableName string) (string, bool) {
quote := db.dialect.Quoter().Quote
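
ColumnTypeKind gives callers a dialect-aware way to classify the type name a driver reports for a result column; the dialect files below each map their native names onto the schemas kinds. A hedged caller-side sketch of the intended call pattern (the helper name timeColumns is illustrative, not code from this change):

// Hedged sketch of the call pattern for the new interface method;
// timeColumns is an illustrative helper, not part of this change.
package example

import (
	"database/sql"

	"xorm.io/xorm/dialects"
	"xorm.io/xorm/schemas"
)

// timeColumns returns the names of result columns the dialect classifies
// as time-typed, based on the driver-reported DatabaseTypeName.
func timeColumns(dialect dialects.Dialect, types []*sql.ColumnType) []string {
	var names []string
	for _, ct := range types {
		if dialect.ColumnTypeKind(ct.DatabaseTypeName()) == schemas.TIME_TYPE {
			names = append(names, ct.Name())
		}
	}
	return names
}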

View File

@ -364,6 +364,19 @@ func (db *mssql) SQLType(c *schemas.Column) string {
return res
}
func (db *mssql) ColumnTypeKind(t string) int {
switch strings.ToUpper(t) {
case "DATE", "DATETIME", "DATETIME2", "TIME":
return schemas.TIME_TYPE
case "VARCHAR", "TEXT", "CHAR", "NVARCHAR", "NCHAR", "NTEXT":
return schemas.TEXT_TYPE
case "FLOAT", "REAL", "BIGINT", "DATETIMEOFFSET", "TINYINT", "SMALLINT", "INT":
return schemas.NUMERIC_TYPE
default:
return schemas.UNKNOW_TYPE
}
}
func (db *mssql) IsReserved(name string) bool {
_, ok := mssqlReservedWords[strings.ToUpper(name)]
return ok

View File

@ -337,6 +337,21 @@ func (db *mysql) SQLType(c *schemas.Column) string {
return res
}
func (db *mysql) ColumnTypeKind(t string) int {
switch strings.ToUpper(t) {
case "DATETIME":
return schemas.TIME_TYPE
case "CHAR", "VARCHAR", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT", "ENUM", "SET":
return schemas.TEXT_TYPE
case "BIGINT", "TINYINT", "SMALLINT", "MEDIUMINT", "INT", "FLOAT", "REAL", "DOUBLE PRECISION", "DECIMAL", "NUMERIC", "BIT":
return schemas.NUMERIC_TYPE
case "BINARY", "VARBINARY", "TINYBLOB", "BLOB", "MEDIUMBLOB", "LONGBLOB":
return schemas.BLOB_TYPE
default:
return schemas.UNKNOW_TYPE
}
}
func (db *mysql) IsReserved(name string) bool {
_, ok := mysqlReservedWords[strings.ToUpper(name)]
return ok

View File

@ -568,6 +568,21 @@ func (db *oracle) SQLType(c *schemas.Column) string {
return res
}
func (db *oracle) ColumnTypeKind(t string) int {
switch strings.ToUpper(t) {
case "DATE":
return schemas.TIME_TYPE
case "CHAR", "NCHAR", "VARCHAR", "VARCHAR2", "NVARCHAR2", "LONG", "CLOB", "NCLOB":
return schemas.TEXT_TYPE
case "NUMBER":
return schemas.NUMERIC_TYPE
case "BLOB":
return schemas.BLOB_TYPE
default:
return schemas.UNKNOW_TYPE
}
}
func (db *oracle) AutoIncrStr() string {
return "AUTO_INCREMENT"
}

View File

@ -873,11 +873,6 @@ func (db *postgres) SetQuotePolicy(quotePolicy QuotePolicy) {
}
}
// FormatBytes formats bytes
func (db *postgres) FormatBytes(bs []byte) string {
return fmt.Sprintf("E'\\x%x'", bs)
}
func (db *postgres) SQLType(c *schemas.Column) string {
var res string
switch t := c.SQLType.Name; t {
@ -943,6 +938,21 @@ func (db *postgres) SQLType(c *schemas.Column) string {
return res
}
func (db *postgres) ColumnTypeKind(t string) int {
switch strings.ToUpper(t) {
case "DATETIME", "TIMESTAMP":
return schemas.TIME_TYPE
case "VARCHAR", "TEXT":
return schemas.TEXT_TYPE
case "BIGINT", "BIGSERIAL", "SMALLINT", "INT", "INT8", "INT4", "INTEGER", "SERIAL", "FLOAT", "FLOAT4", "REAL", "DOUBLE PRECISION":
return schemas.NUMERIC_TYPE
case "BOOL":
return schemas.BOOL_TYPE
default:
return schemas.UNKNOW_TYPE
}
}
func (db *postgres) IsReserved(name string) bool {
_, ok := postgresReservedWords[strings.ToUpper(name)]
return ok

View File

@ -233,8 +233,19 @@ func (db *sqlite3) SQLType(c *schemas.Column) string {
}
}
func (db *sqlite3) FormatBytes(bs []byte) string {
return fmt.Sprintf("X'%x'", bs)
func (db *sqlite3) ColumnTypeKind(t string) int {
switch strings.ToUpper(t) {
case "DATETIME":
return schemas.TIME_TYPE
case "TEXT":
return schemas.TEXT_TYPE
case "INTEGER", "REAL", "NUMERIC", "DECIMAL":
return schemas.NUMERIC_TYPE
case "BLOB":
return schemas.BLOB_TYPE
default:
return schemas.UNKNOW_TYPE
}
}
func (db *sqlite3) IsReserved(name string) bool {

engine.go (180 lines changed)
View File

@ -13,7 +13,6 @@ import (
"os"
"reflect"
"runtime"
"strconv"
"strings"
"time"
@ -21,7 +20,6 @@ import (
"xorm.io/xorm/contexts"
"xorm.io/xorm/core"
"xorm.io/xorm/dialects"
"xorm.io/xorm/internal/json"
"xorm.io/xorm/internal/utils"
"xorm.io/xorm/log"
"xorm.io/xorm/names"
@ -446,93 +444,14 @@ func (engine *Engine) DumpTables(tables []*schemas.Table, w io.Writer, tp ...sch
return engine.dumpTables(tables, w, tp...)
}
func formatColumnValue(dbLocation *time.Location, dstDialect dialects.Dialect, d interface{}, col *schemas.Column) string {
if d == nil {
return "NULL"
}
if dq, ok := d.(bool); ok && (dstDialect.URI().DBType == schemas.SQLITE ||
dstDialect.URI().DBType == schemas.MSSQL) {
if dq {
func formatBool(s string, dstDialect dialects.Dialect) string {
if dstDialect.URI().DBType == schemas.MSSQL {
switch s {
case "true":
return "1"
}
case "false":
return "0"
}
if col.SQLType.IsText() {
var v string
switch reflect.TypeOf(d).Kind() {
case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map:
bytes, err := json.DefaultJSONHandler.Marshal(d)
if err != nil {
v = fmt.Sprintf("%s", d)
} else {
v = string(bytes)
}
default:
v = fmt.Sprintf("%s", d)
}
return "'" + strings.Replace(v, "'", "''", -1) + "'"
} else if col.SQLType.IsTime() {
if t, ok := d.(time.Time); ok {
return "'" + t.In(dbLocation).Format("2006-01-02 15:04:05") + "'"
}
var v = fmt.Sprintf("%s", d)
if strings.HasSuffix(v, " +0000 UTC") {
return fmt.Sprintf("'%s'", v[0:len(v)-len(" +0000 UTC")])
} else if strings.HasSuffix(v, " +0000 +0000") {
return fmt.Sprintf("'%s'", v[0:len(v)-len(" +0000 +0000")])
}
return "'" + strings.Replace(v, "'", "''", -1) + "'"
} else if col.SQLType.IsBlob() {
if reflect.TypeOf(d).Kind() == reflect.Slice {
return fmt.Sprintf("%s", dstDialect.FormatBytes(d.([]byte)))
} else if reflect.TypeOf(d).Kind() == reflect.String {
return fmt.Sprintf("'%s'", d.(string))
}
} else if col.SQLType.IsNumeric() {
switch reflect.TypeOf(d).Kind() {
case reflect.Slice:
if col.SQLType.Name == schemas.Bool {
return fmt.Sprintf("%v", strconv.FormatBool(d.([]byte)[0] != byte('0')))
}
return fmt.Sprintf("%s", string(d.([]byte)))
case reflect.Int16, reflect.Int8, reflect.Int32, reflect.Int64, reflect.Int:
if col.SQLType.Name == schemas.Bool {
v := reflect.ValueOf(d).Int() > 0
if dstDialect.URI().DBType == schemas.SQLITE {
if v {
return "1"
}
return "0"
}
return fmt.Sprintf("%v", strconv.FormatBool(v))
}
return fmt.Sprintf("%d", d)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
if col.SQLType.Name == schemas.Bool {
v := reflect.ValueOf(d).Uint() > 0
if dstDialect.URI().DBType == schemas.SQLITE {
if v {
return "1"
}
return "0"
}
return fmt.Sprintf("%v", strconv.FormatBool(v))
}
return fmt.Sprintf("%d", d)
default:
return fmt.Sprintf("%v", d)
}
}
s := fmt.Sprintf("%v", d)
if strings.Contains(s, ":") || strings.Contains(s, "-") {
if strings.HasSuffix(s, " +0000 UTC") {
return fmt.Sprintf("'%s'", s[0:len(s)-len(" +0000 UTC")])
}
return fmt.Sprintf("'%s'", s)
}
return s
}
@ -545,7 +464,7 @@ func (engine *Engine) dumpTables(tables []*schemas.Table, w io.Writer, tp ...sch
} else {
dstDialect = dialects.QueryDialect(tp[0])
if dstDialect == nil {
return errors.New("Unsupported database type")
return fmt.Errorf("unsupported database type %v", tp[0])
}
uri := engine.dialect.URI()
@ -619,75 +538,70 @@ func (engine *Engine) dumpTables(tables []*schemas.Table, w io.Writer, tp ...sch
}
defer rows.Close()
if table.Type != nil {
types, err := rows.ColumnTypes()
if err != nil {
return err
}
sess := engine.NewSession()
defer sess.Close()
for rows.Next() {
beanValue := reflect.New(table.Type)
bean := beanValue.Interface()
fields, err := rows.Columns()
if err != nil {
return err
}
scanResults, err := sess.row2Slice(rows, fields, bean)
if err != nil {
return err
}
dataStruct := utils.ReflectValue(bean)
_, err = sess.slice2Bean(scanResults, fields, bean, &dataStruct, table)
if err != nil {
return err
}
_, err = io.WriteString(w, "INSERT INTO "+dstDialect.Quoter().Quote(dstTableName)+" ("+destColNames+") VALUES (")
if err != nil {
return err
}
var temp string
for _, d := range dstCols {
col := table.GetColumn(d)
if col == nil {
return errors.New("unknown column error")
}
field := dataStruct.FieldByIndex(col.FieldIndex)
temp += "," + formatColumnValue(engine.DatabaseTZ, dstDialect, field.Interface(), col)
}
_, err = io.WriteString(w, temp[1:]+");\n")
scanResults, err := sess.engine.scanStringInterface(rows, types)
if err != nil {
return err
}
for i, scanResult := range scanResults {
stp := schemas.SQLType{Name: types[i].DatabaseTypeName()}
if stp.IsNumeric() {
s := scanResult.(*sql.NullString)
if s.Valid {
if _, err = io.WriteString(w, formatBool(s.String, dstDialect)); err != nil {
return err
}
} else {
if _, err = io.WriteString(w, "NULL"); err != nil {
return err
}
}
} else if stp.IsBool() {
s := scanResult.(*sql.NullString)
if s.Valid {
if _, err = io.WriteString(w, formatBool(s.String, dstDialect)); err != nil {
return err
}
} else {
if _, err = io.WriteString(w, "NULL"); err != nil {
return err
}
}
} else {
for rows.Next() {
dest := make([]interface{}, len(cols))
err = rows.ScanSlice(&dest)
if err != nil {
s := scanResult.(*sql.NullString)
if s.Valid {
if _, err = io.WriteString(w, "'"+strings.ReplaceAll(s.String, "'", "''")+"'"); err != nil {
return err
}
_, err = io.WriteString(w, "INSERT INTO "+dstDialect.Quoter().Quote(dstTableName)+" ("+destColNames+") VALUES (")
if err != nil {
} else {
if _, err = io.WriteString(w, "NULL"); err != nil {
return err
}
var temp string
for i, d := range dest {
col := table.GetColumn(cols[i])
if col == nil {
return errors.New("unknow column error")
}
temp += "," + formatColumnValue(engine.DatabaseTZ, dstDialect, d, col)
}
_, err = io.WriteString(w, temp[1:]+");\n")
if err != nil {
if i < len(scanResults)-1 {
if _, err = io.WriteString(w, ","); err != nil {
return err
}
}
}
_, err = io.WriteString(w, ");\n")
if err != nil {
return err
}
}
// FIXME: Hack for postgres
if dstDialect.URI().DBType == schemas.POSTGRES && table.AutoIncrColumn() != nil {
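
The dump path now scans every value through sql.NullString and decides how to print it from the column's reported type kind, with formatBool covering the one dialect difference (MSSQL has no true/false literals). A hedged restatement of those emission rules, written as if it sat next to formatBool in package xorm; the helper name emitLiteral is illustrative only:

// Hedged sketch restating the literal-emission rules of the new dump loop;
// emitLiteral is an illustrative name, not part of this change.
package xorm

import (
	"database/sql"
	"strings"

	"xorm.io/xorm/dialects"
	"xorm.io/xorm/schemas"
)

func emitLiteral(stp schemas.SQLType, s *sql.NullString, dst dialects.Dialect) string {
	if !s.Valid {
		return "NULL" // NULLs are written literally for every kind
	}
	if stp.IsNumeric() || stp.IsBool() {
		// numeric and boolean values go out unquoted; formatBool maps
		// true/false to 1/0 for MSSQL, which has no boolean literals
		return formatBool(s.String, dst)
	}
	// everything else becomes a quoted string with '' escaping
	return "'" + strings.ReplaceAll(s.String, "'", "''") + "'"
}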

View File

@ -172,8 +172,21 @@ func TestDumpTables(t *testing.T) {
name := fmt.Sprintf("dump_%v-table.sql", tp)
t.Run(name, func(t *testing.T) {
assert.NoError(t, testEngine.(*xorm.Engine).DumpTablesToFile([]*schemas.Table{tb}, name, tp))
})
}
assert.NoError(t, testEngine.DropTables(new(TestDumpTableStruct)))
importPath := fmt.Sprintf("dump_%v-table.sql", testEngine.Dialect().URI().DBType)
t.Run("import_"+importPath, func(t *testing.T) {
sess := testEngine.NewSession()
defer sess.Close()
assert.NoError(t, sess.Begin())
_, err = sess.ImportFile(importPath)
assert.NoError(t, err)
assert.NoError(t, sess.Commit())
})
}
func TestDumpTables2(t *testing.T) {

View File

@ -904,3 +904,24 @@ func TestGetDecimal(t *testing.T) {
//assert.True(t, m.Cmp(gf.Money) == 0, "%v != %v", m.String(), gf.Money.String())
}
}
func TestGetTime(t *testing.T) {
type GetTimeStruct struct {
Id int64
CreateTime time.Time
}
assert.NoError(t, PrepareEngine())
assertSync(t, new(GetTimeStruct))
var gts = GetTimeStruct{
CreateTime: time.Now(),
}
_, err := testEngine.Insert(&gts)
assert.NoError(t, err)
var gn time.Time
has, err := testEngine.Table("get_time_struct").Cols(colMapper.Obj2Table("CreateTime")).Get(&gn)
assert.NoError(t, err)
assert.True(t, has)
assert.EqualValues(t, gts.CreateTime.Format(time.RFC3339), gn.Format(time.RFC3339))
}

View File

@ -173,6 +173,16 @@ func TestUintId(t *testing.T) {
err = testEngine.CreateTables(&UintId{})
assert.NoError(t, err)
tables, err := testEngine.DBMetas()
assert.NoError(t, err)
assert.EqualValues(t, 1, len(tables))
cols := tables[0].PKColumns()
assert.EqualValues(t, 1, len(cols))
if testEngine.Dialect().URI().DBType == schemas.MYSQL {
assert.EqualValues(t, "UNSIGNED INT", cols[0].SQLType.Name)
}
cnt, err := testEngine.Insert(&UintId{Name: "test"})
assert.NoError(t, err)
assert.EqualValues(t, 1, cnt)

View File

@ -7,6 +7,7 @@ package integrations
import (
"strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
@ -35,3 +36,32 @@ func TestExecAndQuery(t *testing.T) {
assert.EqualValues(t, 1, id)
assert.Equal(t, "user", string(results[0]["name"]))
}
func TestExecTime(t *testing.T) {
assert.NoError(t, PrepareEngine())
type UserinfoExecTime struct {
Uid int
Name string
Created time.Time
}
assert.NoError(t, testEngine.Sync2(new(UserinfoExecTime)))
now := time.Now()
res, err := testEngine.Exec("INSERT INTO "+testEngine.TableName("`userinfo_exec_time`", true)+" (uid, name, created) VALUES (?, ?, ?)", 1, "user", now)
assert.NoError(t, err)
cnt, err := res.RowsAffected()
assert.NoError(t, err)
assert.EqualValues(t, 1, cnt)
results, err := testEngine.QueryString("SELECT * FROM " + testEngine.TableName("`userinfo_exec_time`", true))
assert.NoError(t, err)
assert.EqualValues(t, 1, len(results))
assert.EqualValues(t, now.In(testEngine.GetTZLocation()).Format("2006-01-02 15:04:05"), results[0]["created"])
var uet UserinfoExecTime
has, err := testEngine.Where("uid=?", 1).Get(&uet)
assert.NoError(t, err)
assert.True(t, has)
assert.EqualValues(t, now.In(testEngine.GetTZLocation()).Format("2006-01-02 15:04:05"), uet.Created.Format("2006-01-02 15:04:05"))
}

View File

@ -1396,15 +1396,22 @@ func TestNilFromDB(t *testing.T) {
assert.NoError(t, PrepareEngine())
assertSync(t, new(TestTable1))
cnt, err := testEngine.Insert(&TestTable1{
var tt0 = TestTable1{
Field1: &TestFieldType1{
cb: []byte("string"),
},
UpdateTime: time.Now(),
})
}
cnt, err := testEngine.Insert(&tt0)
assert.NoError(t, err)
assert.EqualValues(t, 1, cnt)
var tt1 TestTable1
has, err := testEngine.ID(tt0.Id).Get(&tt1)
assert.NoError(t, err)
assert.True(t, has)
assert.EqualValues(t, "string", string(tt1.Field1.cb))
cnt, err = testEngine.Update(TestTable1{
UpdateTime: time.Now().Add(time.Second),
}, TestTable1{
@ -1418,4 +1425,37 @@ func TestNilFromDB(t *testing.T) {
})
assert.NoError(t, err)
assert.EqualValues(t, 1, cnt)
var tt = TestTable1{
UpdateTime: time.Now(),
Field1: &TestFieldType1{
cb: nil,
},
}
cnt, err = testEngine.Insert(&tt)
assert.NoError(t, err)
assert.EqualValues(t, 1, cnt)
var tt2 TestTable1
has, err = testEngine.ID(tt.Id).Get(&tt2)
assert.NoError(t, err)
assert.True(t, has)
assert.Nil(t, tt2.Field1)
var tt3 = TestTable1{
UpdateTime: time.Now(),
Field1: &TestFieldType1{
cb: []byte{},
},
}
cnt, err = testEngine.Insert(&tt3)
assert.NoError(t, err)
assert.EqualValues(t, 1, cnt)
var tt4 TestTable1
has, err = testEngine.ID(tt3.Id).Get(&tt4)
assert.NoError(t, err)
assert.True(t, has)
assert.NotNil(t, tt4.Field1)
assert.NotNil(t, tt4.Field1.cb)
}

View File

@ -314,7 +314,7 @@ func (statement *Statement) genSelectSQL(columnStr string, needLimit, needOrderB
fmt.Fprint(&buf, " LIMIT ", *pLimitN)
}
} else if dialect.URI().DBType == schemas.ORACLE {
if statement.Start != 0 || pLimitN != nil {
if statement.Start != 0 && pLimitN != nil {
oldString := buf.String()
buf.Reset()
rawColStr := columnStr

View File

@ -942,16 +942,29 @@ func (statement *Statement) quoteColumnStr(columnStr string) string {
// ConvertSQLOrArgs converts sql or args
func (statement *Statement) ConvertSQLOrArgs(sqlOrArgs ...interface{}) (string, []interface{}, error) {
sql, args, err := convertSQLOrArgs(sqlOrArgs...)
sql, args, err := statement.convertSQLOrArgs(sqlOrArgs...)
if err != nil {
return "", nil, err
}
return statement.ReplaceQuote(sql), args, nil
}
func convertSQLOrArgs(sqlOrArgs ...interface{}) (string, []interface{}, error) {
func (statement *Statement) convertSQLOrArgs(sqlOrArgs ...interface{}) (string, []interface{}, error) {
switch sqlOrArgs[0].(type) {
case string:
if len(sqlOrArgs) > 1 {
var newArgs = make([]interface{}, 0, len(sqlOrArgs)-1)
for _, arg := range sqlOrArgs[1:] {
if v, ok := arg.(*time.Time); ok {
newArgs = append(newArgs, v.In(statement.defaultTimeZone).Format("2006-01-02 15:04:05"))
} else if v, ok := arg.(time.Time); ok {
newArgs = append(newArgs, v.In(statement.defaultTimeZone).Format("2006-01-02 15:04:05"))
} else {
newArgs = append(newArgs, arg)
}
}
return sqlOrArgs[0].(string), newArgs, nil
}
return sqlOrArgs[0].(string), sqlOrArgs[1:], nil
case *builder.Builder:
return sqlOrArgs[0].(*builder.Builder).ToSQL()
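
Making the conversion a Statement method lets raw-SQL arguments of type time.Time or *time.Time be rendered in the statement's default time zone before they reach the driver, which the TestExecTime addition earlier in this diff exercises. A standalone sketch of that normalization; normalizeArgs and the zones used are illustrative:

// Standalone sketch of the argument normalization added above: time
// arguments become "2006-01-02 15:04:05" strings in a target zone.
package main

import (
	"fmt"
	"time"
)

func normalizeArgs(defaultTimeZone *time.Location, args ...interface{}) []interface{} {
	newArgs := make([]interface{}, 0, len(args))
	for _, arg := range args {
		switch v := arg.(type) {
		case *time.Time:
			newArgs = append(newArgs, v.In(defaultTimeZone).Format("2006-01-02 15:04:05"))
		case time.Time:
			newArgs = append(newArgs, v.In(defaultTimeZone).Format("2006-01-02 15:04:05"))
		default:
			newArgs = append(newArgs, arg)
		}
	}
	return newArgs
}

func main() {
	now := time.Date(2021, 7, 11, 10, 44, 0, 0, time.FixedZone("CST", 8*3600))
	fmt.Println(normalizeArgs(time.UTC, 1, "user", now))
	// [1 user 2021-07-11 02:44:00]
}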

View File

@ -127,8 +127,9 @@ func (statement *Statement) BuildUpdates(tableValue reflect.Value,
if err != nil {
return nil, nil, err
}
if data != nil {
val = data
}
goto APPEND
}
}
@ -138,8 +139,9 @@ func (statement *Statement) BuildUpdates(tableValue reflect.Value,
if err != nil {
return nil, nil, err
}
if data != nil {
val = data
}
goto APPEND
}

View File

@ -31,6 +31,12 @@ func (statement *Statement) Value2Interface(col *schemas.Column, fieldValue refl
if err != nil {
return nil, err
}
if data == nil {
if col.Nullable {
return nil, nil
}
data = []byte{}
}
if col.SQLType.IsBlob() {
return data, nil
}
@ -45,12 +51,15 @@ func (statement *Statement) Value2Interface(col *schemas.Column, fieldValue refl
if err != nil {
return nil, err
}
if data == nil {
if col.Nullable {
return nil, nil
}
data = []byte{}
}
if col.SQLType.IsBlob() {
return data, nil
}
if nil == data {
return nil, nil
}
return string(data), nil
}
}
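
With these checks, a conversion that yields nil bytes is stored as SQL NULL only when the column is nullable; a NOT NULL column falls back to an empty value instead, which the extended TestNilFromDB earlier in this diff verifies. A hedged sketch of the rule in isolation; nullableToValue is an illustrative name:

// Hedged sketch of the nil-handling rule added above, isolated from the
// surrounding reflection code; nullableToValue is an illustrative name.
package main

import "fmt"

func nullableToValue(data []byte, nullable bool) interface{} {
	if data == nil {
		if nullable {
			return nil // nullable column: store SQL NULL
		}
		data = []byte{} // NOT NULL column: fall back to an empty value
	}
	return string(data)
}

func main() {
	fmt.Println(nullableToValue(nil, true))             // <nil>
	fmt.Println(nullableToValue(nil, false) == "")      // true
	fmt.Println(nullableToValue([]byte("xorm"), true))  // xorm
}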

scan.go (28 lines changed)
View File

@ -14,6 +14,7 @@ import (
"xorm.io/xorm/convert"
"xorm.io/xorm/core"
"xorm.io/xorm/dialects"
"xorm.io/xorm/schemas"
)
// genScanResultsByBeanNullable generates scan result
@ -22,7 +23,9 @@ func genScanResultsByBeanNullable(bean interface{}) (interface{}, bool, error) {
case *sql.NullInt64, *sql.NullBool, *sql.NullFloat64, *sql.NullString, *sql.RawBytes:
return t, false, nil
case *time.Time:
return &sql.NullTime{}, true, nil
return &sql.NullString{}, true, nil
case *sql.NullTime:
return &sql.NullString{}, true, nil
case *string:
return &sql.NullString{}, true, nil
case *int, *int8, *int16, *int32:
@ -75,8 +78,8 @@ func genScanResultsByBean(bean interface{}) (interface{}, bool, error) {
*float32, *float64,
*bool:
return t, false, nil
case *time.Time:
return &sql.NullTime{}, true, nil
case *time.Time, *sql.NullTime:
return &sql.NullString{}, true, nil
case sql.NullInt64, sql.NullBool, sql.NullFloat64, sql.NullString,
time.Time,
string,
@ -121,7 +124,7 @@ func genScanResultsByBean(bean interface{}) (interface{}, bool, error) {
}
}
func row2mapStr(rows *core.Rows, types []*sql.ColumnType, fields []string) (map[string]string, error) {
func (engine *Engine) row2mapStr(rows *core.Rows, types []*sql.ColumnType, fields []string) (map[string]string, error) {
var scanResults = make([]interface{}, len(fields))
for i := 0; i < len(fields); i++ {
var s sql.NullString
@ -133,10 +136,23 @@ func row2mapStr(rows *core.Rows, types []*sql.ColumnType, fields []string) (map[
}
result := make(map[string]string, len(fields))
for ii, key := range fields {
s := scanResults[ii].(*sql.NullString)
for i, key := range fields {
s := scanResults[i].(*sql.NullString)
if s.String == "" {
result[key] = ""
continue
}
if schemas.TIME_TYPE == engine.dialect.ColumnTypeKind(types[i].DatabaseTypeName()) {
t, err := convert.String2Time(s.String, engine.DatabaseTZ, engine.TZLocation)
if err != nil {
return nil, err
}
result[key] = t.Format("2006-01-02 15:04:05")
} else {
result[key] = s.String
}
}
return result, nil
}
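
Because row2mapStr is now an Engine method, it can ask the dialect for the column kind: values the driver reports as time-typed are re-parsed with convert.String2Time from DatabaseTZ into TZLocation and re-formatted, so QueryString returns times in the engine's display zone. A minimal sketch of that conversion for a single value; the zones here are examples:

// Minimal sketch of the per-column conversion now done in row2mapStr:
// a raw driver string is re-parsed in the database zone and re-formatted
// in the engine's display zone.
package main

import (
	"fmt"
	"time"

	"xorm.io/xorm/convert"
)

func main() {
	databaseTZ := time.UTC
	tzLocation, _ := time.LoadLocation("Asia/Shanghai")

	raw := "2021-07-11 10:44:00" // what the driver hands back for a DATETIME column
	t, err := convert.String2Time(raw, databaseTZ, tzLocation)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Format("2006-01-02 15:04:05")) // 2021-07-11 18:44:00
}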

View File

@ -39,6 +39,7 @@ const (
TIME_TYPE
NUMERIC_TYPE
ARRAY_TYPE
BOOL_TYPE
)
// IsType returns true if the column type is the same as the parameter
@ -64,6 +65,10 @@ func (s *SQLType) IsTime() bool {
return s.IsType(TIME_TYPE)
}
func (s *SQLType) IsBool() bool {
return s.IsType(BOOL_TYPE)
}
// IsNumeric returns true if column is a numeric type
func (s *SQLType) IsNumeric() bool {
return s.IsType(NUMERIC_TYPE)
@ -209,7 +214,8 @@ var (
Bytea: BLOB_TYPE,
UniqueIdentifier: BLOB_TYPE,
Bool: NUMERIC_TYPE,
Bool: BOOL_TYPE,
Boolean: BOOL_TYPE,
Serial: NUMERIC_TYPE,
BigSerial: NUMERIC_TYPE,
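
Bool and Boolean now classify as BOOL_TYPE rather than NUMERIC_TYPE, and IsBool exposes the new kind; the dump code above routes such columns through formatBool. A tiny sketch of the new predicate:

// Tiny sketch of the new kind in use; assumes the schemas package of this repository.
package main

import (
	"fmt"

	"xorm.io/xorm/schemas"
)

func main() {
	st := schemas.SQLType{Name: schemas.Bool}
	fmt.Println(st.IsBool())    // true
	fmt.Println(st.IsNumeric()) // false, since Bool is no longer NUMERIC_TYPE
}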

View File

@ -768,7 +768,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b
fieldValue, err := session.getField(dataStruct, key, table, idx)
if err != nil {
if !strings.Contains(err.Error(), "is not valid") {
if _, ok := err.(ErrFieldIsNotValid); !ok {
session.engine.logger.Warnf("%v", err)
}
continue

View File

@ -35,6 +35,19 @@ func (session *Session) Get(bean interface{}) (bool, error) {
return session.get(bean)
}
func isPtrOfTime(v interface{}) bool {
if _, ok := v.(*time.Time); ok {
return true
}
el := reflect.ValueOf(v).Elem()
if el.Kind() != reflect.Struct {
return false
}
return el.Type().ConvertibleTo(schemas.TimeType)
}
func (session *Session) get(bean interface{}) (bool, error) {
defer session.resetStatement()
@ -51,7 +64,7 @@ func (session *Session) get(bean interface{}) (bool, error) {
return false, ErrObjectIsNil
}
if beanValue.Elem().Kind() == reflect.Struct {
if beanValue.Elem().Kind() == reflect.Struct && !isPtrOfTime(bean) {
if err := session.statement.SetRefBean(bean); err != nil {
return false, err
}

View File

@ -33,7 +33,7 @@ func (session *Session) rows2Strings(rows *core.Rows) (resultsSlice []map[string
}
for rows.Next() {
result, err := row2mapStr(rows, types, fields)
result, err := session.engine.row2mapStr(rows, types, fields)
if err != nil {
return nil, err
}