mvdan.cc/xurls/v2 v2.2.0
strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251
xorm.io/builder v0.3.9
- xorm.io/xorm v1.1.0
+ xorm.io/xorm v1.1.1
)
replace github.com/hashicorp/go-version => github.com/6543/go-version v1.3.1
xorm.io/builder v0.3.9 h1:Sd65/LdWyO7LR8+Cbd+e7mm3sK/7U9k0jS3999IDHMc=
xorm.io/builder v0.3.9/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
xorm.io/xorm v1.0.6/go.mod h1:uF9EtbhODq5kNWxMbnBEj8hRRZnlcNSz2t2N7HW/+A4=
-xorm.io/xorm v1.1.0 h1:mkEsQXLauZajiOld2cB2PkFcUZKePepPgs1bC1dw8RA=
-xorm.io/xorm v1.1.0/go.mod h1:EDzNHMuCVZNszkIRSLL2nI0zX+nQE8RstAVranlSfqI=
+xorm.io/xorm v1.1.1 h1:cc1yot5rhoBucfk2lgZPZPEuI/9QsVvHuQpjI0wmcf8=
+xorm.io/xorm v1.1.1/go.mod h1:Cb0DKYTHbyECMaSfgRnIZp5aiUgQozxcJJ0vzcLGJSg=
# xorm.io/builder v0.3.9
## explicit
xorm.io/builder
-# xorm.io/xorm v1.1.0
+# xorm.io/xorm v1.1.1
## explicit
xorm.io/xorm
xorm.io/xorm/caches
---
kind: pipeline
-name: testing
+name: test-mysql
+environment:
+ GO111MODULE: "on"
+ GOPROXY: "https://goproxy.io"
+ CGO_ENABLED: 1
+trigger:
+ ref:
+ - refs/heads/master
+ - refs/pull/*/head
steps:
-- name: restore-cache
- image: meltwater/drone-cache
- pull: always
- settings:
- backend: "filesystem"
- restore: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
-
- name: test-vet
image: golang:1.15
- environment:
- GO111MODULE: "on"
- GOPROXY: "https://goproxy.io"
- CGO_ENABLED: 1
- GOMODCACHE: '/drone/src/pkg.mod'
- GOCACHE: '/drone/src/pkg.build'
- commands:
- - make vet
- - make fmt-check
- volumes:
- - name: cache
- path: /go
- when:
- event:
- - push
- - pull_request
-
-- name: rebuild-cache
- image: meltwater/drone-cache
- pull: true
- settings:
- backend: "filesystem"
- rebuild: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
-
-volumes:
- - name: cache
- temp: {}
-
----
-kind: pipeline
-name: test-sqlite
-depends_on:
- - testing
-steps:
-- name: restore-cache
- image: meltwater/drone-cache:dev
pull: always
- settings:
- backend: "filesystem"
- restore: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
volumes:
- name: cache
- path: /go
-
+ path: /go/pkg/mod
+ commands:
+ - make vet
- name: test-sqlite3
image: golang:1.15
- environment:
- GO111MODULE: "on"
- GOPROXY: "https://goproxy.io"
- CGO_ENABLED: 1
- GOMODCACHE: '/drone/src/pkg.mod'
- GOCACHE: '/drone/src/pkg.build'
- commands:
- - make test-sqlite3
- - TEST_CACHE_ENABLE=true make test-sqlite3
- - TEST_QUOTE_POLICY=reserved make test-sqlite3
volumes:
- name: cache
- path: /go
-
-- name: test-sqlite
- image: golang:1.15
- environment:
- GO111MODULE: "on"
- GOPROXY: "https://goproxy.io"
- CGO_ENABLED: 1
- GOMODCACHE: '/drone/src/pkg.mod'
- GOCACHE: '/drone/src/pkg.build'
+ path: /go/pkg/mod
+ depends_on:
+ - test-vet
commands:
- - make test-sqlite
- - TEST_CACHE_ENABLE=true make test-sqlite
- - TEST_QUOTE_POLICY=reserved make test-sqlite
- volumes:
- - name: cache
- path: /go
-
-- name: rebuild-cache
- image: meltwater/drone-cache:dev
- pull: true
- settings:
- backend: "filesystem"
- rebuild: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
-
-volumes:
- - name: cache
- temp: {}
-
----
-kind: pipeline
-name: test-mysql
-depends_on:
- - testing
-steps:
-- name: restore-cache
- image: meltwater/drone-cache
- pull: always
- settings:
- backend: "filesystem"
- restore: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
-
+ - make fmt-check
+ - make test
+ - make test-sqlite3
+ - TEST_CACHE_ENABLE=true make test-sqlite3
+ - TEST_QUOTE_POLICY=reserved make test-sqlite3
+ - make test-sqlite
+ - TEST_CACHE_ENABLE=true make test-sqlite
+ - TEST_QUOTE_POLICY=reserved make test-sqlite
- name: test-mysql
image: golang:1.15
+ pull: never
+ volumes:
+ - name: cache
+ path: /go/pkg/mod
+ depends_on:
+ - test-vet
environment:
- GO111MODULE: "on"
- GOPROXY: "https://goproxy.io"
- CGO_ENABLED: 1
- GOMODCACHE: '/drone/src/pkg.mod'
- GOCACHE: '/drone/src/pkg.build'
TEST_MYSQL_HOST: mysql
TEST_MYSQL_CHARSET: utf8
TEST_MYSQL_DBNAME: xorm_test
TEST_MYSQL_USERNAME: root
TEST_MYSQL_PASSWORD:
commands:
- - make test
- - make test-mysql
- TEST_CACHE_ENABLE=true make test-mysql
- - TEST_QUOTE_POLICY=reserved make test-mysql
- volumes:
- - name: cache
- path: /go
- name: test-mysql-utf8mb4
image: golang:1.15
+ pull: never
+ volumes:
+ - name: cache
+ path: /go/pkg/mod
depends_on:
- test-mysql
environment:
- GO111MODULE: "on"
- GOPROXY: "https://goproxy.io"
- CGO_ENABLED: 1
- GOMODCACHE: '/drone/src/pkg.mod'
- GOCACHE: '/drone/src/pkg.build'
TEST_MYSQL_HOST: mysql
TEST_MYSQL_CHARSET: utf8mb4
TEST_MYSQL_DBNAME: xorm_test
TEST_MYSQL_PASSWORD:
commands:
- make test-mysql
- - TEST_CACHE_ENABLE=true make test-mysql
- TEST_QUOTE_POLICY=reserved make test-mysql
- volumes:
- - name: cache
- path: /go
-
-- name: test-mymysql
- pull: default
- image: golang:1.15
- depends_on:
- - test-mysql-utf8mb4
- environment:
- GO111MODULE: "on"
- GOPROXY: "https://goproxy.io"
- CGO_ENABLED: 1
- GOMODCACHE: '/drone/src/pkg.mod'
- GOCACHE: '/drone/src/pkg.build'
- TEST_MYSQL_HOST: mysql:3306
- TEST_MYSQL_DBNAME: xorm_test
- TEST_MYSQL_USERNAME: root
- TEST_MYSQL_PASSWORD:
- commands:
- - make test-mymysql
- - TEST_CACHE_ENABLE=true make test-mymysql
- - TEST_QUOTE_POLICY=reserved make test-mymysql
- volumes:
- - name: cache
- path: /go
-
-- name: rebuild-cache
- image: meltwater/drone-cache
- depends_on:
- - test-mysql
- - test-mysql-utf8mb4
- - test-mymysql
- pull: true
- settings:
- backend: "filesystem"
- rebuild: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
volumes:
- - name: cache
- temp: {}
+- name: cache
+ host:
+ path: /tmp/cache
services:
- name: mysql
- pull: default
image: mysql:5.7
environment:
MYSQL_ALLOW_EMPTY_PASSWORD: yes
name: test-mysql8
depends_on:
- test-mysql
- - test-sqlite
+trigger:
+ ref:
+ - refs/heads/master
+ - refs/pull/*/head
steps:
-- name: restore-cache
- image: meltwater/drone-cache
- pull: always
- settings:
- backend: "filesystem"
- restore: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
-
- name: test-mysql8
image: golang:1.15
+ pull: never
+ volumes:
+ - name: cache
+ path: /go/pkg/mod
environment:
- GO111MODULE: "on"
- GOPROXY: "https://goproxy.io"
- CGO_ENABLED: 1
- GOMODCACHE: '/drone/src/pkg.mod'
- GOCACHE: '/drone/src/pkg.build'
TEST_MYSQL_HOST: mysql8
TEST_MYSQL_CHARSET: utf8mb4
TEST_MYSQL_DBNAME: xorm_test
- make test-mysql
- TEST_CACHE_ENABLE=true make test-mysql
- TEST_QUOTE_POLICY=reserved make test-mysql
- volumes:
- - name: cache
- path: /go
-
-- name: rebuild-cache
- image: meltwater/drone-cache:dev
- pull: true
- depends_on:
- - test-mysql8
- settings:
- backend: "filesystem"
- rebuild: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
volumes:
- - name: cache
- temp: {}
+- name: cache
+ host:
+ path: /tmp/cache
services:
- name: mysql8
- pull: default
image: mysql:8.0
environment:
MYSQL_ALLOW_EMPTY_PASSWORD: yes
name: test-mariadb
depends_on:
- test-mysql8
+trigger:
+ ref:
+ - refs/heads/master
+ - refs/pull/*/head
steps:
-- name: restore-cache
- image: meltwater/drone-cache
- pull: always
- settings:
- backend: "filesystem"
- restore: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
-
- name: test-mariadb
image: golang:1.15
+ pull: never
+ volumes:
+ - name: cache
+ path: /go/pkg/mod
environment:
- GO111MODULE: "on"
- GOPROXY: "https://goproxy.io"
- CGO_ENABLED: 1
- GOMODCACHE: '/drone/src/pkg.mod'
- GOCACHE: '/drone/src/pkg.build'
TEST_MYSQL_HOST: mariadb
TEST_MYSQL_CHARSET: utf8mb4
TEST_MYSQL_DBNAME: xorm_test
- make test-mysql
- TEST_CACHE_ENABLE=true make test-mysql
- TEST_QUOTE_POLICY=reserved make test-mysql
- volumes:
- - name: cache
- path: /go
-
-- name: rebuild-cache
- image: meltwater/drone-cache:dev
- depends_on:
- - test-mariadb
- pull: true
- settings:
- backend: "filesystem"
- rebuild: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
volumes:
- - name: cache
- temp: {}
+- name: cache
+ host:
+ path: /tmp/cache
services:
- name: mariadb
- pull: default
image: mariadb:10.4
environment:
MYSQL_ALLOW_EMPTY_PASSWORD: yes
name: test-postgres
depends_on:
- test-mariadb
+trigger:
+ ref:
+ - refs/heads/master
+ - refs/pull/*/head
steps:
-- name: restore-cache
- image: meltwater/drone-cache
- pull: always
- settings:
- backend: "filesystem"
- restore: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
-
- name: test-postgres
- pull: default
+ pull: never
image: golang:1.15
+ volumes:
+ - name: cache
+ path: /go/pkg/mod
environment:
- GO111MODULE: "on"
- GOPROXY: "https://goproxy.io"
- CGO_ENABLED: 1
- GOMODCACHE: '/drone/src/pkg.mod'
- GOCACHE: '/drone/src/pkg.build'
TEST_PGSQL_HOST: pgsql
TEST_PGSQL_DBNAME: xorm_test
TEST_PGSQL_USERNAME: postgres
commands:
- make test-postgres
- TEST_CACHE_ENABLE=true make test-postgres
- - TEST_QUOTE_POLICY=reserved make test-postgres
- volumes:
- - name: cache
- path: /go
- name: test-postgres-schema
- pull: default
+ pull: never
image: golang:1.15
+ volumes:
+ - name: cache
+ path: /go/pkg/mod
depends_on:
- test-postgres
environment:
- GO111MODULE: "on"
- GOPROXY: "https://goproxy.io"
- CGO_ENABLED: 1
- GOMODCACHE: '/drone/src/pkg.mod'
- GOCACHE: '/drone/src/pkg.build'
TEST_PGSQL_HOST: pgsql
TEST_PGSQL_SCHEMA: xorm
TEST_PGSQL_DBNAME: xorm_test
TEST_PGSQL_USERNAME: postgres
TEST_PGSQL_PASSWORD: postgres
commands:
- - make test-postgres
- - TEST_CACHE_ENABLE=true make test-postgres
- TEST_QUOTE_POLICY=reserved make test-postgres
- volumes:
- - name: cache
- path: /go
-
-- name: rebuild-cache
- image: meltwater/drone-cache:dev
- pull: true
- depends_on:
- - test-postgres-schema
- settings:
- backend: "filesystem"
- rebuild: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
volumes:
- - name: cache
- temp: {}
+- name: cache
+ host:
+ path: /tmp/cache
services:
- name: pgsql
- pull: default
image: postgres:9.5
environment:
POSTGRES_DB: xorm_test
name: test-mssql
depends_on:
- test-postgres
+trigger:
+ ref:
+ - refs/heads/master
+ - refs/pull/*/head
steps:
-- name: restore-cache
- image: meltwater/drone-cache
- pull: always
- settings:
- backend: "filesystem"
- restore: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
-
- name: test-mssql
- pull: default
+ pull: never
image: golang:1.15
+ volumes:
+ - name: cache
+ path: /go/pkg/mod
environment:
- GO111MODULE: "on"
- GOPROXY: "https://goproxy.io"
- CGO_ENABLED: 1
- GOMODCACHE: '/drone/src/pkg.mod'
- GOCACHE: '/drone/src/pkg.build'
TEST_MSSQL_HOST: mssql
TEST_MSSQL_DBNAME: xorm_test
TEST_MSSQL_USERNAME: sa
- TEST_CACHE_ENABLE=true make test-mssql
- TEST_QUOTE_POLICY=reserved make test-mssql
- TEST_MSSQL_DEFAULT_VARCHAR=NVARCHAR TEST_MSSQL_DEFAULT_CHAR=NCHAR make test-mssql
- volumes:
- - name: cache
- path: /go
-
-- name: rebuild-cache
- image: meltwater/drone-cache:dev
- pull: true
- settings:
- backend: "filesystem"
- rebuild: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
volumes:
- - name: cache
- temp: {}
+- name: cache
+ host:
+ path: /tmp/cache
services:
- name: mssql
- pull: default
- image: microsoft/mssql-server-linux:latest
+ pull: always
+ image: mcr.microsoft.com/mssql/server:latest
environment:
ACCEPT_EULA: Y
SA_PASSWORD: yourStrong(!)Password
- MSSQL_PID: Developer
+ MSSQL_PID: Standard
---
kind: pipeline
name: test-tidb
depends_on:
- test-mssql
+trigger:
+ ref:
+ - refs/heads/master
+ - refs/pull/*/head
steps:
-- name: restore-cache
- image: meltwater/drone-cache
- pull: always
- settings:
- backend: "filesystem"
- restore: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
-
- name: test-tidb
- pull: default
+ pull: never
image: golang:1.15
+ volumes:
+ - name: cache
+ path: /go/pkg/mod
environment:
- GO111MODULE: "on"
- GOPROXY: "https://goproxy.io"
- CGO_ENABLED: 1
- GOMODCACHE: '/drone/src/pkg.mod'
- GOCACHE: '/drone/src/pkg.build'
TEST_TIDB_HOST: "tidb:4000"
TEST_TIDB_DBNAME: xorm_test
TEST_TIDB_USERNAME: root
- make test-tidb
- TEST_CACHE_ENABLE=true make test-tidb
- TEST_QUOTE_POLICY=reserved make test-tidb
- volumes:
- - name: cache
- path: /go
-
-- name: rebuild-cache
- image: meltwater/drone-cache:dev
- pull: true
- settings:
- backend: "filesystem"
- rebuild: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
volumes:
- - name: cache
- temp: {}
+- name: cache
+ host:
+ path: /tmp/cache
services:
- name: tidb
- pull: default
image: pingcap/tidb:v3.0.3
---
name: test-cockroach
depends_on:
- test-tidb
+trigger:
+ ref:
+ - refs/heads/master
+ - refs/pull/*/head
steps:
-- name: restore-cache
- image: meltwater/drone-cache
- pull: always
- settings:
- backend: "filesystem"
- restore: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
-
- name: test-cockroach
- pull: default
+ pull: never
image: golang:1.15
+ volumes:
+ - name: cache
+ path: /go/pkg/mod
environment:
- GO111MODULE: "on"
- GOPROXY: "https://goproxy.io"
- CGO_ENABLED: 1
- GOMODCACHE: '/drone/src/pkg.mod'
- GOCACHE: '/drone/src/pkg.build'
TEST_COCKROACH_HOST: "cockroach:26257"
TEST_COCKROACH_DBNAME: xorm_test
TEST_COCKROACH_USERNAME: root
- sleep 10
- make test-cockroach
- TEST_CACHE_ENABLE=true make test-cockroach
- volumes:
- - name: cache
- path: /go
-
-- name: rebuild-cache
- image: meltwater/drone-cache:dev
- pull: true
- settings:
- backend: "filesystem"
- rebuild: true
- cache_key: '{{ .Repo.Name }}_{{ checksum "go.mod" }}_{{ checksum "go.sum" }}_{{ arch }}_{{ os }}'
- archive_format: "gzip"
- filesystem_cache_root: "/go"
- mount:
- - pkg.mod
- - pkg.build
- volumes:
- - name: cache
- path: /go
volumes:
- - name: cache
- temp: {}
+- name: cache
+ host:
+ path: /tmp/cache
services:
- name: cockroach
- pull: default
image: cockroachdb/cockroach:v19.2.4
commands:
- /cockroach/cockroach start --insecure
kind: pipeline
name: merge_coverage
depends_on:
- - testing
- - test-sqlite
- test-mysql
- test-mysql8
- test-mariadb
- test-mssql
- test-tidb
- test-cockroach
+trigger:
+ ref:
+ - refs/heads/master
+ - refs/pull/*/head
steps:
- name: merge_coverage
- pull: default
image: golang:1.15
- environment:
- GO111MODULE: "on"
- GOPROXY: "https://goproxy.io"
commands:
- make coverage
- when:
- branch:
- - master
- event:
- - push
- - pull_request
*coverage.out
test.db
integrations/*.sql
-integrations/test_sqlite*
\ No newline at end of file
+integrations/test_sqlite*
+cover.out
\ No newline at end of file
[rule.context-as-argument]
[rule.context-keys-type]
[rule.dot-imports]
+[rule.empty-lines]
+[rule.errorf]
[rule.error-return]
[rule.error-strings]
[rule.error-naming]
[rule.exported]
[rule.if-return]
[rule.increment-decrement]
-[rule.var-naming]
- arguments = [["ID", "UID", "UUID", "URL", "JSON"], []]
-[rule.var-declaration]
+[rule.indent-error-flow]
[rule.package-comments]
[rule.range]
[rule.receiver-naming]
+[rule.struct-tag]
[rule.time-naming]
[rule.unexported-return]
-[rule.indent-error-flow]
-[rule.errorf]
-[rule.struct-tag]
\ No newline at end of file
+[rule.unnecessary-stmt]
+[rule.var-declaration]
+[rule.var-naming]
+ arguments = [["ID", "UID", "UUID", "URL", "JSON"], []]
\ No newline at end of file
This changelog goes through all the changes that have been made in each release
without substantial changes to our git log.
+## [1.1.1](https://gitea.com/xorm/xorm/releases/tag/1.1.1) - 2021-07-03
+
+* BUGFIXES
+ * Ignore comments when deciding when to replace question marks. #1954 (#1955)
+ * Fix bug didn't reset statement on update (#1939)
+ * Fix create table with struct missing columns (#1938)
+ * Fix #929 (#1936)
+ * Fix exist (#1921)
+* ENHANCEMENTS
+ * Improve get field value of bean (#1961)
+ * refactor splitTag function (#1960)
+ * Fix #1663 (#1952)
+ * fix pg GetColumns missing comment (#1949)
+ * Support build flag jsoniter to replace default json (#1916)
+ * refactor exprParam (#1825)
+ * Add DBVersion (#1723)
+* TESTING
+ * Add test to confirm #1247 resolved (#1951)
+ * Add test for dump table with default value (#1950)
+ * Test for #1486 (#1942)
+ * Add sync tests to confirm #539 is gone (#1937)
+ * test for unsigned int32 (#1923)
+ * Add tests for array store (#1922)
+* BUILD
+ * Remove mymysql from ci (#1928)
+* MISC
+ * fix lint (#1953)
+ * Compatible with cockroach (#1930)
+ * Replace goracle with godror (#1914)
+
## [1.1.0](https://gitea.com/xorm/xorm/releases/tag/1.1.0) - 2021-05-14
* FEATURES
return nil
}
- dpv := reflect.ValueOf(dest)
+ return convertAssignV(reflect.ValueOf(dest), src)
+}
+
+func convertAssignV(dpv reflect.Value, src interface{}) error {
if dpv.Kind() != reflect.Ptr {
return errors.New("destination not a pointer")
}
return errNilPtr
}
- if !sv.IsValid() {
- sv = reflect.ValueOf(src)
- }
+ var sv = reflect.ValueOf(src)
dv := reflect.Indirect(dpv)
if sv.IsValid() && sv.Type().AssignableTo(dv.Type()) {
return nil
}
- return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, dest)
+ return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, dpv.Interface())
}
func asKind(vv reflect.Value, tp reflect.Type) (interface{}, error) {
}
return v.Interface(), nil
}
-
-func int64ToIntValue(id int64, tp reflect.Type) reflect.Value {
- var v interface{}
- kind := tp.Kind()
-
- if kind == reflect.Ptr {
- kind = tp.Elem().Kind()
- }
-
- switch kind {
- case reflect.Int16:
- temp := int16(id)
- v = &temp
- case reflect.Int32:
- temp := int32(id)
- v = &temp
- case reflect.Int:
- temp := int(id)
- v = &temp
- case reflect.Int64:
- temp := id
- v = &temp
- case reflect.Uint16:
- temp := uint16(id)
- v = &temp
- case reflect.Uint32:
- temp := uint32(id)
- v = &temp
- case reflect.Uint64:
- temp := uint64(id)
- v = &temp
- case reflect.Uint:
- temp := uint(id)
- v = &temp
- }
-
- if tp.Kind() == reflect.Ptr {
- return reflect.ValueOf(v).Convert(tp)
- }
- return reflect.ValueOf(v).Elem().Convert(tp)
-}
-
-func int64ToInt(id int64, tp reflect.Type) interface{} {
- return int64ToIntValue(id, tp).Interface()
-}
URI() *URI
SQLType(*schemas.Column) string
FormatBytes(b []byte) string
+ Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error)
IsReserved(string) bool
Quoter() schemas.Quoter
"sqlite3": {"sqlite3", func() Driver { return &sqlite3Driver{} }, func() Dialect { return &sqlite3{} }},
"sqlite": {"sqlite3", func() Driver { return &sqlite3Driver{} }, func() Dialect { return &sqlite3{} }},
"oci8": {"oracle", func() Driver { return &oci8Driver{} }, func() Dialect { return &oracle{} }},
- "goracle": {"oracle", func() Driver { return &goracleDriver{} }, func() Dialect { return &oracle{} }},
+ "godror": {"oracle", func() Driver { return &godrorDriver{} }, func() Dialect { return &oracle{} }},
}
for driverName, v := range providedDrvsNDialects {
func convertQuestionMark(sql, prefix string, start int) string {
var buf strings.Builder
var beginSingleQuote bool
+ var isLineComment bool
+ var isComment bool
+ var isMaybeLineComment bool
+ var isMaybeComment bool
+ var isMaybeCommentEnd bool
var index = start
for _, c := range sql {
- if !beginSingleQuote && c == '?' {
+ if !beginSingleQuote && !isLineComment && !isComment && c == '?' {
buf.WriteString(fmt.Sprintf("%s%v", prefix, index))
index++
} else {
- if c == '\'' {
+ if isMaybeLineComment {
+ if c == '-' {
+ isLineComment = true
+ }
+ isMaybeLineComment = false
+ } else if isMaybeComment {
+ if c == '*' {
+ isComment = true
+ }
+ isMaybeComment = false
+ } else if isMaybeCommentEnd {
+ if c == '/' {
+ isComment = false
+ }
+ isMaybeCommentEnd = false
+ } else if isLineComment {
+ if c == '\n' {
+ isLineComment = false
+ }
+ } else if isComment {
+ if c == '*' {
+ isMaybeCommentEnd = true
+ }
+ } else if !beginSingleQuote && c == '-' {
+ isMaybeLineComment = true
+ } else if !beginSingleQuote && c == '/' {
+ isMaybeComment = true
+ } else if c == '\'' {
beginSingleQuote = !beginSingleQuote
}
buf.WriteRune(c)
}
}
+func (db *mssql) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) {
+ rows, err := queryer.QueryContext(ctx,
+ "SELECT SERVERPROPERTY('productversion'), SERVERPROPERTY ('productlevel') AS ProductLevel, SERVERPROPERTY ('edition') AS ProductEdition")
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ var version, level, edition string
+ if !rows.Next() {
+ return nil, errors.New("unknow version")
+ }
+
+ if err := rows.Scan(&version, &level, &edition); err != nil {
+ return nil, err
+ }
+
+ // MSSQL: Microsoft SQL Server 2017 (RTM-CU13) (KB4466404) - 14.0.3048.4 (X64) Nov 30 2018 12:57:58 Copyright (C) 2017 Microsoft Corporation Developer Edition (64-bit) on Linux (Ubuntu 16.04.5 LTS)
+ return &schemas.Version{
+ Number: version,
+ Level: level,
+ Edition: edition,
+ }, nil
+}
+
func (db *mssql) SQLType(c *schemas.Column) string {
var res string
switch t := c.SQLType.Name; t {
case schemas.TimeStampz:
res = "DATETIMEOFFSET"
c.Length = 7
- case schemas.MediumInt, schemas.UnsignedInt:
+ case schemas.MediumInt:
res = schemas.Int
case schemas.Text, schemas.MediumText, schemas.TinyText, schemas.LongText, schemas.Json:
res = db.defaultVarchar + "(MAX)"
case schemas.TinyInt:
res = schemas.TinyInt
c.Length = 0
- case schemas.BigInt, schemas.UnsignedBigInt:
+ case schemas.BigInt, schemas.UnsignedBigInt, schemas.UnsignedInt:
res = schemas.BigInt
c.Length = 0
case schemas.NVarchar:
return db.Base.Init(db, uri)
}
+func (db *mysql) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) {
+ rows, err := queryer.QueryContext(ctx, "SELECT @@VERSION")
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ var version string
+ if !rows.Next() {
+ return nil, errors.New("Unknow version")
+ }
+
+ if err := rows.Scan(&version); err != nil {
+ return nil, err
+ }
+
+ fields := strings.Split(version, "-")
+ if len(fields) == 3 && fields[1] == "TiDB" {
+ // 5.7.25-TiDB-v3.0.3
+ return &schemas.Version{
+ Number: strings.TrimPrefix(fields[2], "v"),
+ Level: fields[0],
+ Edition: fields[1],
+ }, nil
+ }
+
+ var edition string
+ if len(fields) == 2 {
+ edition = fields[1]
+ }
+
+ return &schemas.Version{
+ Number: fields[0],
+ Edition: edition,
+ }, nil
+}
+
func (db *mysql) SetParams(params map[string]string) {
rowFormat, ok := params["rowFormat"]
if ok {
return db.Base.Init(db, uri)
}
+func (db *oracle) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) {
+ rows, err := queryer.QueryContext(ctx, "select * from v$version where banner like 'Oracle%'")
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ var version string
+ if !rows.Next() {
+ return nil, errors.New("unknow version")
+ }
+
+ if err := rows.Scan(&version); err != nil {
+ return nil, err
+ }
+ return &schemas.Version{
+ Number: version,
+ }, nil
+}
+
func (db *oracle) SQLType(c *schemas.Column) string {
var res string
switch t := c.SQLType.Name; t {
}
}
-type goracleDriver struct {
+type godrorDriver struct {
}
-func (cfg *goracleDriver) Parse(driverName, dataSourceName string) (*URI, error) {
+func (cfg *godrorDriver) Parse(driverName, dataSourceName string) (*URI, error) {
db := &URI{DBType: schemas.ORACLE}
dsnPattern := regexp.MustCompile(
`^(?:(?P<user>.*?)(?::(?P<passwd>.*))?@)?` + // [user[:password]@]
return db.Base.Init(db, uri)
}
+func (db *postgres) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) {
+ rows, err := queryer.QueryContext(ctx, "SELECT version()")
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ var version string
+ if !rows.Next() {
+ return nil, errors.New("Unknow version")
+ }
+
+ if err := rows.Scan(&version); err != nil {
+ return nil, err
+ }
+
+ // Postgres: 9.5.22 on x86_64-pc-linux-gnu (Debian 9.5.22-1.pgdg90+1), compiled by gcc (Debian 6.3.0-18+deb9u1) 6.3.0 20170516, 64-bit
+ // CockroachDB CCL v19.2.4 (x86_64-unknown-linux-gnu, built
+ if strings.HasPrefix(version, "CockroachDB") {
+ versions := strings.Split(strings.TrimPrefix(version, "CockroachDB CCL "), " ")
+ return &schemas.Version{
+ Number: strings.TrimPrefix(versions[0], "v"),
+ Edition: "CockroachDB",
+ }, nil
+ } else if strings.HasPrefix(version, "PostgreSQL") {
+ versions := strings.Split(strings.TrimPrefix(version, "PostgreSQL "), " on ")
+ return &schemas.Version{
+ Number: versions[0],
+ Level: versions[1],
+ Edition: "PostgreSQL",
+ }, nil
+ }
+
+ return nil, errors.New("unknow database version")
+}
+
func (db *postgres) getSchema() string {
if db.uri.Schema != "" {
return db.uri.Schema
case schemas.Bit:
res = schemas.Boolean
return res
- case schemas.MediumInt, schemas.Int, schemas.Integer, schemas.UnsignedInt:
+ case schemas.MediumInt, schemas.Int, schemas.Integer:
if c.IsAutoIncrement {
return schemas.Serial
}
return schemas.Integer
- case schemas.BigInt, schemas.UnsignedBigInt:
+ case schemas.BigInt, schemas.UnsignedBigInt, schemas.UnsignedInt:
if c.IsAutoIncrement {
return schemas.BigSerial
}
func (db *postgres) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) {
args := []interface{}{tableName}
- s := `SELECT column_name, column_default, is_nullable, data_type, character_maximum_length,
+ s := `SELECT column_name, column_default, is_nullable, data_type, character_maximum_length, description,
CASE WHEN p.contype = 'p' THEN true ELSE false END AS primarykey,
CASE WHEN p.contype = 'u' THEN true ELSE false END AS uniquekey
FROM pg_attribute f
JOIN pg_class c ON c.oid = f.attrelid JOIN pg_type t ON t.oid = f.atttypid
LEFT JOIN pg_attrdef d ON d.adrelid = c.oid AND d.adnum = f.attnum
+ LEFT JOIN pg_description de ON f.attrelid=de.objoid AND f.attnum=de.objsubid
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey)
LEFT JOIN pg_class AS g ON p.confrelid = g.oid
col.Indexes = make(map[string]int)
var colName, isNullable, dataType string
- var maxLenStr, colDefault *string
+ var maxLenStr, colDefault, description *string
var isPK, isUnique bool
- err = rows.Scan(&colName, &colDefault, &isNullable, &dataType, &maxLenStr, &isPK, &isUnique)
+ err = rows.Scan(&colName, &colDefault, &isNullable, &dataType, &maxLenStr, &description, &isPK, &isUnique)
if err != nil {
return nil, nil, err
}
col.DefaultIsEmpty = true
}
+ if description != nil {
+ col.Comment = *description
+ }
+
if isPK {
col.IsPrimaryKey = true
}
continue
}
indexName = strings.Trim(indexName, `" `)
- if strings.HasSuffix(indexName, "_pkey") {
+ // ignore primary index
+ if strings.HasSuffix(indexName, "_pkey") || strings.EqualFold(indexName, "primary") {
continue
}
if strings.HasPrefix(indexdef, "CREATE UNIQUE INDEX") {
index := &schemas.Index{Name: indexName, Type: indexType, Cols: make([]string, 0)}
for _, colName := range colNames {
- index.Cols = append(index.Cols, strings.TrimSpace(strings.Replace(colName, `"`, "", -1)))
+ col := strings.TrimSpace(strings.Replace(colName, `"`, "", -1))
+ fields := strings.Split(col, " ")
+ index.Cols = append(index.Cols, fields[0])
}
index.IsRegular = isRegular
indexes[index.Name] = index
return db.Base.Init(db, uri)
}
+func (db *sqlite3) Version(ctx context.Context, queryer core.Queryer) (*schemas.Version, error) {
+ rows, err := queryer.QueryContext(ctx, "SELECT sqlite_version()")
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ var version string
+ if !rows.Next() {
+ return nil, errors.New("Unknow version")
+ }
+
+ if err := rows.Scan(&version); err != nil {
+ return nil, err
+ }
+ return &schemas.Version{
+ Number: version,
+ Edition: "sqlite",
+ }, nil
+}
+
func (db *sqlite3) SetQuotePolicy(quotePolicy QuotePolicy) {
switch quotePolicy {
case QuotePolicyNone:
return engine.dumpTables(tables, w, tp...)
}
-func formatColumnValue(dstDialect dialects.Dialect, d interface{}, col *schemas.Column) string {
+func formatColumnValue(dbLocation *time.Location, dstDialect dialects.Dialect, d interface{}, col *schemas.Column) string {
if d == nil {
return "NULL"
}
return "'" + strings.Replace(v, "'", "''", -1) + "'"
} else if col.SQLType.IsTime() {
- if dstDialect.URI().DBType == schemas.MSSQL && col.SQLType.Name == schemas.DateTime {
- if t, ok := d.(time.Time); ok {
- return "'" + t.UTC().Format("2006-01-02 15:04:05") + "'"
- }
+ if t, ok := d.(time.Time); ok {
+ return "'" + t.In(dbLocation).Format("2006-01-02 15:04:05") + "'"
}
var v = fmt.Sprintf("%s", d)
if strings.HasSuffix(v, " +0000 UTC") {
return errors.New("unknown column error")
}
- fields := strings.Split(col.FieldName, ".")
- field := dataStruct
- for _, fieldName := range fields {
- field = field.FieldByName(fieldName)
- }
- temp += "," + formatColumnValue(dstDialect, field.Interface(), col)
+ field := dataStruct.FieldByIndex(col.FieldIndex)
+ temp += "," + formatColumnValue(engine.DatabaseTZ, dstDialect, field.Interface(), col)
}
_, err = io.WriteString(w, temp[1:]+");\n")
if err != nil {
return errors.New("unknow column error")
}
- temp += "," + formatColumnValue(dstDialect, d, col)
+ temp += "," + formatColumnValue(engine.DatabaseTZ, dstDialect, d, col)
}
_, err = io.WriteString(w, temp[1:]+");\n")
if err != nil {
return session.Having(conditions)
}
-// Table table struct
-type Table struct {
- *schemas.Table
- Name string
-}
-
-// IsValid if table is valid
-func (t *Table) IsValid() bool {
- return t.Table != nil && len(t.Name) > 0
+// DBVersion returns the database version
+func (engine *Engine) DBVersion() (*schemas.Version, error) {
+ return engine.dialect.Version(engine.defaultContext, engine.db)
}
// TableInfo get table info according to bean's content
require (
github.com/denisenkom/go-mssqldb v0.9.0
github.com/go-sql-driver/mysql v1.5.0
+ github.com/json-iterator/go v1.1.11
github.com/lib/pq v1.7.0
github.com/mattn/go-sqlite3 v1.14.6
github.com/stretchr/testify v1.4.0
gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s=
gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU=
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denisenkom/go-mssqldb v0.9.0 h1:RSohk2RsiZqLZ0zCjtfn3S4Gp4exhpBWHyQ7D0yGjAk=
github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
Context(context.Context) *Session
CreateTables(...interface{}) error
DBMetas() ([]*schemas.Table, error)
+ DBVersion() (*schemas.Version, error)
Dialect() dialects.Dialect
DriverName() string
DropTables(...interface{}) error
--- /dev/null
+// Copyright 2021 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build jsoniter
+
+package json
+
+import (
+ jsoniter "github.com/json-iterator/go"
+)
+
+func init() {
+ DefaultJSONHandler = JSONiter{}
+}
+
+// JSONiter implements JSONInterface via jsoniter
+type JSONiter struct{}
+
+// Marshal implements JSONInterface
+func (JSONiter) Marshal(v interface{}) ([]byte, error) {
+ return jsoniter.Marshal(v)
+}
+
+// Unmarshal implements JSONInterface
+func (JSONiter) Unmarshal(data []byte, v interface{}) error {
+ return jsoniter.Unmarshal(data, v)
+}
--- /dev/null
+// Copyright 2019 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package statements
+
+import (
+ "fmt"
+ "strings"
+
+ "xorm.io/builder"
+ "xorm.io/xorm/schemas"
+)
+
+// ErrUnsupportedExprType represents an error with unsupported express type
+type ErrUnsupportedExprType struct {
+ tp string
+}
+
+func (err ErrUnsupportedExprType) Error() string {
+ return fmt.Sprintf("Unsupported expression type: %v", err.tp)
+}
+
+// Expr represents an SQL express
+type Expr struct {
+ ColName string
+ Arg interface{}
+}
+
+// WriteArgs writes args to the writer
+func (expr *Expr) WriteArgs(w *builder.BytesWriter) error {
+ switch arg := expr.Arg.(type) {
+ case *builder.Builder:
+ if _, err := w.WriteString("("); err != nil {
+ return err
+ }
+ if err := arg.WriteTo(w); err != nil {
+ return err
+ }
+ if _, err := w.WriteString(")"); err != nil {
+ return err
+ }
+ case string:
+ if arg == "" {
+ arg = "''"
+ }
+ if _, err := w.WriteString(fmt.Sprintf("%v", arg)); err != nil {
+ return err
+ }
+ default:
+ if _, err := w.WriteString("?"); err != nil {
+ return err
+ }
+ w.Append(arg)
+ }
+ return nil
+}
+
+type exprParams []Expr
+
+func (exprs exprParams) ColNames() []string {
+ var cols = make([]string, 0, len(exprs))
+ for _, expr := range exprs {
+ cols = append(cols, expr.ColName)
+ }
+ return cols
+}
+
+func (exprs *exprParams) Add(name string, arg interface{}) {
+ *exprs = append(*exprs, Expr{name, arg})
+}
+
+func (exprs exprParams) IsColExist(colName string) bool {
+ for _, expr := range exprs {
+ if strings.EqualFold(schemas.CommonQuoter.Trim(expr.ColName), schemas.CommonQuoter.Trim(colName)) {
+ return true
+ }
+ }
+ return false
+}
+
+func (exprs exprParams) WriteArgs(w *builder.BytesWriter) error {
+ for i, expr := range exprs {
+ if err := expr.WriteArgs(w); err != nil {
+ return err
+ }
+ if i != len(exprs)-1 {
+ if _, err := w.WriteString(","); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+++ /dev/null
-// Copyright 2019 The Xorm Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package statements
-
-import (
- "fmt"
- "strings"
-
- "xorm.io/builder"
- "xorm.io/xorm/schemas"
-)
-
-// ErrUnsupportedExprType represents an error with unsupported express type
-type ErrUnsupportedExprType struct {
- tp string
-}
-
-func (err ErrUnsupportedExprType) Error() string {
- return fmt.Sprintf("Unsupported expression type: %v", err.tp)
-}
-
-type exprParam struct {
- colName string
- arg interface{}
-}
-
-type exprParams struct {
- ColNames []string
- Args []interface{}
-}
-
-func (exprs *exprParams) Len() int {
- return len(exprs.ColNames)
-}
-
-func (exprs *exprParams) addParam(colName string, arg interface{}) {
- exprs.ColNames = append(exprs.ColNames, colName)
- exprs.Args = append(exprs.Args, arg)
-}
-
-func (exprs *exprParams) IsColExist(colName string) bool {
- for _, name := range exprs.ColNames {
- if strings.EqualFold(schemas.CommonQuoter.Trim(name), schemas.CommonQuoter.Trim(colName)) {
- return true
- }
- }
- return false
-}
-
-func (exprs *exprParams) getByName(colName string) (exprParam, bool) {
- for i, name := range exprs.ColNames {
- if strings.EqualFold(name, colName) {
- return exprParam{name, exprs.Args[i]}, true
- }
- }
- return exprParam{}, false
-}
-
-func (exprs *exprParams) WriteArgs(w *builder.BytesWriter) error {
- for i, expr := range exprs.Args {
- switch arg := expr.(type) {
- case *builder.Builder:
- if _, err := w.WriteString("("); err != nil {
- return err
- }
- if err := arg.WriteTo(w); err != nil {
- return err
- }
- if _, err := w.WriteString(")"); err != nil {
- return err
- }
- case string:
- if arg == "" {
- arg = "''"
- }
- if _, err := w.WriteString(fmt.Sprintf("%v", arg)); err != nil {
- return err
- }
- default:
- if _, err := w.WriteString("?"); err != nil {
- return err
- }
- w.Append(arg)
- }
- if i != len(exprs.Args)-1 {
- if _, err := w.WriteString(","); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (exprs *exprParams) writeNameArgs(w *builder.BytesWriter) error {
- for i, colName := range exprs.ColNames {
- if _, err := w.WriteString(colName); err != nil {
- return err
- }
- if _, err := w.WriteString("="); err != nil {
- return err
- }
-
- switch arg := exprs.Args[i].(type) {
- case *builder.Builder:
- if _, err := w.WriteString("("); err != nil {
- return err
- }
- if err := arg.WriteTo(w); err != nil {
- return err
- }
- if _, err := w.WriteString("("); err != nil {
- return err
- }
- default:
- w.Append(exprs.Args[i])
- }
-
- if i+1 != len(exprs.ColNames) {
- if _, err := w.WriteString(","); err != nil {
- return err
- }
- }
- }
- return nil
-}
if _, err := buf.WriteString(" OUTPUT Inserted."); err != nil {
return err
}
- if _, err := buf.WriteString(table.AutoIncrement); err != nil {
+ if err := statement.dialect.Quoter().QuoteTo(buf, table.AutoIncrement); err != nil {
return err
}
}
return "", nil, err
}
- if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(colNames, exprs.ColNames...), ","); err != nil {
+ if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(colNames, exprs.ColNames()...), ","); err != nil {
return "", nil, err
}
return "", nil, err
}
- if len(exprs.Args) > 0 {
+ if len(exprs) > 0 {
if _, err := buf.WriteString(","); err != nil {
return "", nil, err
}
return "", nil, err
}
- if len(exprs.Args) > 0 {
+ if len(exprs) > 0 {
if _, err := buf.WriteString(","); err != nil {
return "", nil, err
}
return "", nil, err
}
- if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(columns, exprs.ColNames...), ","); err != nil {
+ if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(columns, exprs.ColNames()...), ","); err != nil {
return "", nil, err
}
return "", nil, err
}
- if len(exprs.Args) > 0 {
+ if len(exprs) > 0 {
if _, err := buf.WriteString(","); err != nil {
return "", nil, err
}
return "", nil, err
}
- if len(exprs.Args) > 0 {
+ if len(exprs) > 0 {
if _, err := buf.WriteString(","); err != nil {
return "", nil, err
}
// GenGetSQL generates Get SQL
func (statement *Statement) GenGetSQL(bean interface{}) (string, []interface{}, error) {
- v := rValue(bean)
- isStruct := v.Kind() == reflect.Struct
- if isStruct {
- statement.SetRefBean(bean)
+ var isStruct bool
+ if bean != nil {
+ v := rValue(bean)
+ isStruct = v.Kind() == reflect.Struct
+ if isStruct {
+ statement.SetRefBean(bean)
+ }
}
var columnStr = statement.ColumnStr()
selectSQL = "count(*)"
}
}
- sqlStr, condArgs, err := statement.genSelectSQL(selectSQL, false, false)
+ var subQuerySelect string
+ if statement.GroupByStr != "" {
+ subQuerySelect = statement.GroupByStr
+ } else {
+ subQuerySelect = selectSQL
+ }
+
+ sqlStr, condArgs, err := statement.genSelectSQL(subQuerySelect, false, false)
if err != nil {
return "", nil, err
}
+ if statement.GroupByStr != "" {
+ sqlStr = fmt.Sprintf("SELECT %s FROM (%s) sub", selectSQL, sqlStr)
+ }
+
return sqlStr, append(statement.joinArgs, condArgs...), nil
}
var args []interface{}
var joinStr string
var err error
- if len(bean) == 0 {
- tableName := statement.TableName()
- if len(tableName) <= 0 {
- return "", nil, ErrTableNotFound
+ var b interface{}
+ if len(bean) > 0 {
+ b = bean[0]
+ beanValue := reflect.ValueOf(bean[0])
+ if beanValue.Kind() != reflect.Ptr {
+ return "", nil, errors.New("needs a pointer")
}
+ if beanValue.Elem().Kind() == reflect.Struct {
+ if err := statement.SetRefBean(bean[0]); err != nil {
+ return "", nil, err
+ }
+ }
+ }
+ tableName := statement.TableName()
+ if len(tableName) <= 0 {
+ return "", nil, ErrTableNotFound
+ }
+ if statement.RefTable == nil {
tableName = statement.quote(tableName)
if len(statement.JoinStr) > 0 {
joinStr = statement.JoinStr
args = []interface{}{}
}
} else {
- beanValue := reflect.ValueOf(bean[0])
- if beanValue.Kind() != reflect.Ptr {
- return "", nil, errors.New("needs a pointer")
- }
-
- if beanValue.Elem().Kind() == reflect.Struct {
- if err := statement.SetRefBean(bean[0]); err != nil {
- return "", nil, err
- }
- }
-
- if len(statement.TableName()) <= 0 {
- return "", nil, ErrTableNotFound
- }
statement.Limit(1)
- sqlStr, args, err = statement.GenGetSQL(bean[0])
+ sqlStr, args, err = statement.GenGetSQL(b)
if err != nil {
return "", nil, err
}
// And add Where & and statement
func (statement *Statement) And(query interface{}, args ...interface{}) *Statement {
- switch query.(type) {
+ switch qr := query.(type) {
case string:
- cond := builder.Expr(query.(string), args...)
+ cond := builder.Expr(qr, args...)
statement.cond = statement.cond.And(cond)
case map[string]interface{}:
- queryMap := query.(map[string]interface{})
- newMap := make(map[string]interface{})
- for k, v := range queryMap {
- newMap[statement.quote(k)] = v
+ cond := make(builder.Eq)
+ for k, v := range qr {
+ cond[statement.quote(k)] = v
}
- statement.cond = statement.cond.And(builder.Eq(newMap))
- case builder.Cond:
- cond := query.(builder.Cond)
statement.cond = statement.cond.And(cond)
+ case builder.Cond:
+ statement.cond = statement.cond.And(qr)
for _, v := range args {
if vv, ok := v.(builder.Cond); ok {
statement.cond = statement.cond.And(vv)
// Or add Where & Or statement
func (statement *Statement) Or(query interface{}, args ...interface{}) *Statement {
- switch query.(type) {
+ switch qr := query.(type) {
case string:
- cond := builder.Expr(query.(string), args...)
+ cond := builder.Expr(qr, args...)
statement.cond = statement.cond.Or(cond)
case map[string]interface{}:
- cond := builder.Eq(query.(map[string]interface{}))
+ cond := make(builder.Eq)
+ for k, v := range qr {
+ cond[statement.quote(k)] = v
+ }
statement.cond = statement.cond.Or(cond)
case builder.Cond:
- cond := query.(builder.Cond)
- statement.cond = statement.cond.Or(cond)
+ statement.cond = statement.cond.Or(qr)
for _, v := range args {
if vv, ok := v.(builder.Cond); ok {
statement.cond = statement.cond.Or(vv)
}
}
default:
- // TODO: not support condition type
+ statement.LastError = ErrConditionType
}
return statement
}
// Incr Generate "Update ... Set column = column + arg" statement
func (statement *Statement) Incr(column string, arg ...interface{}) *Statement {
if len(arg) > 0 {
- statement.IncrColumns.addParam(column, arg[0])
+ statement.IncrColumns.Add(column, arg[0])
} else {
- statement.IncrColumns.addParam(column, 1)
+ statement.IncrColumns.Add(column, 1)
}
return statement
}
// Decr Generate "Update ... Set column = column - arg" statement
func (statement *Statement) Decr(column string, arg ...interface{}) *Statement {
if len(arg) > 0 {
- statement.DecrColumns.addParam(column, arg[0])
+ statement.DecrColumns.Add(column, arg[0])
} else {
- statement.DecrColumns.addParam(column, 1)
+ statement.DecrColumns.Add(column, 1)
}
return statement
}
// SetExpr Generate "Update ... Set column = {expression}" statement
func (statement *Statement) SetExpr(column string, expression interface{}) *Statement {
if e, ok := expression.(string); ok {
- statement.ExprColumns.addParam(column, statement.dialect.Quoter().Replace(e))
+ statement.ExprColumns.Add(column, statement.dialect.Quoter().Replace(e))
} else {
- statement.ExprColumns.addParam(column, expression)
+ statement.ExprColumns.Add(column, expression)
}
return statement
}
//engine.logger.Warn(err)
}
continue
+ } else if fieldValuePtr == nil {
+ continue
}
if col.IsDeleted && !unscoped { // tag "deleted" is enabled
// CondDeleted returns the conditions whether a record is soft deleted.
func (statement *Statement) CondDeleted(col *schemas.Column) builder.Cond {
- var colName = col.Name
+ var colName = statement.quote(col.Name)
if statement.JoinStr != "" {
var prefix string
if statement.TableAlias != "" {
if err != nil {
return nil, nil, err
}
+ if fieldValuePtr == nil {
+ continue
+ }
fieldValue := *fieldValuePtr
fieldType := reflect.TypeOf(fieldValue.Interface())
if s.level <= LOG_ERR {
s.ERR.Output(2, fmt.Sprintln(v...))
}
- return
}
// Errorf implement ILogger
if s.level <= LOG_ERR {
s.ERR.Output(2, fmt.Sprintf(format, v...))
}
- return
}
// Debug implement ILogger
if s.level <= LOG_DEBUG {
s.DEBUG.Output(2, fmt.Sprintln(v...))
}
- return
}
// Debugf implement ILogger
if s.level <= LOG_DEBUG {
s.DEBUG.Output(2, fmt.Sprintf(format, v...))
}
- return
}
// Info implement ILogger
if s.level <= LOG_INFO {
s.INFO.Output(2, fmt.Sprintln(v...))
}
- return
}
// Infof implement ILogger
if s.level <= LOG_INFO {
s.INFO.Output(2, fmt.Sprintf(format, v...))
}
- return
}
// Warn implement ILogger
if s.level <= LOG_WARNING {
s.WARN.Output(2, fmt.Sprintln(v...))
}
- return
}
// Warnf implement ILogger
if s.level <= LOG_WARNING {
s.WARN.Output(2, fmt.Sprintf(format, v...))
}
- return
}
// Level implement ILogger
// SetLevel implement ILogger
func (s *SimpleLogger) SetLevel(l LogLevel) {
s.level = l
- return
}
// ShowSQL implement ILogger
--- /dev/null
+// Copyright 2021 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+ "database/sql"
+
+ "xorm.io/xorm/core"
+)
+
+func (engine *Engine) row2mapStr(rows *core.Rows, types []*sql.ColumnType, fields []string) (map[string]string, error) {
+ var scanResults = make([]interface{}, len(fields))
+ for i := 0; i < len(fields); i++ {
+ var s sql.NullString
+ scanResults[i] = &s
+ }
+
+ if err := rows.Scan(scanResults...); err != nil {
+ return nil, err
+ }
+
+ result := make(map[string]string, len(fields))
+ for ii, key := range fields {
+ s := scanResults[ii].(*sql.NullString)
+ result[key] = s.String
+ }
+ return result, nil
+}
+
+func (engine *Engine) row2mapBytes(rows *core.Rows, types []*sql.ColumnType, fields []string) (map[string][]byte, error) {
+ var scanResults = make([]interface{}, len(fields))
+ for i := 0; i < len(fields); i++ {
+ var s sql.NullString
+ scanResults[i] = &s
+ }
+
+ if err := rows.Scan(scanResults...); err != nil {
+ return nil, err
+ }
+
+ result := make(map[string][]byte, len(fields))
+ for ii, key := range fields {
+ s := scanResults[ii].(*sql.NullString)
+ result[key] = []byte(s.String)
+ }
+ return result, nil
+}
+
+func (engine *Engine) row2sliceStr(rows *core.Rows, types []*sql.ColumnType, fields []string) ([]string, error) {
+ results := make([]string, 0, len(fields))
+ var scanResults = make([]interface{}, len(fields))
+ for i := 0; i < len(fields); i++ {
+ var s sql.NullString
+ scanResults[i] = &s
+ }
+
+ if err := rows.Scan(scanResults...); err != nil {
+ return nil, err
+ }
+
+ for i := 0; i < len(fields); i++ {
+ results = append(results, scanResults[i].(*sql.NullString).String)
+ }
+ return results, nil
+}
import (
"errors"
- "fmt"
"reflect"
"strconv"
- "strings"
"time"
)
type Column struct {
Name string
TableName string
- FieldName string // Avaiable only when parsed from a struct
+ FieldName string // Available only when parsed from a struct
+ FieldIndex []int // Available only when parsed from a struct
SQLType SQLType
IsJSON bool
Length int
// ValueOfV returns column's filed of struct's value accept reflevt value
func (col *Column) ValueOfV(dataStruct *reflect.Value) (*reflect.Value, error) {
- var fieldValue reflect.Value
- fieldPath := strings.Split(col.FieldName, ".")
-
- if dataStruct.Type().Kind() == reflect.Map {
- keyValue := reflect.ValueOf(fieldPath[len(fieldPath)-1])
- fieldValue = dataStruct.MapIndex(keyValue)
- return &fieldValue, nil
- } else if dataStruct.Type().Kind() == reflect.Interface {
- structValue := reflect.ValueOf(dataStruct.Interface())
- dataStruct = &structValue
- }
-
- level := len(fieldPath)
- fieldValue = dataStruct.FieldByName(fieldPath[0])
- for i := 0; i < level-1; i++ {
- if !fieldValue.IsValid() {
- break
- }
- if fieldValue.Kind() == reflect.Struct {
- fieldValue = fieldValue.FieldByName(fieldPath[i+1])
- } else if fieldValue.Kind() == reflect.Ptr {
- if fieldValue.IsNil() {
- fieldValue.Set(reflect.New(fieldValue.Type().Elem()))
+ var v = *dataStruct
+ for _, i := range col.FieldIndex {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
}
- fieldValue = fieldValue.Elem().FieldByName(fieldPath[i+1])
- } else {
- return nil, fmt.Errorf("field %v is not valid", col.FieldName)
+ v = v.Elem()
}
+ v = v.FieldByIndex([]int{i})
}
-
- if !fieldValue.IsValid() {
- return nil, fmt.Errorf("field %v is not valid", col.FieldName)
- }
-
- return &fieldValue, nil
+ return &v, nil
}
// ConvertID converts id content to suitable type according column type
package schemas
import (
- "fmt"
"reflect"
"strconv"
"strings"
for i, col := range table.PKColumns() {
var err error
- fieldName := col.FieldName
- for {
- parts := strings.SplitN(fieldName, ".", 2)
- if len(parts) == 1 {
- break
- }
-
- v = v.FieldByName(parts[0])
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
- if v.Kind() != reflect.Struct {
- return nil, fmt.Errorf("Unsupported read value of column %s from field %s", col.Name, col.FieldName)
- }
- fieldName = parts[1]
- }
+ pkField := v.FieldByIndex(col.FieldIndex)
- pkField := v.FieldByName(fieldName)
switch pkField.Kind() {
case reflect.String:
pk[i], err = col.ConvertID(pkField.String())
--- /dev/null
+// Copyright 2021 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package schemas
+
+// Version represents a database version
+type Version struct {
+ Number string // the version number which could be compared
+ Level string
+ Edition string
+}
if err != nil {
return nil, err
}
+ if fieldValue == nil {
+ return nil, ErrFieldIsNotValid{key, table.Name}
+ }
if !fieldValue.IsValid() || !fieldValue.CanSet() {
return nil, ErrFieldIsNotValid{key, table.Name}
sd, err := strconv.ParseInt(sdata, 10, 64)
if err == nil {
x = time.Unix(sd, 0)
- //session.engine.logger.Debugf("time(0) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
- } else {
- //session.engine.logger.Debugf("time(0) err key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
}
} else if len(sdata) > 19 && strings.Contains(sdata, "-") {
x, err = time.ParseInLocation(time.RFC3339Nano, sdata, parseLoc)
- session.engine.logger.Debugf("time(1) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
+ session.engine.logger.Debugf("time(1) key[%v]: %+v | sdata: [%v]\n", col.Name, x, sdata)
if err != nil {
x, err = time.ParseInLocation("2006-01-02 15:04:05.999999999", sdata, parseLoc)
- //session.engine.logger.Debugf("time(2) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
}
if err != nil {
x, err = time.ParseInLocation("2006-01-02 15:04:05.9999999 Z07:00", sdata, parseLoc)
- //session.engine.logger.Debugf("time(3) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
}
} else if len(sdata) == 19 && strings.Contains(sdata, "-") {
x, err = time.ParseInLocation("2006-01-02 15:04:05", sdata, parseLoc)
- //session.engine.logger.Debugf("time(4) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
} else if len(sdata) == 10 && sdata[4] == '-' && sdata[7] == '-' {
x, err = time.ParseInLocation("2006-01-02", sdata, parseLoc)
- //session.engine.logger.Debugf("time(5) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
} else if col.SQLType.Name == schemas.Time {
if strings.Contains(sdata, " ") {
ssd := strings.Split(sdata, " ")
st := fmt.Sprintf("2006-01-02 %v", sdata)
x, err = time.ParseInLocation("2006-01-02 15:04:05", st, parseLoc)
- //session.engine.logger.Debugf("time(6) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata)
} else {
outErr = fmt.Errorf("unsupported time format %v", sdata)
return
"sort"
"strconv"
"strings"
+ "time"
"xorm.io/xorm/internal/utils"
"xorm.io/xorm/schemas"
return 1, nil
}
- aiValue.Set(int64ToIntValue(id, aiValue.Type()))
-
- return 1, nil
+ return 1, convertAssignV(aiValue.Addr(), id)
} else if len(table.AutoIncrement) > 0 && (session.engine.dialect.URI().DBType == schemas.POSTGRES ||
session.engine.dialect.URI().DBType == schemas.MSSQL) {
res, err := session.queryBytes(sqlStr, args...)
return 1, nil
}
- aiValue.Set(int64ToIntValue(id, aiValue.Type()))
-
- return 1, nil
+ return 1, convertAssignV(aiValue.Addr(), id)
}
res, err := session.exec(sqlStr, args...)
return res.RowsAffected()
}
- aiValue.Set(int64ToIntValue(id, aiValue.Type()))
+ if err := convertAssignV(aiValue.Addr(), id); err != nil {
+ return 0, err
+ }
return res.RowsAffected()
}
}
if col.IsDeleted {
+ colNames = append(colNames, col.Name)
+ if !col.Nullable {
+ if col.SQLType.IsNumeric() {
+ args = append(args, 0)
+ } else {
+ args = append(args, time.Time{}.Format("2006-01-02 15:04:05"))
+ }
+ } else {
+ args = append(args, nil)
+ }
continue
}
return
}
-func row2mapStr(rows *core.Rows, fields []string) (resultsMap map[string]string, err error) {
- result := make(map[string]string)
- scanResultContainers := make([]interface{}, len(fields))
- for i := 0; i < len(fields); i++ {
- var scanResultContainer interface{}
- scanResultContainers[i] = &scanResultContainer
- }
- if err := rows.Scan(scanResultContainers...); err != nil {
- return nil, err
- }
-
- for ii, key := range fields {
- rawValue := reflect.Indirect(reflect.ValueOf(scanResultContainers[ii]))
- // if row is null then as empty string
- if rawValue.Interface() == nil {
- result[key] = ""
- continue
- }
-
- if data, err := value2String(&rawValue); err == nil {
- result[key] = data
- } else {
- return nil, err
- }
- }
- return result, nil
-}
-
-func row2sliceStr(rows *core.Rows, fields []string) (results []string, err error) {
- result := make([]string, 0, len(fields))
- scanResultContainers := make([]interface{}, len(fields))
- for i := 0; i < len(fields); i++ {
- var scanResultContainer interface{}
- scanResultContainers[i] = &scanResultContainer
- }
- if err := rows.Scan(scanResultContainers...); err != nil {
+func (session *Session) rows2Strings(rows *core.Rows) (resultsSlice []map[string]string, err error) {
+ fields, err := rows.Columns()
+ if err != nil {
return nil, err
}
-
- for i := 0; i < len(fields); i++ {
- rawValue := reflect.Indirect(reflect.ValueOf(scanResultContainers[i]))
- // if row is null then as empty string
- if rawValue.Interface() == nil {
- result = append(result, "")
- continue
- }
-
- if data, err := value2String(&rawValue); err == nil {
- result = append(result, data)
- } else {
- return nil, err
- }
- }
- return result, nil
-}
-
-func rows2Strings(rows *core.Rows) (resultsSlice []map[string]string, err error) {
- fields, err := rows.Columns()
+ types, err := rows.ColumnTypes()
if err != nil {
return nil, err
}
+
for rows.Next() {
- result, err := row2mapStr(rows, fields)
+ result, err := session.engine.row2mapStr(rows, types, fields)
if err != nil {
return nil, err
}
return resultsSlice, nil
}
-func rows2SliceString(rows *core.Rows) (resultsSlice [][]string, err error) {
+func (session *Session) rows2SliceString(rows *core.Rows) (resultsSlice [][]string, err error) {
fields, err := rows.Columns()
if err != nil {
return nil, err
}
+ types, err := rows.ColumnTypes()
+ if err != nil {
+ return nil, err
+ }
+
for rows.Next() {
- record, err := row2sliceStr(rows, fields)
+ record, err := session.engine.row2sliceStr(rows, types, fields)
if err != nil {
return nil, err
}
}
defer rows.Close()
- return rows2Strings(rows)
+ return session.rows2Strings(rows)
}
// QuerySliceString runs a raw sql and return records as [][]string
}
defer rows.Close()
- return rows2SliceString(rows)
+ return session.rows2SliceString(rows)
}
func row2mapInterface(rows *core.Rows, fields []string) (resultsMap map[string]interface{}, err error) {
return []byte(str), nil
}
-func row2map(rows *core.Rows, fields []string) (resultsMap map[string][]byte, err error) {
- result := make(map[string][]byte)
- scanResultContainers := make([]interface{}, len(fields))
- for i := 0; i < len(fields); i++ {
- var scanResultContainer interface{}
- scanResultContainers[i] = &scanResultContainer
- }
- if err := rows.Scan(scanResultContainers...); err != nil {
+func (session *Session) rows2maps(rows *core.Rows) (resultsSlice []map[string][]byte, err error) {
+ fields, err := rows.Columns()
+ if err != nil {
return nil, err
}
-
- for ii, key := range fields {
- rawValue := reflect.Indirect(reflect.ValueOf(scanResultContainers[ii]))
- //if row is null then ignore
- if rawValue.Interface() == nil {
- result[key] = []byte{}
- continue
- }
-
- if data, err := value2Bytes(&rawValue); err == nil {
- result[key] = data
- } else {
- return nil, err // !nashtsai! REVIEW, should return err or just error log?
- }
- }
- return result, nil
-}
-
-func rows2maps(rows *core.Rows) (resultsSlice []map[string][]byte, err error) {
- fields, err := rows.Columns()
+ types, err := rows.ColumnTypes()
if err != nil {
return nil, err
}
for rows.Next() {
- result, err := row2map(rows, fields)
+ result, err := session.engine.row2mapBytes(rows, types, fields)
if err != nil {
return nil, err
}
}
defer rows.Close()
- return rows2maps(rows)
+ return session.rows2maps(rows)
}
func (session *Session) exec(sqlStr string, args ...interface{}) (sql.Result, error) {
"xorm.io/xorm/schemas"
)
+// enumerated all errors
+var (
+ ErrNoColumnsTobeUpdated = errors.New("no columns found to be updated")
+)
+
func (session *Session) cacheUpdate(table *schemas.Table, tableName, sqlStr string, args ...interface{}) error {
if table == nil ||
session.tx != nil {
defer session.Close()
}
+ defer session.resetStatement()
+
if session.statement.LastError != nil {
return 0, session.statement.LastError
}
// for update action to like "column = column + ?"
incColumns := session.statement.IncrColumns
- for i, colName := range incColumns.ColNames {
- colNames = append(colNames, session.engine.Quote(colName)+" = "+session.engine.Quote(colName)+" + ?")
- args = append(args, incColumns.Args[i])
+ for _, expr := range incColumns {
+ colNames = append(colNames, session.engine.Quote(expr.ColName)+" = "+session.engine.Quote(expr.ColName)+" + ?")
+ args = append(args, expr.Arg)
}
// for update action to like "column = column - ?"
decColumns := session.statement.DecrColumns
- for i, colName := range decColumns.ColNames {
- colNames = append(colNames, session.engine.Quote(colName)+" = "+session.engine.Quote(colName)+" - ?")
- args = append(args, decColumns.Args[i])
+ for _, expr := range decColumns {
+ colNames = append(colNames, session.engine.Quote(expr.ColName)+" = "+session.engine.Quote(expr.ColName)+" - ?")
+ args = append(args, expr.Arg)
}
// for update action to like "column = expression"
exprColumns := session.statement.ExprColumns
- for i, colName := range exprColumns.ColNames {
- switch tp := exprColumns.Args[i].(type) {
+ for _, expr := range exprColumns {
+ switch tp := expr.Arg.(type) {
case string:
if len(tp) == 0 {
tp = "''"
}
- colNames = append(colNames, session.engine.Quote(colName)+"="+tp)
+ colNames = append(colNames, session.engine.Quote(expr.ColName)+"="+tp)
case *builder.Builder:
subQuery, subArgs, err := session.statement.GenCondSQL(tp)
if err != nil {
return 0, err
}
- colNames = append(colNames, session.engine.Quote(colName)+"=("+subQuery+")")
+ colNames = append(colNames, session.engine.Quote(expr.ColName)+"=("+subQuery+")")
args = append(args, subArgs...)
default:
- colNames = append(colNames, session.engine.Quote(colName)+"=?")
- args = append(args, exprColumns.Args[i])
+ colNames = append(colNames, session.engine.Quote(expr.ColName)+"=?")
+ args = append(args, expr.Arg)
}
}
k = ct.Elem().Kind()
}
if k == reflect.Struct {
- var refTable = session.statement.RefTable
- if refTable == nil {
- refTable, err = session.engine.TableInfo(condiBean[0])
- if err != nil {
- return 0, err
- }
+ condTable, err := session.engine.TableInfo(condiBean[0])
+ if err != nil {
+ return 0, err
}
- var err error
- autoCond, err = session.statement.BuildConds(refTable, condiBean[0], true, true, false, true, false)
+
+ autoCond, err = session.statement.BuildConds(condTable, condiBean[0], true, true, false, true, false)
if err != nil {
return 0, err
}
}
if len(colNames) <= 0 {
- return 0, errors.New("No content found to be updated")
+ return 0, ErrNoColumnsTobeUpdated
}
condSQL, condArgs, err = session.statement.GenCondSQL(cond)
// FIXME: if bean is a map type, it will panic because map cannot be as map key
session.afterUpdateBeans[bean] = &afterClosures
}
-
} else {
if _, ok := interface{}(bean).(AfterUpdateProcessor); ok {
session.afterUpdateBeans[bean] = nil
import (
"encoding/gob"
"errors"
- "fmt"
"reflect"
"strings"
"sync"
"time"
+ "unicode"
"xorm.io/xorm/caches"
"xorm.io/xorm/convert"
var (
// ErrUnsupportedType represents an unsupported type error
- ErrUnsupportedType = errors.New("Unsupported type")
+ ErrUnsupportedType = errors.New("unsupported type")
)
// Parser represents a parser for xorm tag
}
}
+// ErrIgnoreField is a sentinel returned by parseField to signal that the
+// struct field carries the `-` tag and must be skipped, not treated as a
+// failure by the caller.
+var ErrIgnoreField = errors.New("field will be ignored")
+
+func (parser *Parser) parseFieldWithNoTag(fieldIndex int, field reflect.StructField, fieldValue reflect.Value) (*schemas.Column, error) {
+ var sqlType schemas.SQLType
+ if fieldValue.CanAddr() {
+ if _, ok := fieldValue.Addr().Interface().(convert.Conversion); ok {
+ sqlType = schemas.SQLType{Name: schemas.Text}
+ }
+ }
+ // NOTE(review): if only *T implements convert.Conversion (handled in the
+ // CanAddr branch above), this else branch overwrites that Text sqlType
+ // with Type2SQLType — confirm the pointer-receiver case is meant to win.
+ if _, ok := fieldValue.Interface().(convert.Conversion); ok {
+ sqlType = schemas.SQLType{Name: schemas.Text}
+ } else {
+ sqlType = schemas.Type2SQLType(field.Type)
+ }
+ col := schemas.NewColumn(parser.columnMapper.Obj2Table(field.Name),
+ field.Name, sqlType, sqlType.DefaultLength,
+ sqlType.DefaultLength2, true)
+ col.FieldIndex = []int{fieldIndex}
+
+ if field.Type.Kind() == reflect.Int64 && (strings.ToUpper(col.FieldName) == "ID" || strings.HasSuffix(strings.ToUpper(col.FieldName), ".ID")) {
+ col.IsAutoIncrement = true
+ col.IsPrimaryKey = true
+ col.Nullable = false
+ }
+ return col, nil
+}
+
+func (parser *Parser) parseFieldWithTags(table *schemas.Table, fieldIndex int, field reflect.StructField, fieldValue reflect.Value, tags []tag) (*schemas.Column, error) {
+ var col = &schemas.Column{
+ FieldName: field.Name,
+ FieldIndex: []int{fieldIndex},
+ Nullable: true,
+ IsPrimaryKey: false,
+ IsAutoIncrement: false,
+ MapType: schemas.TWOSIDES,
+ Indexes: make(map[string]int),
+ DefaultIsEmpty: true,
+ }
+
+ var ctx = Context{
+ table: table,
+ col: col,
+ fieldValue: fieldValue,
+ indexNames: make(map[string]int),
+ parser: parser,
+ }
+
+ for j, tag := range tags {
+ if ctx.ignoreNext {
+ ctx.ignoreNext = false
+ continue
+ }
+
+ ctx.tag = tag
+ ctx.tagUname = strings.ToUpper(tag.name)
+
+ if j > 0 {
+ ctx.preTag = strings.ToUpper(tags[j-1].name)
+ }
+ if j < len(tags)-1 {
+ ctx.nextTag = tags[j+1].name
+ } else {
+ ctx.nextTag = ""
+ }
+
+ if h, ok := parser.handlers[ctx.tagUname]; ok {
+ if err := h(&ctx); err != nil {
+ return nil, err
+ }
+ } else {
+ if strings.HasPrefix(ctx.tag.name, "'") && strings.HasSuffix(ctx.tag.name, "'") {
+ col.Name = ctx.tag.name[1 : len(ctx.tag.name)-1]
+ } else {
+ col.Name = ctx.tag.name
+ }
+ }
+
+ if ctx.hasCacheTag {
+ if parser.cacherMgr.GetDefaultCacher() != nil {
+ parser.cacherMgr.SetCacher(table.Name, parser.cacherMgr.GetDefaultCacher())
+ } else {
+ parser.cacherMgr.SetCacher(table.Name, caches.NewLRUCacher2(caches.NewMemoryStore(), time.Hour, 10000))
+ }
+ }
+ if ctx.hasNoCacheTag {
+ parser.cacherMgr.SetCacher(table.Name, nil)
+ }
+ }
+
+ if col.SQLType.Name == "" {
+ col.SQLType = schemas.Type2SQLType(field.Type)
+ }
+ parser.dialect.SQLType(col)
+ if col.Length == 0 {
+ col.Length = col.SQLType.DefaultLength
+ }
+ if col.Length2 == 0 {
+ col.Length2 = col.SQLType.DefaultLength2
+ }
+ if col.Name == "" {
+ col.Name = parser.columnMapper.Obj2Table(field.Name)
+ }
+
+ if ctx.isUnique {
+ ctx.indexNames[col.Name] = schemas.UniqueType
+ } else if ctx.isIndex {
+ ctx.indexNames[col.Name] = schemas.IndexType
+ }
+
+ for indexName, indexType := range ctx.indexNames {
+ addIndex(indexName, table, col, indexType)
+ }
+
+ return col, nil
+}
+
+func (parser *Parser) parseField(table *schemas.Table, fieldIndex int, field reflect.StructField, fieldValue reflect.Value) (*schemas.Column, error) {
+ var (
+ tag = field.Tag
+ ormTagStr = strings.TrimSpace(tag.Get(parser.identifier))
+ )
+ if ormTagStr == "-" {
+ return nil, ErrIgnoreField
+ }
+ if ormTagStr == "" {
+ return parser.parseFieldWithNoTag(fieldIndex, field, fieldValue)
+ }
+ tags, err := splitTag(ormTagStr)
+ if err != nil {
+ return nil, err
+ }
+ return parser.parseFieldWithTags(table, fieldIndex, field, fieldValue, tags)
+}
+
+func isNotTitle(n string) bool {
+ for _, c := range n {
+ return unicode.IsLower(c)
+ }
+ return true
+}
+
// Parse parses a struct as a table information
func (parser *Parser) Parse(v reflect.Value) (*schemas.Table, error) {
t := v.Type()
table.Type = t
table.Name = names.GetTableName(parser.tableMapper, v)
- var idFieldColName string
- var hasCacheTag, hasNoCacheTag bool
-
for i := 0; i < t.NumField(); i++ {
- tag := t.Field(i).Tag
-
- ormTagStr := tag.Get(parser.identifier)
- var col *schemas.Column
- fieldValue := v.Field(i)
- fieldType := fieldValue.Type()
-
- if ormTagStr != "" {
- col = &schemas.Column{
- FieldName: t.Field(i).Name,
- Nullable: true,
- IsPrimaryKey: false,
- IsAutoIncrement: false,
- MapType: schemas.TWOSIDES,
- Indexes: make(map[string]int),
- DefaultIsEmpty: true,
- }
- tags := splitTag(ormTagStr)
-
- if len(tags) > 0 {
- if tags[0] == "-" {
- continue
- }
-
- var ctx = Context{
- table: table,
- col: col,
- fieldValue: fieldValue,
- indexNames: make(map[string]int),
- parser: parser,
- }
-
- if strings.HasPrefix(strings.ToUpper(tags[0]), "EXTENDS") {
- pStart := strings.Index(tags[0], "(")
- if pStart > -1 && strings.HasSuffix(tags[0], ")") {
- var tagPrefix = strings.TrimFunc(tags[0][pStart+1:len(tags[0])-1], func(r rune) bool {
- return r == '\'' || r == '"'
- })
-
- ctx.params = []string{tagPrefix}
- }
-
- if err := ExtendsTagHandler(&ctx); err != nil {
- return nil, err
- }
- continue
- }
-
- for j, key := range tags {
- if ctx.ignoreNext {
- ctx.ignoreNext = false
- continue
- }
-
- k := strings.ToUpper(key)
- ctx.tagName = k
- ctx.params = []string{}
-
- pStart := strings.Index(k, "(")
- if pStart == 0 {
- return nil, errors.New("( could not be the first character")
- }
- if pStart > -1 {
- if !strings.HasSuffix(k, ")") {
- return nil, fmt.Errorf("field %s tag %s cannot match ) character", col.FieldName, key)
- }
-
- ctx.tagName = k[:pStart]
- ctx.params = strings.Split(key[pStart+1:len(k)-1], ",")
- }
-
- if j > 0 {
- ctx.preTag = strings.ToUpper(tags[j-1])
- }
- if j < len(tags)-1 {
- ctx.nextTag = tags[j+1]
- } else {
- ctx.nextTag = ""
- }
-
- if h, ok := parser.handlers[ctx.tagName]; ok {
- if err := h(&ctx); err != nil {
- return nil, err
- }
- } else {
- if strings.HasPrefix(key, "'") && strings.HasSuffix(key, "'") {
- col.Name = key[1 : len(key)-1]
- } else {
- col.Name = key
- }
- }
-
- if ctx.hasCacheTag {
- hasCacheTag = true
- }
- if ctx.hasNoCacheTag {
- hasNoCacheTag = true
- }
- }
-
- if col.SQLType.Name == "" {
- col.SQLType = schemas.Type2SQLType(fieldType)
- }
- parser.dialect.SQLType(col)
- if col.Length == 0 {
- col.Length = col.SQLType.DefaultLength
- }
- if col.Length2 == 0 {
- col.Length2 = col.SQLType.DefaultLength2
- }
- if col.Name == "" {
- col.Name = parser.columnMapper.Obj2Table(t.Field(i).Name)
- }
-
- if ctx.isUnique {
- ctx.indexNames[col.Name] = schemas.UniqueType
- } else if ctx.isIndex {
- ctx.indexNames[col.Name] = schemas.IndexType
- }
-
- for indexName, indexType := range ctx.indexNames {
- addIndex(indexName, table, col, indexType)
- }
- }
- } else if fieldValue.CanSet() {
- var sqlType schemas.SQLType
- if fieldValue.CanAddr() {
- if _, ok := fieldValue.Addr().Interface().(convert.Conversion); ok {
- sqlType = schemas.SQLType{Name: schemas.Text}
- }
- }
- if _, ok := fieldValue.Interface().(convert.Conversion); ok {
- sqlType = schemas.SQLType{Name: schemas.Text}
- } else {
- sqlType = schemas.Type2SQLType(fieldType)
- }
- col = schemas.NewColumn(parser.columnMapper.Obj2Table(t.Field(i).Name),
- t.Field(i).Name, sqlType, sqlType.DefaultLength,
- sqlType.DefaultLength2, true)
-
- if fieldType.Kind() == reflect.Int64 && (strings.ToUpper(col.FieldName) == "ID" || strings.HasSuffix(strings.ToUpper(col.FieldName), ".ID")) {
- idFieldColName = col.Name
- }
- } else {
+ var field = t.Field(i)
+ if isNotTitle(field.Name) {
continue
}
- if col.IsAutoIncrement {
- col.Nullable = false
+
+ col, err := parser.parseField(table, i, field, v.Field(i))
+ if err == ErrIgnoreField {
+ continue
+ } else if err != nil {
+ return nil, err
}
table.AddColumn(col)
-
} // end for
- if idFieldColName != "" && len(table.PrimaryKeys) == 0 {
- col := table.GetColumn(idFieldColName)
- col.IsPrimaryKey = true
- col.IsAutoIncrement = true
- col.Nullable = false
- table.PrimaryKeys = append(table.PrimaryKeys, col.Name)
- table.AutoIncrement = col.Name
- }
-
- if hasCacheTag {
- if parser.cacherMgr.GetDefaultCacher() != nil { // !nash! use engine's cacher if provided
- //engine.logger.Info("enable cache on table:", table.Name)
- parser.cacherMgr.SetCacher(table.Name, parser.cacherMgr.GetDefaultCacher())
- } else {
- //engine.logger.Info("enable LRU cache on table:", table.Name)
- parser.cacherMgr.SetCacher(table.Name, caches.NewLRUCacher2(caches.NewMemoryStore(), time.Hour, 10000))
- }
- }
- if hasNoCacheTag {
- //engine.logger.Info("disable cache on table:", table.Name)
- parser.cacherMgr.SetCacher(table.Name, nil)
+ deletedColumn := table.DeletedColumn()
+ // check columns
+ if deletedColumn != nil {
+ deletedColumn.Nullable = true
}
return table, nil
"xorm.io/xorm/schemas"
)
-func splitTag(tag string) (tags []string) {
- tag = strings.TrimSpace(tag)
- var hasQuote = false
- var lastIdx = 0
- for i, t := range tag {
- if t == '\'' {
- hasQuote = !hasQuote
- } else if t == ' ' {
- if lastIdx < i && !hasQuote {
- tags = append(tags, strings.TrimSpace(tag[lastIdx:i]))
- lastIdx = i + 1
+type tag struct {
+ name string
+ params []string
+}
+
+func splitTag(tagStr string) ([]tag, error) {
+ tagStr = strings.TrimSpace(tagStr)
+ var (
+ inQuote bool
+ inBigQuote bool
+ lastIdx int
+ curTag tag
+ paramStart int
+ tags []tag
+ )
+ for i, t := range tagStr {
+ switch t {
+ case '\'':
+ inQuote = !inQuote
+ case ' ':
+ if !inQuote && !inBigQuote {
+ if lastIdx < i {
+ if curTag.name == "" {
+ curTag.name = tagStr[lastIdx:i]
+ }
+ tags = append(tags, curTag)
+ lastIdx = i + 1
+ curTag = tag{}
+ } else if lastIdx == i {
+ lastIdx = i + 1
+ }
+ } else if inBigQuote && !inQuote {
+ paramStart = i + 1
+ }
+ case ',':
+ if !inQuote && !inBigQuote {
+ return nil, fmt.Errorf("comma[%d] of %s should be in quote or big quote", i, tagStr)
+ }
+ if !inQuote && inBigQuote {
+ curTag.params = append(curTag.params, strings.TrimSpace(tagStr[paramStart:i]))
+ paramStart = i + 1
+ }
+ case '(':
+ // NOTE(review): inBigQuote is set even when '(' occurs inside a
+ // single-quoted value — confirm quoted parentheses are intended to
+ // open a parameter list here.
+ inBigQuote = true
+ if !inQuote {
+ curTag.name = tagStr[lastIdx:i]
+ paramStart = i + 1
+ }
+ case ')':
+ inBigQuote = false
+ if !inQuote {
+ curTag.params = append(curTag.params, tagStr[paramStart:i])
}
}
}
- if lastIdx < len(tag) {
- tags = append(tags, strings.TrimSpace(tag[lastIdx:]))
+ if lastIdx < len(tagStr) {
+ if curTag.name == "" {
+ curTag.name = tagStr[lastIdx:]
+ }
+ tags = append(tags, curTag)
}
- return
+ return tags, nil
}
// Context represents a context for xorm tag parse.
type Context struct {
- tagName string
- params []string
+ tag
+ tagUname string
preTag, nextTag string
table *schemas.Table
col *schemas.Column
"CACHE": CacheTagHandler,
"NOCACHE": NoCacheTagHandler,
"COMMENT": CommentTagHandler,
+ "EXTENDS": ExtendsTagHandler,
}
)
// AutoIncrTagHandler describes autoincr tag handler
func AutoIncrTagHandler(ctx *Context) error {
ctx.col.IsAutoIncrement = true
+ ctx.col.Nullable = false
/*
if len(ctx.params) > 0 {
autoStartInt, err := strconv.Atoi(ctx.params[0])
// DeletedTagHandler describes deleted tag handler
func DeletedTagHandler(ctx *Context) error {
ctx.col.IsDeleted = true
+ ctx.col.Nullable = true
return nil
}
// SQLTypeTagHandler describes SQL Type tag handler
func SQLTypeTagHandler(ctx *Context) error {
- ctx.col.SQLType = schemas.SQLType{Name: ctx.tagName}
- if strings.EqualFold(ctx.tagName, "JSON") {
+ ctx.col.SQLType = schemas.SQLType{Name: ctx.tagUname}
+ if ctx.tagUname == "JSON" {
ctx.col.IsJSON = true
}
- if len(ctx.params) > 0 {
- if ctx.tagName == schemas.Enum {
- ctx.col.EnumOptions = make(map[string]int)
- for k, v := range ctx.params {
- v = strings.TrimSpace(v)
- v = strings.Trim(v, "'")
- ctx.col.EnumOptions[v] = k
+ if len(ctx.params) == 0 {
+ return nil
+ }
+
+ switch ctx.tagUname {
+ case schemas.Enum:
+ ctx.col.EnumOptions = make(map[string]int)
+ for k, v := range ctx.params {
+ v = strings.TrimSpace(v)
+ v = strings.Trim(v, "'")
+ ctx.col.EnumOptions[v] = k
+ }
+ case schemas.Set:
+ ctx.col.SetOptions = make(map[string]int)
+ for k, v := range ctx.params {
+ v = strings.TrimSpace(v)
+ v = strings.Trim(v, "'")
+ ctx.col.SetOptions[v] = k
+ }
+ default:
+ var err error
+ if len(ctx.params) == 2 {
+ ctx.col.Length, err = strconv.Atoi(ctx.params[0])
+ if err != nil {
+ return err
}
- } else if ctx.tagName == schemas.Set {
- ctx.col.SetOptions = make(map[string]int)
- for k, v := range ctx.params {
- v = strings.TrimSpace(v)
- v = strings.Trim(v, "'")
- ctx.col.SetOptions[v] = k
+ ctx.col.Length2, err = strconv.Atoi(ctx.params[1])
+ if err != nil {
+ return err
}
- } else {
- var err error
- if len(ctx.params) == 2 {
- ctx.col.Length, err = strconv.Atoi(ctx.params[0])
- if err != nil {
- return err
- }
- ctx.col.Length2, err = strconv.Atoi(ctx.params[1])
- if err != nil {
- return err
- }
- } else if len(ctx.params) == 1 {
- ctx.col.Length, err = strconv.Atoi(ctx.params[0])
- if err != nil {
- return err
- }
+ } else if len(ctx.params) == 1 {
+ ctx.col.Length, err = strconv.Atoi(ctx.params[0])
+ if err != nil {
+ return err
}
}
}
}
for _, col := range parentTable.Columns() {
col.FieldName = fmt.Sprintf("%v.%v", ctx.col.FieldName, col.FieldName)
+ col.FieldIndex = append(ctx.col.FieldIndex, col.FieldIndex...)
var tagPrefix = ctx.col.FieldName
if len(ctx.params) > 0 {
col.Nullable = isPtr
- tagPrefix = ctx.params[0]
+ tagPrefix = strings.Trim(ctx.params[0], "'")
if col.IsPrimaryKey {
col.Name = ctx.col.FieldName
col.IsPrimaryKey = false
default:
//TODO: warning
}
- return nil
+ return ErrIgnoreField
}
// CacheTagHandler describes cache tag handler