// Copyright 2016 Documize Inc. <legal@documize.com>. All rights reserved.
//
// This software (Documize Community Edition) is licensed under
// GNU AGPL v3 http://www.gnu.org/licenses/agpl-3.0.en.html
//
// You can operate outside the AGPL restrictions by purchasing
// Documize Enterprise Edition and obtaining a commercial license
// by contacting <sales@documize.com>.
//
// https://documize.com
|
|
|
|
|
|
|
|
package database
|
|
|
|
|
|
|
|
import (
|
2016-07-15 16:54:07 +01:00
|
|
|
"bytes"
|
2016-07-27 18:48:29 +01:00
|
|
|
"crypto/rand"
|
2016-07-15 16:54:07 +01:00
|
|
|
"database/sql"
|
2016-07-27 18:48:29 +01:00
|
|
|
"fmt"
|
|
|
|
"os"
|
2016-07-15 16:54:07 +01:00
|
|
|
"regexp"
|
2016-07-07 18:54:16 -07:00
|
|
|
"sort"
|
|
|
|
"strings"
|
2016-07-27 12:58:19 +01:00
|
|
|
"time"
|
2016-07-07 18:54:16 -07:00
|
|
|
|
2016-07-15 16:54:07 +01:00
|
|
|
"github.com/jmoiron/sqlx"
|
|
|
|
|
2016-07-20 15:58:37 +01:00
|
|
|
"github.com/documize/community/core/log"
|
|
|
|
"github.com/documize/community/core/utility"
|
2016-07-27 12:58:19 +01:00
|
|
|
"github.com/documize/community/core/web"
|
2016-07-07 18:54:16 -07:00
|
|
|
)
|
|
|
|
|
|
|
|
// migrationsDir is the directory within the embedded web assets
// (read via web.AssetDir / web.Asset) that holds the migration SQL scripts.
const migrationsDir = "bindata/scripts"

// migrationsT holds a list of migration sql files to run.
type migrationsT []string
|
|
|
|
|
|
|
|
// migrations returns a list of the migrations to update the database as required for this version of the code.
|
|
|
|
func migrations(lastMigration string) (migrationsT, error) {
|
|
|
|
|
|
|
|
lastMigration = strings.TrimPrefix(strings.TrimSuffix(lastMigration, `"`), `"`)
|
|
|
|
|
|
|
|
//fmt.Println(`DEBUG Migrations("`+lastMigration+`")`)
|
|
|
|
|
|
|
|
files, err := web.AssetDir(migrationsDir)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
sort.Strings(files)
|
|
|
|
|
|
|
|
ret := make(migrationsT, 0, len(files))
|
|
|
|
|
|
|
|
hadLast := false
|
|
|
|
|
2016-07-15 16:54:07 +01:00
|
|
|
if len(lastMigration) == 0 {
|
|
|
|
hadLast = true
|
|
|
|
}
|
|
|
|
|
2016-07-07 18:54:16 -07:00
|
|
|
for _, v := range files {
|
|
|
|
if v == lastMigration {
|
|
|
|
hadLast = true
|
|
|
|
} else {
|
|
|
|
if hadLast {
|
|
|
|
ret = append(ret, v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
//fmt.Println(`DEBUG Migrations("`+lastMigration+`")=`,ret)
|
|
|
|
return ret, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// migrate the database as required, by applying the migrations.
|
2016-07-15 16:54:07 +01:00
|
|
|
func (m migrationsT) migrate(tx *sqlx.Tx) error {
|
2016-07-07 18:54:16 -07:00
|
|
|
for _, v := range m {
|
2016-07-15 16:54:07 +01:00
|
|
|
log.Info("Processing migration file: " + v)
|
2016-07-28 14:13:07 +01:00
|
|
|
|
2016-07-07 18:54:16 -07:00
|
|
|
buf, err := web.Asset(migrationsDir + "/" + v)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-07-28 14:13:07 +01:00
|
|
|
|
2016-07-15 16:54:07 +01:00
|
|
|
err = processSQLfile(tx, buf)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-07-28 14:13:07 +01:00
|
|
|
|
2016-07-15 16:54:07 +01:00
|
|
|
json := `{"database":"` + v + `"}`
|
|
|
|
sql := "INSERT INTO `config` (`key`,`config`) " +
|
|
|
|
"VALUES ('META','" + json +
|
|
|
|
"') ON DUPLICATE KEY UPDATE `config`='" + json + "';"
|
|
|
|
|
2016-07-28 14:13:07 +01:00
|
|
|
_, err = tx.Exec(sql) // add a record in the config file to say we have done the upgrade
|
2016-07-15 16:54:07 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-07-07 18:54:16 -07:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-07-27 18:48:29 +01:00
|
|
|
func lockDB() (bool, error) {
|
|
|
|
b := make([]byte, 2)
|
|
|
|
_, err := rand.Read(b)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
2016-07-28 14:13:07 +01:00
|
|
|
wait := ((time.Duration(b[0]) << 8) | time.Duration(b[1])) * time.Millisecond / 10 // up to 6.5 secs wait
|
|
|
|
time.Sleep(wait)
|
2016-07-27 18:48:29 +01:00
|
|
|
|
|
|
|
tx, err := (*dbPtr).Beginx()
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = tx.Exec("LOCK TABLE `config` WRITE;")
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
|
|
|
defer func() {
|
|
|
|
_, err = tx.Exec("UNLOCK TABLES;")
|
|
|
|
log.IfErr(err)
|
|
|
|
log.IfErr(tx.Commit())
|
|
|
|
}()
|
|
|
|
|
|
|
|
_, err = tx.Exec("INSERT INTO `config` (`key`,`config`) " +
|
|
|
|
fmt.Sprintf(`VALUES ('DBLOCK','{"pid": "%d"}');`, os.Getpid()))
|
|
|
|
if err != nil {
|
2016-07-28 14:13:07 +01:00
|
|
|
// good error would be "Error 1062: Duplicate entry 'DBLOCK' for key 'idx_config_area'"
|
2016-07-27 18:48:29 +01:00
|
|
|
if strings.HasPrefix(err.Error(), "Error 1062:") {
|
|
|
|
log.Info("Database locked by annother Documize instance")
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Info("Database locked by this Documize instance")
|
|
|
|
return true, err // success!
|
|
|
|
}
|
|
|
|
|
|
|
|
func unlockDB() error {
|
|
|
|
tx, err := (*dbPtr).Beginx()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
_, err = tx.Exec("DELETE FROM `config` WHERE `key`='DBLOCK';")
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return tx.Commit()
|
|
|
|
}
|
|
|
|
|
|
|
|
func migrateEnd(tx *sqlx.Tx, err error, amLeader bool) error {
|
|
|
|
if amLeader {
|
|
|
|
defer func() { log.IfErr(unlockDB()) }()
|
2016-07-28 14:13:07 +01:00
|
|
|
if tx != nil {
|
|
|
|
if err == nil {
|
|
|
|
log.IfErr(tx.Commit())
|
|
|
|
log.Info("Database checks: completed")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
log.IfErr(tx.Rollback())
|
2016-07-15 16:54:07 +01:00
|
|
|
}
|
2016-07-28 14:13:07 +01:00
|
|
|
log.Error("Database checks: failed: ", err)
|
|
|
|
return err
|
2016-07-15 16:54:07 +01:00
|
|
|
}
|
2016-07-28 14:13:07 +01:00
|
|
|
return nil // not the leader, so ignore errors
|
2016-07-15 16:54:07 +01:00
|
|
|
}
|
|
|
|
|
2016-07-27 12:58:19 +01:00
|
|
|
func getLastMigration(tx *sqlx.Tx) (lastMigration string, err error) {
|
|
|
|
var stmt *sql.Stmt
|
|
|
|
stmt, err = tx.Prepare("SELECT JSON_EXTRACT(`config`,'$.database') FROM `config` WHERE `key` = 'META';")
|
|
|
|
if err == nil {
|
|
|
|
defer utility.Close(stmt)
|
|
|
|
var item = make([]uint8, 0)
|
|
|
|
|
|
|
|
row := stmt.QueryRow()
|
|
|
|
|
|
|
|
err = row.Scan(&item)
|
|
|
|
if err == nil {
|
|
|
|
if len(item) > 1 {
|
|
|
|
q := []byte(`"`)
|
|
|
|
lastMigration = string(bytes.TrimPrefix(bytes.TrimSuffix(item, q), q))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-07-07 18:54:16 -07:00
|
|
|
// Migrate the database as required, consolidated action.
|
2016-07-27 18:48:29 +01:00
|
|
|
func Migrate(ConfigTableExists bool) error {
|
2016-07-27 12:58:19 +01:00
|
|
|
|
2016-07-27 18:48:29 +01:00
|
|
|
amLeader := false
|
2016-07-15 16:54:07 +01:00
|
|
|
|
2016-07-27 18:48:29 +01:00
|
|
|
if ConfigTableExists {
|
|
|
|
var err error
|
|
|
|
amLeader, err = lockDB()
|
|
|
|
log.IfErr(err)
|
|
|
|
} else {
|
|
|
|
amLeader = true // what else can you do?
|
|
|
|
}
|
2016-07-15 16:54:07 +01:00
|
|
|
|
|
|
|
tx, err := (*dbPtr).Beginx()
|
2016-07-07 18:54:16 -07:00
|
|
|
if err != nil {
|
2016-07-27 18:48:29 +01:00
|
|
|
return migrateEnd(tx, err, amLeader)
|
2016-07-07 18:54:16 -07:00
|
|
|
}
|
2016-07-15 16:54:07 +01:00
|
|
|
|
2016-07-27 18:48:29 +01:00
|
|
|
lastMigration := ""
|
|
|
|
|
2016-07-15 16:54:07 +01:00
|
|
|
if ConfigTableExists {
|
2016-07-27 12:58:19 +01:00
|
|
|
lastMigration, err = getLastMigration(tx)
|
2016-07-15 16:54:07 +01:00
|
|
|
if err != nil {
|
2016-07-27 18:48:29 +01:00
|
|
|
return migrateEnd(tx, err, amLeader)
|
2016-07-15 16:54:07 +01:00
|
|
|
}
|
2017-03-19 14:25:21 +00:00
|
|
|
log.Info("Database checks: last applied " + lastMigration)
|
2016-07-07 18:54:16 -07:00
|
|
|
}
|
2016-07-15 16:54:07 +01:00
|
|
|
|
|
|
|
mig, err := migrations(lastMigration)
|
2016-07-07 18:54:16 -07:00
|
|
|
if err != nil {
|
2016-07-27 18:48:29 +01:00
|
|
|
return migrateEnd(tx, err, amLeader)
|
2016-07-15 16:54:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if len(mig) == 0 {
|
2017-03-19 14:25:21 +00:00
|
|
|
log.Info("Database checks: no updates required")
|
2016-07-27 18:48:29 +01:00
|
|
|
return migrateEnd(tx, nil, amLeader) // no migrations to perform
|
2016-07-07 18:54:16 -07:00
|
|
|
}
|
2016-07-15 16:54:07 +01:00
|
|
|
|
2016-07-27 12:58:19 +01:00
|
|
|
if amLeader {
|
|
|
|
log.Info("Database checks: will execute the following update files: " + strings.Join([]string(mig), ", "))
|
2016-07-27 18:48:29 +01:00
|
|
|
return migrateEnd(tx, mig.migrate(tx), amLeader)
|
2016-07-27 12:58:19 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// a follower instance
|
|
|
|
targetMigration := string(mig[len(mig)-1])
|
|
|
|
for targetMigration != lastMigration {
|
|
|
|
time.Sleep(time.Second)
|
2017-03-19 14:25:21 +00:00
|
|
|
log.Info("Waiting for database migration completion")
|
2016-07-27 18:48:29 +01:00
|
|
|
tx.Rollback() // ignore error
|
|
|
|
tx, err := (*dbPtr).Beginx() // need this in order to see the changed situation since last tx
|
2016-07-27 12:58:19 +01:00
|
|
|
if err != nil {
|
2016-07-27 18:48:29 +01:00
|
|
|
return migrateEnd(tx, err, amLeader)
|
2016-07-27 12:58:19 +01:00
|
|
|
}
|
|
|
|
lastMigration, _ = getLastMigration(tx)
|
|
|
|
}
|
|
|
|
|
2016-07-27 18:48:29 +01:00
|
|
|
return migrateEnd(tx, nil, amLeader)
|
2016-07-15 16:54:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
func processSQLfile(tx *sqlx.Tx, buf []byte) error {
|
|
|
|
|
|
|
|
stmts := getStatements(buf)
|
|
|
|
|
|
|
|
for _, stmt := range stmts {
|
|
|
|
|
|
|
|
_, err := tx.Exec(stmt)
|
|
|
|
if err != nil {
|
2016-07-07 18:54:16 -07:00
|
|
|
return err
|
|
|
|
}
|
2016-07-15 16:54:07 +01:00
|
|
|
|
2016-07-07 18:54:16 -07:00
|
|
|
}
|
2016-07-15 16:54:07 +01:00
|
|
|
|
2016-07-07 18:54:16 -07:00
|
|
|
return nil
|
|
|
|
}
|
2016-07-15 16:54:07 +01:00
|
|
|
|
|
|
|
// sqlCommentRE matches '-- line' comments and /* block */ comments so they
// can be stripped before splitting statements. Compiled once at package
// level rather than on every call.
var sqlCommentRE = regexp.MustCompile("(?s)--.*?\n|/\\*.*?\\*/")

// getStatements strips out the comments and returns all the individual SQL
// commands (apart from "USE") as a []string.
// (The parameter was previously named "bytes", shadowing the imported
// stdlib bytes package; renamed to buf.)
func getStatements(buf []byte) []string {
	stripped := sqlCommentRE.ReplaceAll(buf, []byte("\n"))
	sqls := strings.Split(string(stripped), ";")
	ret := make([]string, 0, len(sqls))
	for _, v := range sqls {
		trimmed := strings.TrimSpace(v)
		if len(trimmed) > 0 &&
			!strings.HasPrefix(strings.ToUpper(trimmed), "USE ") { // make sure we don't USE the wrong database
			ret = append(ret, trimmed+";")
		}
	}
	return ret
}
|