
Make database install/upgrade provider aware

Groundwork for installing and upgrading the database schema based upon the DB provider (MySQL, PostgreSQL, SQL Server, etc.)

Cleaned up legacy cruft, refactored, commented and simplified the code to make it easier to support additional database providers.
Harvey Kandola 2018-09-14 12:50:30 +01:00
parent cf1e1ff943
commit 2336dab69f
38 changed files with 1401 additions and 1208 deletions
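
In rough terms, the boot path now selects a storage provider first and then delegates schema work to a single provider-aware entry point, database.InstallUpgrade. A minimal sketch of that flow, pieced together from the changes below (bootstrapStore is a hypothetical helper, not part of this commit):

package boot

import (
	"github.com/documize/community/core/database"
	"github.com/documize/community/core/env"
	"github.com/documize/community/domain"
	"github.com/documize/community/edition/storage"
)

// bootstrapStore sketches the new flow: register the store implementation
// for the configured provider, then run the provider-aware install/upgrade.
// existingDB is false during first-time setup and true on normal boot.
func bootstrapStore(r *env.Runtime, s *domain.Store, existingDB bool) error {
	switch r.Flags.DBType {
	case "mysql":
		r.Storage = env.StoreProvider{Type: env.StoreTypeMySQL, DriverName: "mysql"}
		storage.SetMySQLProvider(r, s)
	}
	return database.InstallUpgrade(r, existingDB)
}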


@@ -32,9 +32,10 @@ copy core\database\templates\*.html embed\bindata
 rd /s /q embed\bindata\scripts
 mkdir embed\bindata\scripts
+mkdir embed\bindata\scripts\mysql
 echo "Copying database scripts folder"
-robocopy /e /NFL /NDL /NJH core\database\scripts\autobuild embed\bindata\scripts
+robocopy /e /NFL /NDL /NJH core\database\scripts\mysql embed\bindata\scripts\mysql
 echo "Generating in-memory static assets..."
 go get -u github.com/jteeuwen/go-bindata/...


@@ -27,7 +27,8 @@ cp domain/mail/*.html embed/bindata/mail
 cp core/database/templates/*.html embed/bindata
 rm -rf embed/bindata/scripts
 mkdir -p embed/bindata/scripts
-cp -r core/database/scripts/autobuild/*.sql embed/bindata/scripts
+mkdir -p embed/bindata/scripts/mysql
+cp -r core/database/scripts/mysql/*.sql embed/bindata/scripts/mysql
 
 echo "Generating in-memory static assets..."
 # go get -u github.com/jteeuwen/go-bindata/...


@@ -140,33 +140,6 @@ func Check(runtime *env.Runtime) bool {
 	return true
 }
 
-// GetSQLVariant uses database value form @@version_comment to deduce MySQL variant.
-// func GetSQLVariant(dbType, vc string) env.DbVariant {
-// 	vc = strings.ToLower(vc)
-// 	dbType = strings.ToLower(dbType)
-
-// 	// determine type from database
-// 	if strings.Contains(vc, "mariadb") {
-// 		return env.DBVariantMariaDB
-// 	} else if strings.Contains(vc, "percona") {
-// 		return env.DBVariantPercona
-// 	} else if strings.Contains(vc, "mysql") {
-// 		return env.DbVariantMySQL
-// 	}
-
-// 	// now determine type from command line switch
-// 	if strings.Contains(dbType, "mariadb") {
-// 		return env.DBVariantMariaDB
-// 	} else if strings.Contains(dbType, "percona") {
-// 		return env.DBVariantPercona
-// 	} else if strings.Contains(dbType, "mysql") {
-// 		return env.DbVariantMySQL
-// 	}
-
-// 	// horrid default could cause app to crash
-// 	return env.DbVariantMySQL
-// }
-
 // GetSQLVersion returns SQL version as major,minor,patch numerics.
 func GetSQLVersion(v string) (ints []int, err error) {
 	ints = []int{0, 0, 0}


@@ -11,7 +11,9 @@
 package database
 
-import "testing"
+import (
+	"testing"
+)
 
 // go test github.com/documize/community/core/database -run TestGetVersion
 func TestGetVersion(t *testing.T) {
@@ -40,3 +42,20 @@ func ts2(t *testing.T, in string, out []int) {
 		}
 	}
 }
+
+func TestDatabaseVersionLegacy(t *testing.T) {
+	i := extractVersionNumber("db_00021.sql")
+	if i != 21 {
+		t.Errorf("expected 21 got %d", i)
+	}
+
+	i = extractVersionNumber("db_000.sql")
+	if i != 0 {
+		t.Errorf("expected 0 got %d", i)
+	}
+
+	i = extractVersionNumber("26")
+	if i != 26 {
+		t.Errorf("expected 26 got %d", i)
+	}
+}

core/database/installer.go (new file, 170 lines)

@@ -0,0 +1,170 @@
// Copyright 2016 Documize Inc. <legal@documize.com>. All rights reserved.
//
// This software (Documize Community Edition) is licensed under
// GNU AGPL v3 http://www.gnu.org/licenses/agpl-3.0.en.html
//
// You can operate outside the AGPL restrictions by purchasing
// Documize Enterprise Edition and obtaining a commercial license
// by contacting <sales@documize.com>.
//
// https://documize.com
package database
import (
"fmt"
"regexp"
"strings"
"time"
"github.com/documize/community/core/env"
"github.com/jmoiron/sqlx"
)
// InstallUpgrade creates new database or upgrades existing database.
func InstallUpgrade(runtime *env.Runtime, existingDB bool) (err error) {
amLeader := false
// Get all SQL scripts.
scripts, err := LoadScripts()
if err != nil {
runtime.Log.Error("Database: unable to load scripts", err)
return
}
// Filter down to the scripts for the current database provider.
dbTypeScripts := SpecificScripts(runtime, scripts)
if len(dbTypeScripts) == 0 {
runtime.Log.Info(fmt.Sprintf("Database: unable to load scripts for database type %s", runtime.Storage.Type))
return
}
// Get current database version.
currentVersion := 0
if existingDB {
currentVersion, err = CurrentVersion(runtime)
if err != nil {
runtime.Log.Error("Database: unable to get current version", err)
return
}
runtime.Log.Info(fmt.Sprintf("Database: current version number is %d", currentVersion))
}
// Make a list of scripts to execute based upon current database state.
toProcess := []Script{}
for _, s := range dbTypeScripts {
if s.Version > currentVersion {
toProcess = append(toProcess, s)
}
}
if existingDB {
var err error
amLeader, err = Lock(runtime, len(toProcess))
if err != nil {
runtime.Log.Error("unable to lock DB", err)
}
} else {
// A new installation assumes you are only spinning up one instance of Documize,
// i.e. nobody performs the initial setup in a clustered environment.
amLeader = true
}
tx, err := runtime.Db.Beginx()
if err != nil {
return Unlock(runtime, tx, err, amLeader)
}
// If currently running process is database leader then we perform upgrade.
if amLeader {
runtime.Log.Info(fmt.Sprintf("Database: %d SQL scripts to process", len(toProcess)))
err = runScripts(runtime, tx, toProcess)
if err != nil {
runtime.Log.Error("Database: error processing SQL script", err)
}
return Unlock(runtime, tx, err, amLeader)
}
// If currently running process is a slave instance then we wait for migration to complete.
targetVersion := toProcess[len(toProcess)-1].Version
for targetVersion != currentVersion {
time.Sleep(time.Second)
runtime.Log.Info("Database: slave instance polling for upgrade process completion")
tx.Rollback()
// Get database version and check again.
currentVersion, err = CurrentVersion(runtime)
if err != nil {
return Unlock(runtime, tx, err, amLeader)
}
}
return Unlock(runtime, tx, nil, amLeader)
}
// Run SQL scripts to install or upgrade this database.
func runScripts(runtime *env.Runtime, tx *sqlx.Tx, scripts []Script) (err error) {
// We can have multiple scripts as each Documize database change has its own SQL script.
for _, script := range scripts {
runtime.Log.Info(fmt.Sprintf("Database: processing SQL version %d", script.Version))
err = executeSQL(tx, runtime.Storage.Type, script.Script)
if err != nil {
return err
}
// Record the fact we have processed this database script version.
_, err = tx.Exec(recordVersionUpgradeQuery(runtime.Storage.Type, script.Version))
if err != nil {
return err
}
}
return nil
}
// executeSQL runs specified SQL commands.
func executeSQL(tx *sqlx.Tx, v env.StoreType, SQLfile []byte) error {
// Turn SQL file contents into runnable SQL statements.
stmts := getStatements(SQLfile)
for _, stmt := range stmts {
// MariaDB has no specific JSON column type (but has JSON queries)
if v == env.StoreTypeMariaDB {
stmt = strings.Replace(stmt, "` JSON", "` TEXT", -1)
}
_, err := tx.Exec(stmt)
if err != nil {
return err
}
}
return nil
}
// getStatements strips out the comments and returns all the individual SQL commands (apart from "USE") as a []string.
func getStatements(bytes []byte) (stmts []string) {
// Strip comments of the form '-- comment' or like this one /**/
stripped := regexp.MustCompile("(?s)--.*?\n|/\\*.*?\\*/").ReplaceAll(bytes, []byte("\n"))
// Break into lines using ; terminator.
lines := strings.Split(string(stripped), ";")
// Prepare return data.
stmts = make([]string, 0, len(lines))
for _, v := range lines {
trimmed := strings.TrimSpace(v)
// Process non-empty lines and exclude "USE dbname" command
if len(trimmed) > 0 && !strings.HasPrefix(strings.ToUpper(trimmed), "USE ") {
stmts = append(stmts, trimmed+";")
}
}
return
}
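
For reference, getStatements turns one embedded .sql asset into individually executable statements: comments are stripped, the text is split on ';', empty fragments and any "USE dbname" command are dropped, and the terminating ';' is restored. A small illustration (the input SQL is invented for the example; exampleGetStatements is not part of the commit):

package database

import "fmt"

// exampleGetStatements is an illustrative helper only.
func exampleGetStatements() {
	src := []byte("-- base schema\n" +
		"USE documize;\n" +
		"CREATE TABLE demo (id INT);\n" +
		"INSERT INTO demo VALUES (1);\n")

	for i, stmt := range getStatements(src) {
		fmt.Printf("%d: %s\n", i, stmt)
	}
	// Output (comment and USE removed):
	// 0: CREATE TABLE demo (id INT);
	// 1: INSERT INTO demo VALUES (1);
}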

core/database/leader.go (new file, 222 lines)

@@ -0,0 +1,222 @@
// Copyright 2016 Documize Inc. <legal@documize.com>. All rights reserved.
//
// This software (Documize Community Edition) is licensed under
// GNU AGPL v3 http://www.gnu.org/licenses/agpl-3.0.en.html
//
// You can operate outside the AGPL restrictions by purchasing
// Documize Enterprise Edition and obtaining a commercial license
// by contacting <sales@documize.com>.
//
// https://documize.com
package database
import (
"crypto/rand"
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/documize/community/core/env"
"github.com/jmoiron/sqlx"
)
// Lock will try to lock the database instance to the running process.
// Uses a "random" delay as a poor man's cluster-aware locking mechanism.
// We skip the delay if there are no scripts to process.
func Lock(runtime *env.Runtime, scriptsToProcess int) (bool, error) {
// Wait for random period of time.
b := make([]byte, 2)
_, err := rand.Read(b)
if err != nil {
return false, err
}
wait := ((time.Duration(b[0]) << 8) | time.Duration(b[1])) * time.Millisecond / 10 // up to 6.5 secs wait
// Why delay if nothing to process?
if scriptsToProcess > 0 {
time.Sleep(wait)
}
// Start transaction for the lock process.
tx, err := runtime.Db.Beginx()
if err != nil {
return false, err
}
// Lock the database.
_, err = tx.Exec(processLockStartQuery(runtime.Storage.Type))
if err != nil {
return false, err
}
// Unlock the database at the end of this function.
defer func() {
_, err = tx.Exec(processLockFinishQuery(runtime.Storage.Type))
if err != nil {
runtime.Log.Error("Database: unable to unlock tables", err)
}
tx.Commit()
}()
// Try to record this process as leader of database migration process.
_, err = tx.Exec(insertProcessIDQuery(runtime.Storage.Type))
if err != nil {
runtime.Log.Info("Database: marked as slave process awaiting upgrade")
return false, nil
}
// We are the leader!
runtime.Log.Info("Database: marked as database upgrade process leader")
return true, err
}
// Unlock completes process that was started with Lock().
func Unlock(runtime *env.Runtime, tx *sqlx.Tx, err error, amLeader bool) error {
if amLeader {
defer func() {
doUnlock(runtime)
}()
if tx != nil {
if err == nil {
tx.Commit()
runtime.Log.Info("Database: ready")
return nil
}
tx.Rollback()
}
runtime.Log.Error("Database: install/upgrade failed", err)
return err
}
return nil // not the leader, so ignore errors
}
// CurrentVersion returns number that represents the current database version number.
// For example 23 represents the 23rd iteration of the database.
func CurrentVersion(runtime *env.Runtime) (version int, err error) {
row := runtime.Db.QueryRow(databaseVersionQuery(runtime.Storage.Type))
var currentVersion string
err = row.Scan(&currentVersion)
if err != nil {
currentVersion = "0"
}
return extractVersionNumber(currentVersion), nil
}
// Helper method for defer function called from Unlock().
func doUnlock(runtime *env.Runtime) error {
tx, err := runtime.Db.Beginx()
if err != nil {
return err
}
_, err = tx.Exec(deleteProcessIDQuery(runtime.Storage.Type))
if err != nil {
return err
}
return tx.Commit()
}
// processLockStartQuery returns database specific query that will
// LOCK the database to this running process.
func processLockStartQuery(t env.StoreType) string {
switch t {
case env.StoreTypeMySQL, env.StoreTypeMariaDB, env.StoreTypePercona:
return "LOCK TABLE `config` WRITE;"
case env.StoreTypePostgreSQL:
return ""
case env.StoreTypeMSSQL:
return ""
}
return ""
}
// processLockFinishQuery returns database specific query that will
// UNLOCK the database from this running process.
func processLockFinishQuery(t env.StoreType) string {
switch t {
case env.StoreTypeMySQL, env.StoreTypeMariaDB, env.StoreTypePercona:
return "UNLOCK TABLES;"
case env.StoreTypePostgreSQL:
return ""
case env.StoreTypeMSSQL:
return ""
}
return ""
}
// insertProcessIDQuery returns database specific query that will
// insert ID of this running process.
func insertProcessIDQuery(t env.StoreType) string {
return "INSERT INTO `config` (`key`,`config`) " + fmt.Sprintf(`VALUES ('DBLOCK','{"pid": "%d"}');`, os.Getpid())
}
// deleteProcessIDQuery returns database specific query that will
// delete ID of this running process.
func deleteProcessIDQuery(t env.StoreType) string {
return "DELETE FROM `config` WHERE `key`='DBLOCK';"
}
// recordVersionUpgradeQuery returns database specific insert statement
// that records the database version number
func recordVersionUpgradeQuery(t env.StoreType, version int) string {
// Make record that holds new database version number.
json := fmt.Sprintf("{\"database\": \"%d\"}", version)
switch t {
case env.StoreTypeMySQL, env.StoreTypeMariaDB, env.StoreTypePercona:
return "INSERT INTO `config` (`key`,`config`) " + "VALUES ('META','" + json + "') ON DUPLICATE KEY UPDATE `config`='" + json + "';"
case env.StoreTypePostgreSQL:
return ""
case env.StoreTypeMSSQL:
return ""
}
return ""
}
// databaseVersionQuery returns the schema version number.
func databaseVersionQuery(t env.StoreType) string {
switch t {
case env.StoreTypeMySQL, env.StoreTypeMariaDB, env.StoreTypePercona:
return "SELECT JSON_EXTRACT(`config`,'$.database') FROM `config` WHERE `key` = 'META';"
case env.StoreTypePostgreSQL:
return ""
case env.StoreTypeMSSQL:
return ""
}
return ""
}
// Turns legacy "db_00021.sql" and new "21" format into version number 21.
func extractVersionNumber(s string) int {
// Good practice in case of human tampering.
s = strings.TrimSpace(s)
s = strings.ToLower(s)
// Remove any quotes from JSON string.
s = strings.Replace(s, "\"", "", -1)
// Remove legacy version string formatting.
// We now just store the number.
s = strings.Replace(s, "db_000", "", 1)
s = strings.Replace(s, ".sql", "", 1)
i, err := strconv.Atoi(s)
if err != nil {
i = 0
}
return i
}
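
The PostgreSQL and SQL Server branches above intentionally return empty strings; they are placeholders for the providers this groundwork prepares for. As a rough, purely illustrative idea of what the PostgreSQL case could later look like (not part of this commit; assumes a jsonb config column with a unique constraint on key):

package database

import "fmt"

// Hypothetical PostgreSQL counterparts of the stubbed queries above.
// Illustrative only; not part of this commit.
func recordVersionUpgradeQueryPostgres(version int) string {
	json := fmt.Sprintf(`{"database": "%d"}`, version)
	return "INSERT INTO config (key, config) VALUES ('META', '" + json + "') " +
		"ON CONFLICT (key) DO UPDATE SET config = '" + json + "';"
}

// databaseVersionQueryPostgres reads the version back out of the JSON value.
func databaseVersionQueryPostgres() string {
	return "SELECT config->>'database' FROM config WHERE key = 'META';"
}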


@@ -1,281 +0,0 @@
// Copyright 2016 Documize Inc. <legal@documize.com>. All rights reserved.
//
// This software (Documize Community Edition) is licensed under
// GNU AGPL v3 http://www.gnu.org/licenses/agpl-3.0.en.html
//
// You can operate outside the AGPL restrictions by purchasing
// Documize Enterprise Edition and obtaining a commercial license
// by contacting <sales@documize.com>.
//
// https://documize.com
package database
import (
"bytes"
"crypto/rand"
"database/sql"
"fmt"
"os"
"regexp"
"sort"
"strings"
"time"
"github.com/documize/community/core/env"
"github.com/documize/community/core/streamutil"
"github.com/documize/community/server/web"
"github.com/jmoiron/sqlx"
)
const migrationsDir = "bindata/scripts"
// migrationsT holds a list of migration sql files to run.
type migrationsT []string
// migrations returns a list of the migrations to update the database as required for this version of the code.
func migrations(lastMigration string) (migrationsT, error) {
lastMigration = strings.TrimPrefix(strings.TrimSuffix(lastMigration, `"`), `"`)
files, err := web.AssetDir(migrationsDir)
if err != nil {
return nil, err
}
sort.Strings(files)
ret := make(migrationsT, 0, len(files))
hadLast := false
if len(lastMigration) == 0 {
hadLast = true
}
for _, v := range files {
if v == lastMigration {
hadLast = true
} else {
if hadLast {
ret = append(ret, v)
}
}
}
//fmt.Println(`DEBUG Migrations("`+lastMigration+`")=`,ret)
return ret, nil
}
// migrate the database as required, by applying the migrations.
func (m migrationsT) migrate(runtime *env.Runtime, tx *sqlx.Tx) error {
for _, v := range m {
runtime.Log.Info("Processing migration file: " + v)
buf, err := web.Asset(migrationsDir + "/" + v)
if err != nil {
return err
}
err = processSQLfile(tx, runtime.Storage.Type, buf)
if err != nil {
return err
}
json := `{"database":"` + v + `"}`
sql := "INSERT INTO `config` (`key`,`config`) " +
"VALUES ('META','" + json +
"') ON DUPLICATE KEY UPDATE `config`='" + json + "';"
_, err = tx.Exec(sql) // add a record in the config file to say we have done the upgrade
if err != nil {
return err
}
}
return nil
}
func lockDB(runtime *env.Runtime) (bool, error) {
b := make([]byte, 2)
_, err := rand.Read(b)
if err != nil {
return false, err
}
wait := ((time.Duration(b[0]) << 8) | time.Duration(b[1])) * time.Millisecond / 10 // up to 6.5 secs wait
time.Sleep(wait)
tx, err := runtime.Db.Beginx()
if err != nil {
return false, err
}
_, err = tx.Exec("LOCK TABLE `config` WRITE;")
if err != nil {
return false, err
}
defer func() {
_, err = tx.Exec("UNLOCK TABLES;")
if err != nil {
runtime.Log.Error("unable to unlock tables", err)
}
tx.Commit()
}()
_, err = tx.Exec("INSERT INTO `config` (`key`,`config`) " +
fmt.Sprintf(`VALUES ('DBLOCK','{"pid": "%d"}');`, os.Getpid()))
if err != nil {
// good error would be "Error 1062: Duplicate entry 'DBLOCK' for key 'idx_config_area'"
if strings.HasPrefix(err.Error(), "Error 1062:") {
runtime.Log.Info("Database locked by another Documize instance")
return false, nil
}
return false, err
}
runtime.Log.Info("Database locked by this Documize instance")
return true, err // success!
}
func unlockDB(rt *env.Runtime) error {
tx, err := rt.Db.Beginx()
if err != nil {
return err
}
_, err = tx.Exec("DELETE FROM `config` WHERE `key`='DBLOCK';")
if err != nil {
return err
}
return tx.Commit()
}
func migrateEnd(runtime *env.Runtime, tx *sqlx.Tx, err error, amLeader bool) error {
if amLeader {
defer func() { unlockDB(runtime) }()
if tx != nil {
if err == nil {
tx.Commit()
runtime.Log.Info("Database checks: completed")
return nil
}
tx.Rollback()
}
runtime.Log.Error("Database checks: failed: ", err)
return err
}
return nil // not the leader, so ignore errors
}
func getLastMigration(tx *sqlx.Tx) (lastMigration string, err error) {
var stmt *sql.Stmt
stmt, err = tx.Prepare("SELECT JSON_EXTRACT(`config`,'$.database') FROM `config` WHERE `key` = 'META';")
if err == nil {
defer streamutil.Close(stmt)
var item = make([]uint8, 0)
row := stmt.QueryRow()
err = row.Scan(&item)
if err == nil {
if len(item) > 1 {
q := []byte(`"`)
lastMigration = string(bytes.TrimPrefix(bytes.TrimSuffix(item, q), q))
}
}
}
return
}
// Migrate the database as required, consolidated action.
func Migrate(runtime *env.Runtime, ConfigTableExists bool) error {
amLeader := false
if ConfigTableExists {
var err error
amLeader, err = lockDB(runtime)
if err != nil {
runtime.Log.Error("unable to lock DB", err)
}
} else {
amLeader = true // what else can you do?
}
tx, err := runtime.Db.Beginx()
if err != nil {
return migrateEnd(runtime, tx, err, amLeader)
}
lastMigration := ""
if ConfigTableExists {
lastMigration, err = getLastMigration(tx)
if err != nil {
return migrateEnd(runtime, tx, err, amLeader)
}
runtime.Log.Info("Database checks: last applied " + lastMigration)
}
mig, err := migrations(lastMigration)
if err != nil {
return migrateEnd(runtime, tx, err, amLeader)
}
if len(mig) == 0 {
runtime.Log.Info("Database checks: no updates required")
return migrateEnd(runtime, tx, nil, amLeader) // no migrations to perform
}
if amLeader {
runtime.Log.Info("Database checks: will execute the following update files: " + strings.Join([]string(mig), ", "))
return migrateEnd(runtime, tx, mig.migrate(runtime, tx), amLeader)
}
// a follower instance
targetMigration := string(mig[len(mig)-1])
for targetMigration != lastMigration {
time.Sleep(time.Second)
runtime.Log.Info("Waiting for database migration completion")
tx.Rollback() // ignore error
tx, err := runtime.Db.Beginx() // need this in order to see the changed situation since last tx
if err != nil {
return migrateEnd(runtime, tx, err, amLeader)
}
lastMigration, _ = getLastMigration(tx)
}
return migrateEnd(runtime, tx, nil, amLeader)
}
func processSQLfile(tx *sqlx.Tx, v env.StoreType, buf []byte) error {
stmts := getStatements(buf)
for _, stmt := range stmts {
// MariaDB has no specific JSON column type (but has JSON queries)
if v == env.StoreTypeMariaDB {
stmt = strings.Replace(stmt, "` JSON", "` TEXT", -1)
}
_, err := tx.Exec(stmt)
if err != nil {
return err
}
}
return nil
}
// getStatement strips out the comments and returns all the individual SQL commands (apart from "USE") as a []string.
func getStatements(bytes []byte) []string {
/* Strip comments of the form '-- comment' or like this one */
stripped := regexp.MustCompile("(?s)--.*?\n|/\\*.*?\\*/").ReplaceAll(bytes, []byte("\n"))
sqls := strings.Split(string(stripped), ";")
ret := make([]string, 0, len(sqls))
for _, v := range sqls {
trimmed := strings.TrimSpace(v)
if len(trimmed) > 0 &&
!strings.HasPrefix(strings.ToUpper(trimmed), "USE ") { // make sure we don't USE the wrong database
ret = append(ret, trimmed+";")
}
}
return ret
}

core/database/scripts.go (new file, 80 lines)

@@ -0,0 +1,80 @@
// Copyright 2016 Documize Inc. <legal@documize.com>. All rights reserved.
//
// This software (Documize Community Edition) is licensed under
// GNU AGPL v3 http://www.gnu.org/licenses/agpl-3.0.en.html
//
// You can operate outside the AGPL restrictions by purchasing
// Documize Enterprise Edition and obtaining a commercial license
// by contacting <sales@documize.com>.
//
// https://documize.com
package database
import (
"fmt"
"github.com/documize/community/core/env"
"sort"
"github.com/documize/community/server/web"
)
// Scripts holds all .SQL files for all supported database providers.
type Scripts struct {
MySQLScripts []Script
PostgresSQLScripts []Script
SQLServerScripts []Script
}
// Script holds SQL script and its associated version number.
type Script struct {
Version int
Script []byte
}
// LoadScripts returns .SQL scripts for supported database providers.
func LoadScripts() (s Scripts, err error) {
assetDir := "bindata/scripts"
// MySQL
s.MySQLScripts, err = loadFiles(fmt.Sprintf("%s/mysql", assetDir))
if err != nil {
return
}
return s, nil
}
// SpecificScripts returns SQL scripts for the current database provider.
func SpecificScripts(runtime *env.Runtime, all Scripts) (s []Script) {
switch runtime.Storage.Type {
case env.StoreTypeMySQL, env.StoreTypeMariaDB, env.StoreTypePercona:
return all.MySQLScripts
case env.StoreTypePostgreSQL:
return all.PostgresSQLScripts
case env.StoreTypeMSSQL:
return all.SQLServerScripts
}
return
}
// loadFiles returns all SQL scripts in the specified folder as []Script.
func loadFiles(path string) (b []Script, err error) {
buf := []byte{}
scripts, err := web.AssetDir(path)
if err != nil {
return
}
sort.Strings(scripts)
for _, file := range scripts {
buf, err = web.Asset(fmt.Sprintf("%s/%s", path, file))
if err != nil {
return
}
b = append(b, Script{Version: extractVersionNumber(file), Script: buf})
}
return b, nil
}
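
Together, LoadScripts and SpecificScripts give the installer the script set for the configured provider. A minimal usage sketch (logScripts is a hypothetical helper, not part of the commit):

package database

import (
	"fmt"

	"github.com/documize/community/core/env"
)

// logScripts shows how the installer consumes these helpers: load every
// embedded script, then keep only those for the configured provider.
func logScripts(runtime *env.Runtime) error {
	all, err := LoadScripts()
	if err != nil {
		return err
	}
	for _, s := range SpecificScripts(runtime, all) {
		runtime.Log.Info(fmt.Sprintf("Database: script version %d (%d bytes)", s.Version, len(s.Script)))
	}
	return nil
}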


@@ -89,7 +89,7 @@ func (h *Handler) Setup(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if err = Migrate(h.Runtime, false /* no tables exist yet */); err != nil {
+	if err = InstallUpgrade(h.Runtime, false); err != nil {
 		h.Runtime.Log.Error("database.Setup migrate", err)
 		return
 	}


@@ -50,7 +50,6 @@ func (s Scope) Add(ctx domain.RequestContext, model page.NewPage) (err error) {
 	row := s.Runtime.Db.QueryRow("SELECT max(sequence) FROM page WHERE orgid=? AND documentid=?", ctx.OrgID, model.Page.DocumentID)
-
 	var maxSeq float64
 	err = row.Scan(&maxSeq)
 	if err != nil {
 		maxSeq = 2048
 	}


@@ -19,6 +19,7 @@ import (
 	"github.com/documize/community/core/env"
 	"github.com/documize/community/core/secrets"
 	"github.com/documize/community/domain"
+	"github.com/documize/community/edition/storage"
 	"github.com/jmoiron/sqlx"
 )
@@ -50,13 +51,19 @@ func InitRuntime(r *env.Runtime, s *domain.Store) bool {
 	switch r.Flags.DBType {
 	case "mysql":
 		r.Storage = env.StoreProvider{Type: env.StoreTypeMySQL, DriverName: "mysql"}
-		StoreMySQL(r, s)
+		storage.SetMySQLProvider(r, s)
 	case "mariadb":
 		r.Storage = env.StoreProvider{Type: env.StoreTypeMariaDB, DriverName: "mysql"}
-		StoreMySQL(r, s)
+		storage.SetMySQLProvider(r, s)
 	case "percona":
 		r.Storage = env.StoreProvider{Type: env.StoreTypePercona, DriverName: "mysql"}
-		StoreMySQL(r, s)
+		storage.SetMySQLProvider(r, s)
+	case "pggg":
+		r.Storage = env.StoreProvider{Type: env.StoreTypePercona, DriverName: "pgggggg"}
+		// storage.SetPostgresSQLProvider(r, s)
+	case "mssql":
+		r.Storage = env.StoreProvider{Type: env.StoreTypePercona, DriverName: "sqlserver"}
+		// storage.SetSQLServerProvider(r, s)
 	}
 
 	// Open connection to database
@@ -77,7 +84,7 @@ func InitRuntime(r *env.Runtime, s *domain.Store) bool {
 	// Go into setup mode if required.
 	if r.Flags.SiteMode != env.SiteModeOffline {
 		if database.Check(r) {
-			if err := database.Migrate(r, true /* the config table exists */); err != nil {
+			if err := database.InstallUpgrade(r, true); err != nil {
 				r.Log.Error("unable to run database migration", err)
 				return false
 			}


@@ -9,8 +9,8 @@
 //
 // https://documize.com
 
-// Package boot prepares runtime environment.
-package boot
+// Package storage sets up database persistence providers.
+package storage
 
 import (
 	"github.com/documize/community/core/env"
@@ -35,8 +35,8 @@ import (
 	user "github.com/documize/community/domain/user/mysql"
 )
 
-// StoreMySQL creates MySQL provider
-func StoreMySQL(r *env.Runtime, s *domain.Store) {
+// SetMySQLProvider creates MySQL provider
+func SetMySQLProvider(r *env.Runtime, s *domain.Store) {
 	// Required connection string parameters and defaults.
 	r.Storage.Params = map[string]string{
 		"charset": "utf8mb4",

File diff suppressed because one or more lines are too long