mirror of
https://github.com/documize/community.git
synced 2025-07-19 05:09:42 +02:00
[WIP] Backup process outline
This commit is contained in:
parent
8bbb0d3e82
commit
4094677792
18 changed files with 678 additions and 220 deletions
314
domain/backup/backup.go
Normal file
314
domain/backup/backup.go
Normal file
|
@ -0,0 +1,314 @@
|
||||||
|
// Copyright 2016 Documize Inc. <legal@documize.com>. All rights reserved.
|
||||||
|
//
|
||||||
|
// This software (Documize Community Edition) is licensed under
|
||||||
|
// GNU AGPL v3 http://www.gnu.org/licenses/agpl-3.0.en.html
|
||||||
|
//
|
||||||
|
// You can operate outside the AGPL restrictions by purchasing
|
||||||
|
// Documize Enterprise Edition and obtaining a commercial license
|
||||||
|
// by contacting <sales@documize.com>.
|
||||||
|
//
|
||||||
|
// https://documize.com
|
||||||
|
|
||||||
|
// Package backup handle data backup/restore to/from ZIP format.
|
||||||
|
package backup
|
||||||
|
|
||||||
|
// The backup process can be told to export all data or just for the
|
||||||
|
// current organization (tenant).
|
||||||
|
//
|
||||||
|
// Selected data is marshalled to JSON format and then zipped up
|
||||||
|
// into a single file on the server. The resultant file is then sent
|
||||||
|
// to the caller (e.g. web browser) as a file download. Unless specified,
|
||||||
|
// the file is deleted at the end of the process.
|
||||||
|
//
|
||||||
|
// The backup file contains a manifest file that describes the backup.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/zip"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/documize/community/core/env"
|
||||||
|
"github.com/documize/community/core/uniqueid"
|
||||||
|
"github.com/documize/community/domain"
|
||||||
|
"github.com/documize/community/domain/store"
|
||||||
|
"github.com/documize/community/model/account"
|
||||||
|
m "github.com/documize/community/model/backup"
|
||||||
|
"github.com/documize/community/model/group"
|
||||||
|
"github.com/documize/community/model/org"
|
||||||
|
"github.com/documize/community/model/space"
|
||||||
|
"github.com/documize/community/model/user"
|
||||||
|
uuid "github.com/nu7hatch/gouuid"
|
||||||
|
)
|
||||||
|
|
||||||
|
// backerHandler contains the runtime information such as logging and
// database access needed to produce a backup, together with the export
// specification and the request context of the caller.
type backerHandler struct {
	Runtime *env.Runtime          // product info, logging, database handle
	Store   *store.Store          // data access layer
	Spec    m.ExportSpec          // what to export (single tenant vs. whole system)
	Context domain.RequestContext // identity/tenant of the caller
}
|
||||||
|
|
||||||
|
// backupItem represents a single file destined for the backup ZIP:
// Filename is the entry name inside the archive, Content is the
// (JSON) payload written for that entry.
type backupItem struct {
	Filename, Content string
}
|
||||||
|
|
||||||
|
// toJSON renders v as indented JSON so the files inside the backup
// archive remain human-readable. Returns an empty string on error.
func toJSON(v interface{}) (string, error) {
	data, err := json.MarshalIndent(v, "", " ")
	if err != nil {
		return "", err
	}

	return string(data), nil
}
|
||||||
|
|
||||||
|
// GenerateBackup produces ZIP file of specified content.GenerateBackup
|
||||||
|
// File is located at the same location as the running program.
|
||||||
|
// NOTE: it is up to the caller to remove the file from disk.
|
||||||
|
func (b backerHandler) GenerateBackup() (filename string, err error) {
|
||||||
|
// As precaution we first generate short string first.
|
||||||
|
var id = uniqueid.Generate()
|
||||||
|
newUUID, err := uuid.NewV4()
|
||||||
|
if err == nil {
|
||||||
|
id = newUUID.String()
|
||||||
|
}
|
||||||
|
filename = fmt.Sprintf("dmz-backup-%s.zip", id)
|
||||||
|
|
||||||
|
bf, err := os.Create(filename)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer bf.Close()
|
||||||
|
|
||||||
|
// Create a zip writer on the file write
|
||||||
|
zw := zip.NewWriter(bf)
|
||||||
|
|
||||||
|
// Get the files to write to the ZIP file.
|
||||||
|
files, err := b.produce(id)
|
||||||
|
if err != nil {
|
||||||
|
return filename, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write backup data to zip file on disk.
|
||||||
|
for _, file := range files {
|
||||||
|
fileWriter, e2 := zw.Create(file.Filename)
|
||||||
|
if e2 != nil {
|
||||||
|
return filename, e2
|
||||||
|
}
|
||||||
|
_, e2 = fileWriter.Write([]byte(file.Content))
|
||||||
|
if err != nil {
|
||||||
|
return filename, e2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close out process.
|
||||||
|
err = zw.Close()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return filename, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Manifest describes envrionement of backup source.
|
||||||
|
func (b backerHandler) getManifest(id string) (string, error) {
|
||||||
|
m := m.Manifest{
|
||||||
|
ID: id,
|
||||||
|
Edition: b.Runtime.Product.Edition,
|
||||||
|
Version: b.Runtime.Product.Version,
|
||||||
|
Major: b.Runtime.Product.Major,
|
||||||
|
Minor: b.Runtime.Product.Minor,
|
||||||
|
Patch: b.Runtime.Product.Patch,
|
||||||
|
Revision: b.Runtime.Product.Revision,
|
||||||
|
StoreType: b.Runtime.StoreProvider.Type(),
|
||||||
|
Created: time.Now().UTC(),
|
||||||
|
OrgID: b.Spec.OrgID,
|
||||||
|
}
|
||||||
|
|
||||||
|
s, err := toJSON(m)
|
||||||
|
|
||||||
|
return s, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Produce collection of files to be included in backup file.
|
||||||
|
func (b backerHandler) produce(id string) (files []backupItem, err error) {
|
||||||
|
// Backup manifest
|
||||||
|
c, err := b.getManifest(id)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
files = append(files, backupItem{Filename: "manifest.json", Content: c})
|
||||||
|
|
||||||
|
// Organization
|
||||||
|
err = b.dmzOrg(&files)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// User, Account
|
||||||
|
err = b.dmzUserAccount(&files)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Group, Member
|
||||||
|
err = b.dmzGroup(&files)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Space
|
||||||
|
err = b.dmzSpace(&files)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// dmzOrg appends the JSON export of the organization (tenant) table
// to the backup file collection as dmz_org.json.
func (b backerHandler) dmzOrg(files *[]backupItem) (err error) {
	// Scope the query to the current tenant unless this is a
	// system-wide backup covering every organization.
	w := ""
	if !b.Spec.SystemBackup() {
		// NOTE(review): OrgID is interpolated directly into SQL. It is
		// server-generated, but a parameterized query would be safer — confirm.
		w = fmt.Sprintf(" WHERE c_refid='%s' ", b.Spec.OrgID)
	}

	o := []org.Organization{}
	// JSONEmpty() supplies the store-specific empty-JSON literal used
	// as the coalesce fallback for a NULL auth config.
	err = b.Runtime.Db.Select(&o, `SELECT id, c_refid AS refid,
        c_title AS title, c_message AS message, c_domain AS domain,
        c_service AS conversionendpoint, c_email AS email, c_serial AS serial, c_active AS active,
        c_anonaccess AS allowanonymousaccess, c_authprovider AS authprovider,
        coalesce(c_authconfig,`+b.Runtime.StoreProvider.JSONEmpty()+`) AS authconfig, c_maxtags AS maxtags,
        c_created AS created, c_revised AS revised
        FROM dmz_org`+w)
	if err != nil {
		return
	}

	content, err := toJSON(o)
	if err != nil {
		return
	}
	*files = append(*files, backupItem{Filename: "dmz_org.json", Content: content})

	return
}
|
||||||
|
|
||||||
|
// dmzUserAccount appends JSON exports of the user and account tables
// to the backup file collection as dmz_user.json and dmz_user_account.json.
func (b backerHandler) dmzUserAccount(files *[]backupItem) (err error) {
	// For a tenant backup, restrict users to those joined to the
	// current organization via dmz_user_account; a system backup
	// exports every user.
	w := ""
	if !b.Spec.SystemBackup() {
		w = fmt.Sprintf(" , dmz_user_account a WHERE u.c_refid=a.c_userid AND a.c_orgid='%s' ", b.Spec.OrgID)
	}

	u := []user.User{}
	err = b.Runtime.Db.Select(&u, `SELECT u.id, u.c_refid AS refid,
        u.c_firstname AS firstname, u.c_lastname AS lastname, u.c_email AS email,
        u.c_initials AS initials, u.c_globaladmin AS globaladmin,
        u.c_password AS password, u.c_salt AS salt, u.c_reset AS reset, u.c_lastversion AS lastversion,
        u.c_created AS created, u.c_revised AS revised
        FROM dmz_user u`+w)
	if err != nil {
		return
	}

	content, err := toJSON(u)
	if err != nil {
		return
	}
	*files = append(*files, backupItem{Filename: "dmz_user.json", Content: content})

	// Second export: the account (user-to-org membership) records,
	// again tenant-scoped unless performing a system backup.
	w = ""
	if !b.Spec.SystemBackup() {
		w = fmt.Sprintf(" WHERE c_orgid='%s' ", b.Spec.OrgID)
	}
	acc := []account.Account{}
	err = b.Runtime.Db.Select(&acc, `SELECT id, c_refid AS refid, c_orgid AS orgid, c_userid AS userid,
        c_editor AS editor, c_admin AS admin, c_users AS users, c_analytics AS analytics,
        c_active AS active, c_created AS created, c_revised AS revised
        FROM dmz_user_account`+w)
	if err != nil {
		return
	}

	content, err = toJSON(acc)
	if err != nil {
		return
	}
	*files = append(*files, backupItem{Filename: "dmz_user_account.json", Content: content})

	return
}
|
||||||
|
|
||||||
|
// dmzGroup appends JSON exports of the group and group-membership
// tables to the backup file collection as dmz_group.json and
// dmz_group_member.json.
func (b backerHandler) dmzGroup(files *[]backupItem) (err error) {
	// Tenant-scope the group query unless performing a system backup.
	w := ""
	if !b.Spec.SystemBackup() {
		w = fmt.Sprintf(" WHERE c_orgid='%s' ", b.Spec.OrgID)
	}

	g := []group.Group{}
	err = b.Runtime.Db.Select(&g, `
        SELECT id, c_refid AS refid,
        c_orgid AS orgid, c_name AS name, c_desc AS purpose,
        c_created AS created, c_revised AS revised
        FROM dmz_group`+w)
	if err != nil {
		return
	}

	content, err := toJSON(g)
	if err != nil {
		return
	}
	*files = append(*files, backupItem{Filename: "dmz_group.json", Content: content})

	// Second export: group membership rows, same tenant scoping.
	w = ""
	if !b.Spec.SystemBackup() {
		w = fmt.Sprintf(" WHERE c_orgid='%s' ", b.Spec.OrgID)
	}
	gm := []group.Member{}
	err = b.Runtime.Db.Select(&gm, `
        SELECT id, c_orgid AS orgid, c_groupid AS groupid, c_userid AS userid
        FROM dmz_group_member`+w)
	if err != nil {
		return
	}

	content, err = toJSON(gm)
	if err != nil {
		return
	}
	*files = append(*files, backupItem{Filename: "dmz_group_member.json", Content: content})

	return
}
|
||||||
|
|
||||||
|
// Space.
|
||||||
|
func (b backerHandler) dmzSpace(files *[]backupItem) (err error) {
|
||||||
|
w := ""
|
||||||
|
if !b.Spec.SystemBackup() {
|
||||||
|
w = fmt.Sprintf(" WHERE c_orgid='%s' ", b.Spec.OrgID)
|
||||||
|
}
|
||||||
|
|
||||||
|
sp := []space.Space{}
|
||||||
|
err = b.Runtime.Db.Select(&sp, `SELECT id, c_refid AS refid,
|
||||||
|
c_name AS name, c_orgid AS orgid, c_userid AS userid,
|
||||||
|
c_type AS type, c_lifecycle AS lifecycle, c_likes AS likes,
|
||||||
|
c_created AS created, c_revised AS revised
|
||||||
|
FROM dmz_space`+w)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
content, err := toJSON(sp)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
*files = append(*files, backupItem{Filename: "dmz_space.json", Content: content})
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
|
@ -9,11 +9,29 @@
|
||||||
//
|
//
|
||||||
// https://documize.com
|
// https://documize.com
|
||||||
|
|
||||||
|
// Package backup handle data backup/restore to/from ZIP format.
|
||||||
package backup
|
package backup
|
||||||
|
|
||||||
|
// Documize data is all held in the SQL database in relational format.
|
||||||
|
// The objective is to export the data into a compressed file that
|
||||||
|
// can be restored again as required.
|
||||||
|
//
|
||||||
|
// This allows for the following scenarios to be supported:
|
||||||
|
//
|
||||||
|
// 1. Copying data from one Documize instance to another.
|
||||||
|
// 2. Changing database provider (e.g. from MySQL to PostgreSQL).
|
||||||
|
// 3. Moving between Documize Cloud and self-hosted instances.
|
||||||
|
// 4. GDPR compliance (send copy of data and nuke whatever remains).
|
||||||
|
// 5. Setting up sample Documize instance with pre-defined content.
|
||||||
|
//
|
||||||
|
// The initial implementation is restricted to tenant or global
|
||||||
|
// backup/restore operations and can only be performed by a verified
|
||||||
|
// Global Administrator.
|
||||||
|
//
|
||||||
|
// In future the process should be able to support per space backup/restore
|
||||||
|
// operations. This is subject to further review.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"archive/zip"
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
@ -23,10 +41,10 @@ import (
|
||||||
"github.com/documize/community/core/env"
|
"github.com/documize/community/core/env"
|
||||||
"github.com/documize/community/core/response"
|
"github.com/documize/community/core/response"
|
||||||
"github.com/documize/community/core/streamutil"
|
"github.com/documize/community/core/streamutil"
|
||||||
"github.com/documize/community/core/uniqueid"
|
|
||||||
"github.com/documize/community/domain"
|
"github.com/documize/community/domain"
|
||||||
indexer "github.com/documize/community/domain/search"
|
indexer "github.com/documize/community/domain/search"
|
||||||
"github.com/documize/community/domain/store"
|
"github.com/documize/community/domain/store"
|
||||||
|
m "github.com/documize/community/model/backup"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Handler contains the runtime information such as logging and database.
|
// Handler contains the runtime information such as logging and database.
|
||||||
|
@ -57,7 +75,7 @@ func (h *Handler) Backup(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
spec := backupSpec{}
|
spec := m.ExportSpec{}
|
||||||
err = json.Unmarshal(body, &spec)
|
err = json.Unmarshal(body, &spec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
response.WriteBadRequestError(w, method, err.Error())
|
response.WriteBadRequestError(w, method, err.Error())
|
||||||
|
@ -65,31 +83,36 @@ func (h *Handler) Backup(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// data, err := backup(ctx, *h.Store, spec)
|
bh := backerHandler{Runtime: h.Runtime, Store: h.Store, Context: ctx, Spec: spec}
|
||||||
// if err != nil {
|
|
||||||
// response.WriteServerError(w, method, err)
|
|
||||||
// h.Runtime.Log.Error(method, err)
|
|
||||||
// return
|
|
||||||
// }
|
|
||||||
|
|
||||||
// Filename is current timestamp
|
// Produce zip file on disk.
|
||||||
fn := fmt.Sprintf("dmz-backup-%s.zip", uniqueid.Generate())
|
filename, err := bh.GenerateBackup()
|
||||||
|
|
||||||
ziptest(fn)
|
|
||||||
|
|
||||||
bb, err := ioutil.ReadFile(fn)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
response.WriteServerError(w, method, err)
|
response.WriteServerError(w, method, err)
|
||||||
h.Runtime.Log.Error(method, err)
|
h.Runtime.Log.Error(method, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
w.Header().Set("Content-Type", "application/zip")
|
// Read backup file into memory.
|
||||||
w.Header().Set("Content-Disposition", `attachment; filename="`+fn+`" ; `+`filename*="`+fn+`"`)
|
// DEBT: write file directly to HTTP response stream?
|
||||||
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bb)))
|
bk, err := ioutil.ReadFile(filename)
|
||||||
w.Header().Set("x-documize-filename", fn)
|
if err != nil {
|
||||||
|
response.WriteServerError(w, method, err)
|
||||||
|
h.Runtime.Log.Error(method, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
x, err := w.Write(bb)
|
// Standard HTTP headers.
|
||||||
|
w.Header().Set("Content-Type", "application/zip")
|
||||||
|
w.Header().Set("Content-Disposition", `attachment; filename="`+filename+`" ; `+`filename*="`+filename+`"`)
|
||||||
|
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bk)))
|
||||||
|
// Custom HTTP header helps API consumer to extract backup filename cleanly
|
||||||
|
// instead of parsing 'Content-Disposition' header.
|
||||||
|
// This HTTP header is CORS white-listed.
|
||||||
|
w.Header().Set("x-documize-filename", filename)
|
||||||
|
|
||||||
|
// Write backup to response stream.
|
||||||
|
x, err := w.Write(bk)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
response.WriteServerError(w, method, err)
|
response.WriteServerError(w, method, err)
|
||||||
h.Runtime.Log.Error(method, err)
|
h.Runtime.Log.Error(method, err)
|
||||||
|
@ -97,90 +120,10 @@ func (h *Handler) Backup(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
w.WriteHeader(http.StatusOK)
|
w.WriteHeader(http.StatusOK)
|
||||||
|
|
||||||
h.Runtime.Log.Info(fmt.Sprintf("Backup completed for %s by %s, size %d", ctx.OrgID, ctx.UserID, x))
|
h.Runtime.Log.Info(fmt.Sprintf("Backup completed for %s by %s, size %d", ctx.OrgID, ctx.UserID, x))
|
||||||
}
|
|
||||||
|
|
||||||
type backupSpec struct {
|
// Delete backup file if not requested to keep it.
|
||||||
}
|
if !spec.Retain {
|
||||||
|
os.Remove(filename)
|
||||||
func backup(ctx domain.RequestContext, s store.Store, spec backupSpec) (file []byte, err error) {
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
zw := zip.NewWriter(buf)
|
|
||||||
|
|
||||||
// Add some files to the archive.
|
|
||||||
var files = []struct {
|
|
||||||
Name, Body string
|
|
||||||
}{
|
|
||||||
{"readme.txt", "This archive contains some text files."},
|
|
||||||
{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
|
|
||||||
{"todo.txt", "Get animal handling licence.\nWrite more examples."},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, file := range files {
|
|
||||||
f, err := zw.Create(file.Name)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = f.Write([]byte(file.Body))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure to check the error on Close.
|
|
||||||
err = zw.Close()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ziptest(filename string) {
|
|
||||||
// Create a file to write the archive buffer to
|
|
||||||
// Could also use an in memory buffer.
|
|
||||||
outFile, err := os.Create(filename)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
}
|
|
||||||
defer outFile.Close()
|
|
||||||
|
|
||||||
// Create a zip writer on top of the file writer
|
|
||||||
zipWriter := zip.NewWriter(outFile)
|
|
||||||
|
|
||||||
// Add files to archive
|
|
||||||
// We use some hard coded data to demonstrate,
|
|
||||||
// but you could iterate through all the files
|
|
||||||
// in a directory and pass the name and contents
|
|
||||||
// of each file, or you can take data from your
|
|
||||||
// program and write it write in to the archive
|
|
||||||
// without
|
|
||||||
var filesToArchive = []struct {
|
|
||||||
Name, Body string
|
|
||||||
}{
|
|
||||||
{"test.txt", "String contents of file"},
|
|
||||||
{"test2.txt", "\x61\x62\x63\n"},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create and write files to the archive, which in turn
|
|
||||||
// are getting written to the underlying writer to the
|
|
||||||
// .zip file we created at the beginning
|
|
||||||
for _, file := range filesToArchive {
|
|
||||||
fileWriter, err := zipWriter.Create(file.Name)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
}
|
|
||||||
_, err = fileWriter.Write([]byte(file.Body))
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean up
|
|
||||||
err = zipWriter.Close()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
20
domain/backup/restore.go
Normal file
20
domain/backup/restore.go
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
// Copyright 2016 Documize Inc. <legal@documize.com>. All rights reserved.
|
||||||
|
//
|
||||||
|
// This software (Documize Community Edition) is licensed under
|
||||||
|
// GNU AGPL v3 http://www.gnu.org/licenses/agpl-3.0.en.html
|
||||||
|
//
|
||||||
|
// You can operate outside the AGPL restrictions by purchasing
|
||||||
|
// Documize Enterprise Edition and obtaining a commercial license
|
||||||
|
// by contacting <sales@documize.com>.
|
||||||
|
//
|
||||||
|
// https://documize.com
|
||||||
|
|
||||||
|
// Package backup handle data backup/restore to/from ZIP format.
|
||||||
|
package backup
|
||||||
|
|
||||||
|
// DESIGN
|
||||||
|
// ------
|
||||||
|
//
|
||||||
|
// The restore operation allows an admin to upload a backup file
|
||||||
|
|
||||||
|
import ()
|
|
@ -203,7 +203,10 @@ func (s Store) RemoveDocumentCategories(ctx domain.RequestContext, documentID st
|
||||||
// DeleteBySpace removes all category and category associations for given space.
|
// DeleteBySpace removes all category and category associations for given space.
|
||||||
func (s Store) DeleteBySpace(ctx domain.RequestContext, spaceID string) (rows int64, err error) {
|
func (s Store) DeleteBySpace(ctx domain.RequestContext, spaceID string) (rows int64, err error) {
|
||||||
s1 := fmt.Sprintf("DELETE FROM dmz_category_member WHERE c_orgid='%s' AND c_spaceid='%s'", ctx.OrgID, spaceID)
|
s1 := fmt.Sprintf("DELETE FROM dmz_category_member WHERE c_orgid='%s' AND c_spaceid='%s'", ctx.OrgID, spaceID)
|
||||||
s.DeleteWhere(ctx.Transaction, s1)
|
_, err = s.DeleteWhere(ctx.Transaction, s1)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
s2 := fmt.Sprintf("DELETE FROM dmz_category WHERE c_orgid='%s' AND c_spaceid='%s'", ctx.OrgID, spaceID)
|
s2 := fmt.Sprintf("DELETE FROM dmz_category WHERE c_orgid='%s' AND c_spaceid='%s'", ctx.OrgID, spaceID)
|
||||||
return s.DeleteWhere(ctx.Transaction, s2)
|
return s.DeleteWhere(ctx.Transaction, s2)
|
||||||
|
|
|
@ -161,9 +161,6 @@ func (h *Handler) convert(w http.ResponseWriter, r *http.Request, job, folderID
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
a, _ := h.Store.Attachment.GetAttachments(ctx, nd.RefID)
|
|
||||||
go h.Indexer.IndexDocument(ctx, nd, a)
|
|
||||||
|
|
||||||
response.WriteJSON(w, nd)
|
response.WriteJSON(w, nd)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -252,7 +249,6 @@ func processDocument(ctx domain.RequestContext, r *env.Runtime, store *store.Sto
|
||||||
ActivityType: activity.TypeCreated})
|
ActivityType: activity.TypeCreated})
|
||||||
|
|
||||||
err = ctx.Transaction.Commit()
|
err = ctx.Transaction.Commit()
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = errors.Wrap(err, "cannot commit new document import")
|
err = errors.Wrap(err, "cannot commit new document import")
|
||||||
return
|
return
|
||||||
|
@ -260,12 +256,11 @@ func processDocument(ctx domain.RequestContext, r *env.Runtime, store *store.Sto
|
||||||
|
|
||||||
newDocument, err = store.Document.Get(ctx, documentID)
|
newDocument, err = store.Document.Get(ctx, documentID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ctx.Transaction.Rollback()
|
|
||||||
err = errors.Wrap(err, "cannot fetch new document")
|
err = errors.Wrap(err, "cannot fetch new document")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
indexer.IndexDocument(ctx, newDocument, da)
|
go indexer.IndexDocument(ctx, newDocument, da)
|
||||||
|
|
||||||
store.Audit.Record(ctx, audit.EventTypeDocumentUpload)
|
store.Audit.Record(ctx, audit.EventTypeDocumentUpload)
|
||||||
|
|
||||||
|
|
|
@ -278,7 +278,6 @@ func (s Store) Delete(ctx domain.RequestContext, documentID string) (rows int64,
|
||||||
// Remove document pages, revisions, attachments, updates the search subsystem.
|
// Remove document pages, revisions, attachments, updates the search subsystem.
|
||||||
func (s Store) DeleteBySpace(ctx domain.RequestContext, spaceID string) (rows int64, err error) {
|
func (s Store) DeleteBySpace(ctx domain.RequestContext, spaceID string) (rows int64, err error) {
|
||||||
rows, err = s.DeleteWhere(ctx.Transaction, fmt.Sprintf("DELETE FROM dmz_section WHERE c_docid IN (SELECT c_refid FROM dmz_doc WHERE c_spaceid='%s' AND c_orgid='%s')", spaceID, ctx.OrgID))
|
rows, err = s.DeleteWhere(ctx.Transaction, fmt.Sprintf("DELETE FROM dmz_section WHERE c_docid IN (SELECT c_refid FROM dmz_doc WHERE c_spaceid='%s' AND c_orgid='%s')", spaceID, ctx.OrgID))
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
@ -18,7 +18,6 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/documize/community/core/env"
|
"github.com/documize/community/core/env"
|
||||||
"github.com/documize/community/core/streamutil"
|
|
||||||
"github.com/documize/community/domain"
|
"github.com/documize/community/domain"
|
||||||
"github.com/documize/community/domain/store"
|
"github.com/documize/community/domain/store"
|
||||||
"github.com/documize/community/model/org"
|
"github.com/documize/community/model/org"
|
||||||
|
@ -50,22 +49,16 @@ func (s Store) AddOrganization(ctx domain.RequestContext, org org.Organization)
|
||||||
|
|
||||||
// GetOrganization returns the Organization reocrod from the organization database table with the given id.
|
// GetOrganization returns the Organization reocrod from the organization database table with the given id.
|
||||||
func (s Store) GetOrganization(ctx domain.RequestContext, id string) (org org.Organization, err error) {
|
func (s Store) GetOrganization(ctx domain.RequestContext, id string) (org org.Organization, err error) {
|
||||||
stmt, err := s.Runtime.Db.Preparex(s.Bind(`SELECT id, c_refid AS refid,
|
err = s.Runtime.Db.Get(&org, s.Bind(`SELECT id, c_refid AS refid,
|
||||||
c_title AS title, c_message AS message, c_domain AS domain,
|
c_title AS title, c_message AS message, c_domain AS domain,
|
||||||
c_service AS conversionendpoint, c_email AS email, c_serial AS serial, c_active AS active,
|
c_service AS conversionendpoint, c_email AS email, c_serial AS serial, c_active AS active,
|
||||||
c_anonaccess AS allowanonymousaccess, c_authprovider AS authprovider,
|
c_anonaccess AS allowanonymousaccess, c_authprovider AS authprovider,
|
||||||
coalesce(c_authconfig,` + s.EmptyJSON() + `) AS authconfig, c_maxtags AS maxtags,
|
coalesce(c_authconfig,`+s.EmptyJSON()+`) AS authconfig, c_maxtags AS maxtags,
|
||||||
c_created AS created, c_revised AS revised
|
c_created AS created, c_revised AS revised
|
||||||
FROM dmz_org
|
FROM dmz_org
|
||||||
WHERE c_refid=?`))
|
WHERE c_refid=?`),
|
||||||
defer streamutil.Close(stmt)
|
id)
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
err = errors.Wrap(err, fmt.Sprintf("unable to prepare select for org %s", id))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err = stmt.Get(&org, id)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = errors.Wrap(err, fmt.Sprintf("unable to get org %s", id))
|
err = errors.Wrap(err, fmt.Sprintf("unable to get org %s", id))
|
||||||
return
|
return
|
||||||
|
|
|
@ -17,7 +17,6 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/documize/community/core/env"
|
"github.com/documize/community/core/env"
|
||||||
"github.com/documize/community/core/streamutil"
|
|
||||||
"github.com/documize/community/core/stringutil"
|
"github.com/documize/community/core/stringutil"
|
||||||
"github.com/documize/community/domain"
|
"github.com/documize/community/domain"
|
||||||
"github.com/documize/community/domain/store"
|
"github.com/documize/community/domain/store"
|
||||||
|
@ -26,7 +25,6 @@ import (
|
||||||
"github.com/documize/community/model/page"
|
"github.com/documize/community/model/page"
|
||||||
"github.com/documize/community/model/search"
|
"github.com/documize/community/model/search"
|
||||||
"github.com/documize/community/model/workflow"
|
"github.com/documize/community/model/workflow"
|
||||||
"github.com/jmoiron/sqlx"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -189,24 +187,16 @@ func (s Store) DeleteContent(ctx domain.RequestContext, pageID string) (err erro
|
||||||
method := "search.DeleteContent"
|
method := "search.DeleteContent"
|
||||||
|
|
||||||
// remove all search entries
|
// remove all search entries
|
||||||
var stmt1 *sqlx.Stmt
|
_, err = ctx.Transaction.Exec(s.Bind("DELETE FROM dmz_search WHERE c_orgid=? AND c_itemid=? AND c_itemtype=?"),
|
||||||
stmt1, err = ctx.Transaction.Preparex(s.Bind("DELETE FROM dmz_search WHERE c_orgid=? AND c_itemid=? AND c_itemtype=?"))
|
ctx.OrgID, pageID, "page")
|
||||||
defer streamutil.Close(stmt1)
|
|
||||||
|
|
||||||
if err != nil && err != sql.ErrNoRows {
|
|
||||||
err = errors.Wrap(err, "prepare delete document content entry")
|
|
||||||
s.Runtime.Log.Error(method, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = stmt1.Exec(ctx.OrgID, pageID, "page")
|
|
||||||
if err != nil && err != sql.ErrNoRows {
|
if err != nil && err != sql.ErrNoRows {
|
||||||
err = errors.Wrap(err, "execute delete document content entry")
|
err = errors.Wrap(err, "execute delete document content entry")
|
||||||
s.Runtime.Log.Error(method, err)
|
s.Runtime.Log.Error(method, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Documents searches the documents that the client is allowed to see, using the keywords search string, then audits that search.
|
// Documents searches the documents that the client is allowed to see, using the keywords search string, then audits that search.
|
||||||
|
|
|
@ -745,6 +745,17 @@ func (h *Handler) Delete(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Close out the delete process
|
||||||
|
ctx.Transaction.Commit()
|
||||||
|
|
||||||
|
// Record this action.
|
||||||
|
ctx.Transaction, err = h.Runtime.Db.Beginx()
|
||||||
|
if err != nil {
|
||||||
|
response.WriteServerError(w, method, err)
|
||||||
|
h.Runtime.Log.Error(method, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
err = h.Store.Activity.RecordUserActivity(ctx, activity.UserActivity{
|
err = h.Store.Activity.RecordUserActivity(ctx, activity.UserActivity{
|
||||||
SpaceID: id,
|
SpaceID: id,
|
||||||
SourceType: activity.SourceTypeSpace,
|
SourceType: activity.SourceTypeSpace,
|
||||||
|
@ -754,8 +765,6 @@ func (h *Handler) Delete(w http.ResponseWriter, r *http.Request) {
|
||||||
h.Runtime.Log.Error(method, err)
|
h.Runtime.Log.Error(method, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx.Transaction.Commit()
|
|
||||||
|
|
||||||
h.Store.Audit.Record(ctx, audit.EventTypeSpaceDelete)
|
h.Store.Audit.Record(ctx, audit.EventTypeSpaceDelete)
|
||||||
|
|
||||||
event.Handler().Publish(string(event.TypeRemoveSpace))
|
event.Handler().Publish(string(event.TypeRemoveSpace))
|
||||||
|
|
|
@ -43,61 +43,57 @@ func (c *Context) Bind(sql string) string {
|
||||||
|
|
||||||
// Delete record.
|
// Delete record.
|
||||||
func (c *Context) Delete(tx *sqlx.Tx, table string, id string) (rows int64, err error) {
|
func (c *Context) Delete(tx *sqlx.Tx, table string, id string) (rows int64, err error) {
|
||||||
result, err := tx.Exec(c.Bind("DELETE FROM "+table+" WHERE c_refid=?"), id)
|
_, err = tx.Exec(c.Bind("DELETE FROM "+table+" WHERE c_refid=?"), id)
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
if err != nil && err != sql.ErrNoRows {
|
err = nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
err = errors.Wrap(err, fmt.Sprintf("unable to delete row in table %s", table))
|
err = errors.Wrap(err, fmt.Sprintf("unable to delete row in table %s", table))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
rows, err = result.RowsAffected()
|
|
||||||
err = nil
|
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteConstrained record constrained to Organization using refid.
|
// DeleteConstrained record constrained to Organization using refid.
|
||||||
func (c *Context) DeleteConstrained(tx *sqlx.Tx, table string, orgID, id string) (rows int64, err error) {
|
func (c *Context) DeleteConstrained(tx *sqlx.Tx, table string, orgID, id string) (rows int64, err error) {
|
||||||
result, err := tx.Exec(c.Bind("DELETE FROM "+table+" WHERE c_orgid=? AND c_refid=?"), orgID, id)
|
_, err = tx.Exec(c.Bind("DELETE FROM "+table+" WHERE c_orgid=? AND c_refid=?"), orgID, id)
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
if err != nil && err != sql.ErrNoRows {
|
err = nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
err = errors.Wrap(err, fmt.Sprintf("unable to delete row in table %s", table))
|
err = errors.Wrap(err, fmt.Sprintf("unable to delete row in table %s", table))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
rows, err = result.RowsAffected()
|
|
||||||
err = nil
|
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteConstrainedWithID record constrained to Organization using non refid.
|
// DeleteConstrainedWithID record constrained to Organization using non refid.
|
||||||
func (c *Context) DeleteConstrainedWithID(tx *sqlx.Tx, table string, orgID, id string) (rows int64, err error) {
|
func (c *Context) DeleteConstrainedWithID(tx *sqlx.Tx, table string, orgID, id string) (rows int64, err error) {
|
||||||
result, err := tx.Exec(c.Bind("DELETE FROM "+table+" WHERE c_orgid=? AND id=?"), orgID, id)
|
_, err = tx.Exec(c.Bind("DELETE FROM "+table+" WHERE c_orgid=? AND id=?"), orgID, id)
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
if err != nil && err != sql.ErrNoRows {
|
err = nil
|
||||||
err = errors.Wrap(err, fmt.Sprintf("unable to delete row in table %s", table))
|
}
|
||||||
|
if err != nil {
|
||||||
|
err = errors.Wrap(err, fmt.Sprintf("unable to delete rows by id: %s", id))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
rows, err = result.RowsAffected()
|
|
||||||
err = nil
|
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteWhere free form query.
|
// DeleteWhere free form query.
|
||||||
func (c *Context) DeleteWhere(tx *sqlx.Tx, statement string) (rows int64, err error) {
|
func (c *Context) DeleteWhere(tx *sqlx.Tx, statement string) (rows int64, err error) {
|
||||||
result, err := tx.Exec(statement)
|
_, err = tx.Exec(statement)
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
if err != nil && err != sql.ErrNoRows {
|
err = nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
err = errors.Wrap(err, fmt.Sprintf("unable to delete rows: %s", statement))
|
err = errors.Wrap(err, fmt.Sprintf("unable to delete rows: %s", statement))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
rows, err = result.RowsAffected()
|
|
||||||
err = nil
|
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -409,6 +409,7 @@ func (h *Handler) Use(w http.ResponseWriter, r *http.Request) {
|
||||||
// Clone categories.
|
// Clone categories.
|
||||||
cats, err := h.Store.Category.GetDocumentCategoryMembership(ctx, templateID)
|
cats, err := h.Store.Category.GetDocumentCategoryMembership(ctx, templateID)
|
||||||
if err != nil && err != sql.ErrNoRows {
|
if err != nil && err != sql.ErrNoRows {
|
||||||
|
ctx.Transaction.Rollback()
|
||||||
response.WriteServerError(w, method, err)
|
response.WriteServerError(w, method, err)
|
||||||
h.Runtime.Log.Error(method, err)
|
h.Runtime.Log.Error(method, err)
|
||||||
return
|
return
|
||||||
|
@ -422,6 +423,7 @@ func (h *Handler) Use(w http.ResponseWriter, r *http.Request) {
|
||||||
cc.SpaceID = d.SpaceID
|
cc.SpaceID = d.SpaceID
|
||||||
err = h.Store.Category.AssociateDocument(ctx, cc)
|
err = h.Store.Category.AssociateDocument(ctx, cc)
|
||||||
if err != nil && err != sql.ErrNoRows {
|
if err != nil && err != sql.ErrNoRows {
|
||||||
|
ctx.Transaction.Rollback()
|
||||||
response.WriteServerError(w, method, err)
|
response.WriteServerError(w, method, err)
|
||||||
h.Runtime.Log.Error(method, err)
|
h.Runtime.Log.Error(method, err)
|
||||||
return
|
return
|
||||||
|
|
|
@ -17,15 +17,36 @@ export default Component.extend(Notifier, {
|
||||||
appMeta: service(),
|
appMeta: service(),
|
||||||
browserSvc: service('browser'),
|
browserSvc: service('browser'),
|
||||||
buttonLabel: 'Run Backup',
|
buttonLabel: 'Run Backup',
|
||||||
|
backupSpec: null,
|
||||||
|
backupFilename: '',
|
||||||
|
backupError: false,
|
||||||
|
backupSuccess: false,
|
||||||
|
|
||||||
|
didReceiveAttrs() {
|
||||||
|
this._super(...arguments);
|
||||||
|
this.set('backupSpec', {
|
||||||
|
retain: true,
|
||||||
|
org: '*'
|
||||||
|
// org: this.get('appMeta.orgId')
|
||||||
|
});
|
||||||
|
},
|
||||||
|
|
||||||
actions: {
|
actions: {
|
||||||
onBackup() {
|
onBackup() {
|
||||||
this.showWait();
|
this.showWait();
|
||||||
this.set('buttonLabel', 'Please wait, backup running...');
|
this.set('buttonLabel', 'Please wait, backup running...');
|
||||||
|
this.set('backupFilename', '');
|
||||||
|
this.set('backupSuccess', false);
|
||||||
|
this.set('backupFailed', false);
|
||||||
|
|
||||||
this.get('onBackup')({}).then(() => {
|
this.get('onBackup')(this.get('backupSpec')).then((filename) => {
|
||||||
this.set('buttonLabel', 'Run Backup');
|
this.set('buttonLabel', 'Run Backup');
|
||||||
this.showDone();
|
this.showDone();
|
||||||
|
this.set('backupSuccess', true);
|
||||||
|
this.set('backupFilename', filename);
|
||||||
|
}, ()=> {
|
||||||
|
this.set('buttonLabel', 'Run Backup');
|
||||||
|
this.set('backupFailed', true);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -30,6 +30,7 @@
|
||||||
{{#if session.isGlobalAdmin}}
|
{{#if session.isGlobalAdmin}}
|
||||||
{{#link-to 'customize.license' activeClass='selected' class="tab tab-vertical" tagName="li" }}Product{{/link-to}}
|
{{#link-to 'customize.license' activeClass='selected' class="tab tab-vertical" tagName="li" }}Product{{/link-to}}
|
||||||
{{/if}}
|
{{/if}}
|
||||||
|
{{#link-to 'customize.backup' activeClass='selected' class="tab tab-vertical" tagName="li" }}Backup & Restore{{/link-to}}
|
||||||
</ul>
|
</ul>
|
||||||
</div>
|
</div>
|
||||||
{{/layout/middle-zone-sidebar}}
|
{{/layout/middle-zone-sidebar}}
|
||||||
|
|
|
@ -143,7 +143,7 @@ export default Service.extend({
|
||||||
|
|
||||||
// Run tenant level backup.
|
// Run tenant level backup.
|
||||||
backup(spec) {
|
backup(spec) {
|
||||||
return new EmberPromise((resolve) => {
|
return new EmberPromise((resolve, reject) => {
|
||||||
let url = this.get('appMeta.endpoint');
|
let url = this.get('appMeta.endpoint');
|
||||||
let token = this.get('sessionService.session.content.authenticated.token');
|
let token = this.get('sessionService.session.content.authenticated.token');
|
||||||
let uploadUrl = `${url}/global/backup?token=${token}`;
|
let uploadUrl = `${url}/global/backup?token=${token}`;
|
||||||
|
@ -162,18 +162,26 @@ export default Service.extend({
|
||||||
a.style = "display: none";
|
a.style = "display: none";
|
||||||
document.body.appendChild(a);
|
document.body.appendChild(a);
|
||||||
|
|
||||||
|
let filename = xhr.getResponseHeader('x-documize-filename').replace('"', '');
|
||||||
|
|
||||||
let url = window.URL.createObjectURL(blob);
|
let url = window.URL.createObjectURL(blob);
|
||||||
a.href = url;
|
a.href = url;
|
||||||
a.download = xhr.getResponseHeader('x-documize-filename').replace('"', '');
|
a.download = filename;
|
||||||
a.click();
|
a.click();
|
||||||
|
|
||||||
window.URL.revokeObjectURL(url);
|
window.URL.revokeObjectURL(url);
|
||||||
document.body.removeChild(a);
|
document.body.removeChild(a);
|
||||||
|
|
||||||
resolve();
|
resolve(filename);
|
||||||
|
} else {
|
||||||
|
reject();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
xhr.onerror= function() {
|
||||||
|
reject();
|
||||||
|
}
|
||||||
|
|
||||||
xhr.send(JSON.stringify(spec));
|
xhr.send(JSON.stringify(spec));
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,4 +1,3 @@
|
||||||
|
|
||||||
<div class="row">
|
<div class="row">
|
||||||
<div class="col">
|
<div class="col">
|
||||||
<div class="view-customize">
|
<div class="view-customize">
|
||||||
|
@ -12,7 +11,14 @@
|
||||||
<form class="mt-5 ">
|
<form class="mt-5 ">
|
||||||
<div class="form-group">
|
<div class="form-group">
|
||||||
<p>It can take several minutes to complete the backup process — please be patient while the backup is running.</p>
|
<p>It can take several minutes to complete the backup process — please be patient while the backup is running.</p>
|
||||||
<div class="btn btn-success mt-3" {{action 'onBackup'}}>{{buttonLabel}}</div>
|
<div class="btn btn-success my-5" {{action 'onBackup'}}>{{buttonLabel}}</div>
|
||||||
|
{{#if backupFailed}}
|
||||||
|
<p class="text-danger">Backup failed — please check server logs</p>
|
||||||
|
{{/if}}
|
||||||
|
{{#if backupSuccess}}
|
||||||
|
<p>Backup successful ({{backupFilename}})</p>
|
||||||
|
{{/if}}
|
||||||
</div>
|
</div>
|
||||||
</form>
|
</form>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
|
62
model/action/action.go
Normal file
62
model/action/action.go
Normal file
|
@ -0,0 +1,62 @@
|
||||||
|
// Copyright 2016 Documize Inc. <legal@documize.com>. All rights reserved.
|
||||||
|
//
|
||||||
|
// This software (Documize Community Edition) is licensed under
|
||||||
|
// GNU AGPL v3 http://www.gnu.org/licenses/agpl-3.0.en.html
|
||||||
|
//
|
||||||
|
// You can operate outside the AGPL restrictions by purchasing
|
||||||
|
// Documize Enterprise Edition and obtaining a commercial license
|
||||||
|
// by contacting <sales@documize.com>.
|
||||||
|
//
|
||||||
|
// https://documize.com
|
||||||
|
|
||||||
|
package action
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/documize/community/core/timeutil"
|
||||||
|
"github.com/documize/community/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UserAction represents an action that a user should perform on a document.
|
||||||
|
type UserAction struct {
|
||||||
|
model.BaseEntity
|
||||||
|
OrgID string `json:"orgId"`
|
||||||
|
DocumentID string `json:"documentId"`
|
||||||
|
UserID string `json:"userId"`
|
||||||
|
ActionType Type `json:"actionType"`
|
||||||
|
RefType string `json:"refType"` // page or attachment
|
||||||
|
RefTypeID string `json:"refTypeId"` // page or attachment ID
|
||||||
|
Note string `json:"note"`
|
||||||
|
RequestorID string `json:"requestorId"`
|
||||||
|
Requested time.Time `json:"requestedDate"`
|
||||||
|
Due time.Time `json:"dueDate"`
|
||||||
|
Completed timeutil.NullTime `json:"completedDate"`
|
||||||
|
IsComplete bool `json:"isComplete"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type determines type of action that has been requested of a user
|
||||||
|
type Type int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// ActionTypeRead document
|
||||||
|
ActionTypeRead Type = 1
|
||||||
|
|
||||||
|
// ActionTypeFeedback for a document
|
||||||
|
ActionTypeFeedback Type = 2
|
||||||
|
|
||||||
|
// ActionTypeContribute to document
|
||||||
|
ActionTypeContribute Type = 3
|
||||||
|
|
||||||
|
// ActionTypeApprovalRequest for a section change
|
||||||
|
ActionTypeApprovalRequest Type = 4
|
||||||
|
|
||||||
|
// ActionTypeApproved section change
|
||||||
|
ActionTypeApproved Type = 5
|
||||||
|
|
||||||
|
// ActionTypeRejected section change
|
||||||
|
ActionTypeRejected Type = 6
|
||||||
|
|
||||||
|
// ActionTypePublish content as Live
|
||||||
|
ActionTypePublish Type = 7
|
||||||
|
)
|
|
@ -79,4 +79,30 @@ const (
|
||||||
EventTypeGroupUpdate EventType = "updated-group"
|
EventTypeGroupUpdate EventType = "updated-group"
|
||||||
EventTypeGroupJoin EventType = "joined-group"
|
EventTypeGroupJoin EventType = "joined-group"
|
||||||
EventTypeGroupLeave EventType = "left-group"
|
EventTypeGroupLeave EventType = "left-group"
|
||||||
|
EventTypeSecureShare EventType = "shared-secure-document"
|
||||||
|
EventTypeFeedbackAdd EventType = "added-feedback"
|
||||||
|
EventTypeFeedbackEdit EventType = "edited-feedback"
|
||||||
|
EventTypePDF EventType = "generated-pdf"
|
||||||
|
EventTypeActionAdd EventType = "added-action"
|
||||||
|
EventTypeActionUpdate EventType = "updated-action"
|
||||||
|
EventTypeActionView EventType = "viewed-actions"
|
||||||
|
EventTypeActionDelete EventType = "removed-action"
|
||||||
|
EventTypeWorkflowApprovalRequested EventType = "request-approval"
|
||||||
|
EventTypeWorkflowApprovalWithdrawn EventType = "withdrew-approval"
|
||||||
|
EventTypeWorkflowDiscardChanges EventType = "discarded-changes"
|
||||||
|
EventTypeWorkflowApprovedChange EventType = "approved-change"
|
||||||
|
EventTypeWorkflowRejectedChange EventType = "rejected-change"
|
||||||
|
EventTypeWorkflowPublishRequested EventType = "requested-publication"
|
||||||
|
|
||||||
|
// EventTypeVersionAdd records addition of version
|
||||||
|
EventTypeVersionAdd EventType = "added-version"
|
||||||
|
|
||||||
|
// EventTypeVersionRemove records removal of version
|
||||||
|
EventTypeVersionRemove EventType = "removed-version"
|
||||||
|
|
||||||
|
// EventTypeVersionUnversion records disassociation of document from versioning group
|
||||||
|
EventTypeVersionUnversion EventType = "un-versioned-document"
|
||||||
|
|
||||||
|
// EventTypeVersionReorder records reordering of versions
|
||||||
|
EventTypeVersionReorder EventType = "reordered-version"
|
||||||
)
|
)
|
||||||
|
|
70
model/backup/backup.go
Normal file
70
model/backup/backup.go
Normal file
|
@ -0,0 +1,70 @@
|
||||||
|
// Copyright 2016 Documize Inc. <legal@documize.com>. All rights reserved.
|
||||||
|
//
|
||||||
|
// This software (Documize Community Edition) is licensed under
|
||||||
|
// GNU AGPL v3 http://www.gnu.org/licenses/agpl-3.0.en.html
|
||||||
|
//
|
||||||
|
// You can operate outside the AGPL restrictions by purchasing
|
||||||
|
// Documize Enterprise Edition and obtaining a commercial license
|
||||||
|
// by contacting <sales@documize.com>.
|
||||||
|
//
|
||||||
|
// https://documize.com
|
||||||
|
|
||||||
|
// Package backup handle data backup/restore to/from ZIP format.
|
||||||
|
package backup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/documize/community/core/env"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Manifest contains backup meta information.
|
||||||
|
type Manifest struct {
|
||||||
|
// ID is unique per backup.
|
||||||
|
ID string `json:"id"`
|
||||||
|
|
||||||
|
// A value of "*' means all tenants/oragnizations are backed up (requires global admin permission).
|
||||||
|
// A genuine ID means only that specific organization is backed up.
|
||||||
|
OrgID string `json:"org"`
|
||||||
|
|
||||||
|
// Product edition at the time of the backup.
|
||||||
|
Edition string `json:"edition"`
|
||||||
|
|
||||||
|
// When the backup took place.
|
||||||
|
Created time.Time `json:"created"`
|
||||||
|
|
||||||
|
// Product version at the time of the backup.
|
||||||
|
Major string `json:"major"`
|
||||||
|
Minor string `json:"minor"`
|
||||||
|
Patch string `json:"patch"`
|
||||||
|
Revision int `json:"revision"`
|
||||||
|
Version string `json:"version"`
|
||||||
|
|
||||||
|
// Database provider used by source system.
|
||||||
|
StoreType env.StoreType `json:"storeType"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExportSpec controls what data is exported to the backup file.
|
||||||
|
type ExportSpec struct {
|
||||||
|
// A value of "*' means all tenants/oragnizations are backed up (requires global admin permission).
|
||||||
|
// A genuine ID means only that specific organization is backed up.
|
||||||
|
OrgID string `json:"org"`
|
||||||
|
|
||||||
|
// Retain will keep the backup file on disk after operation is complete.
|
||||||
|
// File is located in the same folder as the running executable.
|
||||||
|
Retain bool `json:"retain"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SystemBackup happens if org ID is "*".
|
||||||
|
func (e *ExportSpec) SystemBackup() bool {
|
||||||
|
return e.OrgID == "*"
|
||||||
|
}
|
||||||
|
|
||||||
|
// ImportSpec controls what content is imported and how.
|
||||||
|
type ImportSpec struct {
|
||||||
|
// Overwrite current organization settings.
|
||||||
|
OverwriteOrg bool `json:"overwriteOrg"`
|
||||||
|
|
||||||
|
// Recreate users.
|
||||||
|
CreateUsers bool `json:"createUsers"`
|
||||||
|
}
|
Loading…
Add table
Add a link
Reference in a new issue