mirror of https://github.com/documize/community.git
synced 2025-07-23 07:09:43 +02:00

[WIP] Backup process outline

This commit is contained in:
parent 8bbb0d3e82
commit 4094677792
18 changed files with 678 additions and 220 deletions
@@ -9,11 +9,29 @@
//
// https://documize.com

// Package backup handles data backup/restore to/from ZIP format.
package backup

// Documize data is all held in the SQL database in relational format.
// The objective is to export the data into a compressed file that
// can be restored again as required.
//
// This allows for the following scenarios to be supported:
//
// 1. Copying data from one Documize instance to another.
// 2. Changing database provider (e.g. from MySQL to PostgreSQL).
// 3. Moving between Documize Cloud and self-hosted instances.
// 4. GDPR compliance (send copy of data and nuke whatever remains).
// 5. Setting up sample Documize instance with pre-defined content.
//
// The initial implementation is restricted to tenant or global
// backup/restore operations and can only be performed by a verified
// Global Administrator.
//
// In future the process should be able to support per-space backup/restore
// operations. This is subject to further review.

import (
	"archive/zip"
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
@@ -23,10 +41,10 @@ import (
	"github.com/documize/community/core/env"
	"github.com/documize/community/core/response"
	"github.com/documize/community/core/streamutil"
	"github.com/documize/community/core/uniqueid"
	"github.com/documize/community/domain"
	indexer "github.com/documize/community/domain/search"
	"github.com/documize/community/domain/store"
	m "github.com/documize/community/model/backup"
)

// Handler contains the runtime information such as logging and database.
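The Handler type itself does not appear in this diff. Judging from how it is used below (h.Runtime for logging, h.Store for data access, both copied into backerHandler), it presumably looks something like the sketch below; the exact field set is an assumption, not taken from the commit.

// Assumed shape of the endpoint handler; not part of this diff.
type Handler struct {
	Runtime *env.Runtime // runtime services such as logging and configuration
	Store   *store.Store // data access layer used to read the exported records
}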
@@ -57,7 +75,7 @@ func (h *Handler) Backup(w http.ResponseWriter, r *http.Request) {
		return
	}

	spec := backupSpec{}
	spec := m.ExportSpec{}
	err = json.Unmarshal(body, &spec)
	if err != nil {
		response.WriteBadRequestError(w, method, err.Error())
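The m.ExportSpec model referenced above is not defined in this diff. Based on how it is used (decoded from the request body here, then checked via spec.Retain at the end of the handler), a minimal sketch could look like this; only the Retain field is evidenced by the diff, and the JSON tag is an assumption.

// Minimal sketch of the export specification decoded from the request body.
type ExportSpec struct {
	// Retain controls whether the generated ZIP file is kept on disk
	// after it has been written to the HTTP response.
	Retain bool `json:"retain"`
}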
@@ -65,31 +83,36 @@ func (h *Handler) Backup(w http.ResponseWriter, r *http.Request) {
		return
	}

	// data, err := backup(ctx, *h.Store, spec)
	// if err != nil {
	// response.WriteServerError(w, method, err)
	// h.Runtime.Log.Error(method, err)
	// return
	// }
	bh := backerHandler{Runtime: h.Runtime, Store: h.Store, Context: ctx, Spec: spec}

	// Filename is current timestamp
	fn := fmt.Sprintf("dmz-backup-%s.zip", uniqueid.Generate())

	ziptest(fn)

	bb, err := ioutil.ReadFile(fn)
	// Produce zip file on disk.
	filename, err := bh.GenerateBackup()
	if err != nil {
		response.WriteServerError(w, method, err)
		h.Runtime.Log.Error(method, err)
		return
	}

	w.Header().Set("Content-Type", "application/zip")
	w.Header().Set("Content-Disposition", `attachment; filename="`+fn+`" ; `+`filename*="`+fn+`"`)
	w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bb)))
	w.Header().Set("x-documize-filename", fn)
	// Read backup file into memory.
	// DEBT: write file directly to HTTP response stream?
	bk, err := ioutil.ReadFile(filename)
	if err != nil {
		response.WriteServerError(w, method, err)
		h.Runtime.Log.Error(method, err)
		return
	}

	x, err := w.Write(bb)
	// Standard HTTP headers.
	w.Header().Set("Content-Type", "application/zip")
	w.Header().Set("Content-Disposition", `attachment; filename="`+filename+`" ; `+`filename*="`+filename+`"`)
	w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bk)))
	// Custom HTTP header helps API consumer to extract backup filename cleanly
	// instead of parsing 'Content-Disposition' header.
	// This HTTP header is CORS white-listed.
	w.Header().Set("x-documize-filename", filename)

	// Write backup to response stream.
	x, err := w.Write(bk)
	if err != nil {
		response.WriteServerError(w, method, err)
		h.Runtime.Log.Error(method, err)
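The DEBT note above asks whether the backup could be written straight to the HTTP response instead of being loaded into memory with ioutil.ReadFile. One way to do that, sketched below with a hypothetical helper name and only standard-library calls; it would need "io" added to the import block (os is already used elsewhere in the file):

// streamBackup copies the ZIP file straight to the HTTP response,
// avoiding the in-memory read flagged in the DEBT comment above.
func streamBackup(w http.ResponseWriter, filename string) (int64, error) {
	f, err := os.Open(filename)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	// Content-Length comes from file metadata instead of len(bk).
	fi, err := f.Stat()
	if err != nil {
		return 0, err
	}
	w.Header().Set("Content-Type", "application/zip")
	w.Header().Set("Content-Disposition", `attachment; filename="`+filename+`"`)
	w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size()))
	w.Header().Set("x-documize-filename", filename)

	// io.Copy uses a fixed-size buffer, so memory use stays flat no matter
	// how large the backup archive grows.
	return io.Copy(w, f)
}

The returned byte count could feed the same completion log the handler already writes.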
@@ -97,90 +120,10 @@ func (h *Handler) Backup(w http.ResponseWriter, r *http.Request) {
	}

	w.WriteHeader(http.StatusOK)

	h.Runtime.Log.Info(fmt.Sprintf("Backup completed for %s by %s, size %d", ctx.OrgID, ctx.UserID, x))
}

type backupSpec struct {
}

func backup(ctx domain.RequestContext, s store.Store, spec backupSpec) (file []byte, err error) {
	buf := new(bytes.Buffer)
	zw := zip.NewWriter(buf)

	// Add some files to the archive.
	var files = []struct {
		Name, Body string
	}{
		{"readme.txt", "This archive contains some text files."},
		{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
		{"todo.txt", "Get animal handling licence.\nWrite more examples."},
	}

	for _, file := range files {
		f, err := zw.Create(file.Name)
		if err != nil {
			return nil, err
		}

		_, err = f.Write([]byte(file.Body))
		if err != nil {
			return nil, err
		}
	}

	// Make sure to check the error on Close.
	err = zw.Close()
	if err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

func ziptest(filename string) {
	// Create a file to write the archive buffer to
	// Could also use an in memory buffer.
	outFile, err := os.Create(filename)
	if err != nil {
		fmt.Println(err)
	}
	defer outFile.Close()

	// Create a zip writer on top of the file writer
	zipWriter := zip.NewWriter(outFile)

	// Add files to archive
	// We use some hard coded data to demonstrate,
	// but you could iterate through all the files
	// in a directory and pass the name and contents
	// of each file, or you can take data from your
	// program and write it into the archive
	// without writing it to disk first.
	var filesToArchive = []struct {
		Name, Body string
	}{
		{"test.txt", "String contents of file"},
		{"test2.txt", "\x61\x62\x63\n"},
	}

	// Create and write files to the archive, which in turn
	// are getting written to the underlying writer to the
	// .zip file we created at the beginning
	for _, file := range filesToArchive {
		fileWriter, err := zipWriter.Create(file.Name)
		if err != nil {
			fmt.Println(err)
		}
		_, err = fileWriter.Write([]byte(file.Body))
		if err != nil {
			fmt.Println(err)
		}
	}

	// Clean up
	err = zipWriter.Close()
	if err != nil {
		fmt.Println(err)
	// Delete backup file if not requested to keep it.
	if !spec.Retain {
		os.Remove(filename)
	}
}
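For API consumers, the x-documize-filename header set in the handler above is the intended way to name the downloaded archive. As an illustration only, a client could look roughly like the following; the endpoint path, authentication header, and request body are placeholders rather than part of this commit:

package main

import (
	"io"
	"net/http"
	"os"
	"strings"
)

func main() {
	// Placeholder route and token: substitute the real backup endpoint and credentials.
	req, err := http.NewRequest("POST", "https://docs.example.com/api/backup", strings.NewReader(`{"retain": true}`))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Prefer the CORS white-listed custom header over parsing Content-Disposition.
	name := resp.Header.Get("x-documize-filename")
	if name == "" {
		name = "dmz-backup.zip"
	}

	out, err := os.Create(name)
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// Stream the ZIP straight to disk.
	if _, err := io.Copy(out, resp.Body); err != nil {
		panic(err)
	}
}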