feat(database): add encryption support EE-1983 (#6316)
* bootstrap encryption key
* secret key message change in cli and secret key file content trimmed
* Migrate encryption code to latest version
* pull in newer code
* tidying up
* working data encryption layer
* fix tests
* remove stray comment
* fix a few minor issues and improve the comments
* split out databasefilename with param to two methods to be more obvious
* DB encryption integration (#6374)
  * json methods moved under DBConnection
  * store encryption fixed
  * cleaned
  * review comments addressed
  * newstore value fixed
  * backup test updated
  * logrus format config updated
  * Fix for newStore
  Co-authored-by: Matt Hook <hookenz@gmail.com>
* Minor improvements
* Improve the export code. Add missing webhook for import
* rename HelmUserRepositorys to HelmUserRepositories
* fix logging messages
* when starting portainer with a key (first use) http is disabled by default. But when starting fresh without a key, http is enabled?
* Fix bug for default settings on new installs

Co-authored-by: Prabhat Khera <prabhat.khera@portainer.io>
Co-authored-by: Prabhat Khera <91852476+prabhat-org@users.noreply.github.com>
This commit is contained in:
parent 59ec22f706
commit 34cc8ea96a

22 changed files with 548 additions and 147 deletions
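A quick orientation before the hunks: the centerpiece is the new loadEncryptionSecretKey function in the large hunk near the end of this page. Portainer reads a Docker secret from /run/secrets/<name> and hashes it down to the fixed 32-byte key AES needs. Below is a minimal standalone sketch of just that derivation step; deriveKey and the demo main are illustrative names, not part of the commit:

package main

import (
	"crypto/sha256"
	"fmt"
	"log"
	"os"
	"path"
)

// deriveKey mirrors the commit's key-derivation step: whatever the operator
// puts in the secret file, SHA-256 collapses it into exactly 32 bytes,
// the key size AES-256 requires.
func deriveKey(secretDir, keyfilename string) []byte {
	content, err := os.ReadFile(path.Join(secretDir, keyfilename))
	if err != nil {
		log.Printf("no usable secret: %v", err)
		return nil // the caller can then fall back to an unencrypted store
	}
	hash := sha256.Sum256(content)
	return hash[:]
}

func main() {
	// Demo only: use a throwaway directory instead of /run/secrets.
	dir, err := os.MkdirTemp("", "secrets")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)
	os.WriteFile(path.Join(dir, "portainer"), []byte("correct horse battery staple"), 0600)

	key := deriveKey(dir, "portainer")
	fmt.Printf("derived %d-byte key: %x\n", len(key), key)
}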
@@ -13,7 +13,7 @@ func importFromJson(fileService portainer.FileService, store *datastore.Store) {
 	importFile := "/data/import.json"
 	if exists, _ := fileService.FileExists(importFile); exists {
 		if err := store.Import(importFile); err != nil {
-			logrus.WithError(err).Debugf("import %s failed", importFile)
+			logrus.WithError(err).Debugf("Import %s failed", importFile)
 
 			// TODO: should really rollback on failure, but then we have nothing.
 		} else {
@@ -23,7 +23,7 @@ func importFromJson(fileService portainer.FileService, store *datastore.Store) {
 			// I also suspect that everything from "Init to Init" is potentially a migration
 			err := store.Init()
 			if err != nil {
-				log.Fatalf("failed initializing data store: %v", err)
+				log.Fatalf("Failed initializing data store: %v", err)
 			}
 		}
 	}
@@ -1,18 +1,41 @@
 package main
 
 import (
+	"fmt"
 	"log"
+	"strings"
 
 	"github.com/sirupsen/logrus"
 )
 
+type portainerFormatter struct {
+	logrus.TextFormatter
+}
+
+func (f *portainerFormatter) Format(entry *logrus.Entry) ([]byte, error) {
+	var levelColor int
+	switch entry.Level {
+	case logrus.DebugLevel, logrus.TraceLevel:
+		levelColor = 31 // gray
+	case logrus.WarnLevel:
+		levelColor = 33 // yellow
+	case logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel:
+		levelColor = 31 // red
+	default:
+		levelColor = 36 // blue
+	}
+	return []byte(fmt.Sprintf("\x1b[%dm%s\x1b[0m %s %s\n", levelColor, strings.ToUpper(entry.Level.String()), entry.Time.Format(f.TimestampFormat), entry.Message)), nil
+}
+
 func configureLogger() {
 	logger := logrus.New() // logger is to implicitly substitute stdlib's log
 	log.SetOutput(logger.Writer())
 
 	formatter := &logrus.TextFormatter{DisableTimestamp: true, DisableLevelTruncation: true}
+	formatterLogrus := &portainerFormatter{logrus.TextFormatter{DisableTimestamp: false, DisableLevelTruncation: true, TimestampFormat: "2006/01/02 15:04:05", FullTimestamp: true}}
 
 	logger.SetFormatter(formatter)
-	logrus.SetFormatter(formatter)
+	logrus.SetFormatter(formatterLogrus)
 
+	logger.SetLevel(logrus.DebugLevel)
 	logrus.SetLevel(logrus.DebugLevel)
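The \x1b[%dm...\x1b[0m pair in Format above is a plain ANSI SGR escape: paint the level name in the chosen color, then reset. (SGR 31 is red on most terminals, so the "// gray" comment on the debug branch looks like a leftover; gray is usually SGR 90.) A self-contained sketch of the same output shape, outside logrus; all values here are illustrative:

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	// 33 = yellow, matching the WarnLevel branch of portainerFormatter.
	levelColor := 33
	level := "warning"
	msg := "no encryption key provided"

	// Same layout the formatter emits: colored LEVEL, timestamp, message.
	fmt.Printf("\x1b[%dm%s\x1b[0m %s %s\n",
		levelColor, strings.ToUpper(level),
		time.Now().Format("2006/01/02 15:04:05"), msg)
}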
@@ -2,6 +2,7 @@ package main
 
 import (
 	"context"
+	"crypto/sha256"
 	"fmt"
 	"log"
 	"os"
@@ -47,12 +48,12 @@ func initCLI() *portainer.CLIFlags {
 	var cliService portainer.CLIService = &cli.Service{}
 	flags, err := cliService.ParseFlags(portainer.APIVersion)
 	if err != nil {
-		log.Fatalf("failed parsing flags: %v", err)
+		log.Fatalf("Failed parsing flags: %v", err)
 	}
 
 	err = cliService.ValidateFlags(flags)
 	if err != nil {
-		log.Fatalf("failed validating flags:%v", err)
+		log.Fatalf("Failed validating flags:%v", err)
 	}
 	return flags
 }
@@ -60,26 +61,26 @@ func initCLI() *portainer.CLIFlags {
 func initFileService(dataStorePath string) portainer.FileService {
 	fileService, err := filesystem.NewService(dataStorePath, "")
 	if err != nil {
-		log.Fatalf("failed creating file service: %v", err)
+		log.Fatalf("Failed creating file service: %v", err)
 	}
 	return fileService
 }
 
-func initDataStore(flags *portainer.CLIFlags, fileService portainer.FileService, shutdownCtx context.Context) dataservices.DataStore {
-	connection, err := database.NewDatabase("boltdb", *flags.Data)
+func initDataStore(flags *portainer.CLIFlags, secretKey []byte, fileService portainer.FileService, shutdownCtx context.Context) dataservices.DataStore {
+	connection, err := database.NewDatabase("boltdb", *flags.Data, secretKey)
 	if err != nil {
-		panic(err)
+		panic(err.Error())
 	}
 	store := datastore.NewStore(*flags.Data, fileService, connection)
 	isNew, err := store.Open()
 	if err != nil {
-		log.Fatalf("failed opening store: %v", err)
+		log.Fatalf("Failed opening store: %v", err)
 	}
 
 	if *flags.Rollback {
 		err := store.Rollback(false)
 		if err != nil {
-			log.Fatalf("failed rolling back: %s", err)
+			log.Fatalf("Failed rolling back: %v", err)
 		}
 
 		log.Println("Exiting rollback")
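Two mechanical edits recur through the hunks from here on: log messages gain a capital first letter, and most (not all) error verbs move from %s to %v. For a non-nil error both verbs print err.Error(), but %v degrades more gracefully if a nil error ever reaches the logger. A tiny illustration, not from the commit:

package main

import "fmt"

func main() {
	var err error // a nil error that slipped through

	fmt.Printf("%v\n", err) // prints: <nil>
	fmt.Printf("%s\n", err) // prints: %!s(<nil>)
}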
@@ -90,31 +91,27 @@ func initDataStore(flags *portainer.CLIFlags, fileService portainer.FileService,
 	// Init sets some defaults - its basically a migration
 	err = store.Init()
 	if err != nil {
-		log.Fatalf("failed initializing data store: %v", err)
+		log.Fatalf("Failed initializing data store: %v", err)
 	}
 
 	if isNew {
 		// from MigrateData
 		store.VersionService.StoreDBVersion(portainer.DBVersion)
 
 		// Disabled for now. Can't use feature flags due to the way that works
 		// EXPERIMENTAL, will only activate if `/data/import.json` exists
 		//importFromJson(fileService, store)
 
 		err := updateSettingsFromFlags(store, flags)
 		if err != nil {
-			log.Fatalf("failed updating settings from flags: %v", err)
+			log.Fatalf("Failed updating settings from flags: %v", err)
 		}
-	}
-
-	storedVersion, err := store.VersionService.DBVersion()
-	if err != nil {
-		log.Fatalf("Something failed during creation of new database: %v", err)
-	}
-	if storedVersion != portainer.DBVersion {
-		err = store.MigrateData()
+	} else {
+		storedVersion, err := store.VersionService.DBVersion()
 		if err != nil {
-			log.Fatalf("failed migration: %v", err)
+			log.Fatalf("Something Failed during creation of new database: %v", err)
 		}
+		if storedVersion != portainer.DBVersion {
+			err = store.MigrateData()
+			if err != nil {
+				log.Fatalf("Failed migration: %v", err)
+			}
+		}
 	}
 
@@ -127,7 +124,7 @@ func initDataStore(flags *portainer.CLIFlags, fileService portainer.FileService,
 
 		err := store.Export(exportFilename)
 		if err != nil {
-			logrus.WithError(err).Debugf("failed to export to %s", exportFilename)
+			logrus.WithError(err).Debugf("Failed to export to %s", exportFilename)
 		} else {
 			logrus.Debugf("exported to %s", exportFilename)
 		}
@@ -139,7 +136,7 @@ func initDataStore(flags *portainer.CLIFlags, fileService portainer.FileService,
 func initComposeStackManager(assetsPath string, configPath string, reverseTunnelService portainer.ReverseTunnelService, proxyManager *proxy.Manager) portainer.ComposeStackManager {
 	composeWrapper, err := exec.NewComposeStackManager(assetsPath, configPath, proxyManager)
 	if err != nil {
-		log.Fatalf("failed creating compose manager: %s", err)
+		log.Fatalf("Failed creating compose manager: %v", err)
 	}
 
 	return composeWrapper
@@ -347,7 +344,7 @@ func generateAndStoreKeyPair(fileService portainer.FileService, signatureService
 func initKeyPair(fileService portainer.FileService, signatureService portainer.DigitalSignatureService) error {
 	existingKeyPair, err := fileService.KeyPairFilesExist()
 	if err != nil {
-		log.Fatalf("failed checking for existing key pair: %v", err)
+		log.Fatalf("Failed checking for existing key pair: %v", err)
 	}
 
 	if existingKeyPair {
@@ -491,19 +488,40 @@ func initEndpoint(flags *portainer.CLIFlags, dataStore dataservices.DataStore, s
 	return createUnsecuredEndpoint(*flags.EndpointURL, dataStore, snapshotService)
 }
 
+func loadEncryptionSecretKey(keyfilename string) []byte {
+	content, err := os.ReadFile(path.Join("/run/secrets", keyfilename))
+	if err != nil {
+		if os.IsNotExist(err) {
+			log.Printf("Encryption key file `%s` not present", keyfilename)
+		} else {
+			log.Printf("Error reading encryption key file: %v", err)
+		}
+
+		return nil
+	}
+
+	// return a 32 byte hash of the secret (required for AES)
+	hash := sha256.Sum256(content)
+	return hash[:]
+}
+
 func buildServer(flags *portainer.CLIFlags) portainer.Server {
 	shutdownCtx, shutdownTrigger := context.WithCancel(context.Background())
 
 	fileService := initFileService(*flags.Data)
+	encryptionKey := loadEncryptionSecretKey(*flags.SecretKeyName)
+	if encryptionKey == nil {
+		log.Println("proceeding without encryption key")
+	}
 
-	dataStore := initDataStore(flags, fileService, shutdownCtx)
+	dataStore := initDataStore(flags, encryptionKey, fileService, shutdownCtx)
 
 	if err := dataStore.CheckCurrentEdition(); err != nil {
 		log.Fatal(err)
 	}
 	instanceID, err := dataStore.Version().InstanceID()
 	if err != nil {
-		log.Fatalf("failed getting instance id: %v", err)
+		log.Fatalf("Failed getting instance id: %v", err)
 	}
 
 	apiKeyService := initAPIKeyService(dataStore)
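The "return a 32 byte hash of the secret (required for AES)" comment above is the contract between this loader and the encrypted store: AES-256 takes exactly 32 key bytes. The encryption layer itself lives in files not shown on this page, so the following is only a hedged sketch of how such a key is typically used with AES-GCM in Go; seal and open are illustrative names, not Portainer's API:

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

// seal encrypts plaintext with AES-256-GCM; the random nonce is prepended
// to the ciphertext so open can recover it later.
func seal(key, plaintext []byte) ([]byte, error) {
	block, err := aes.NewCipher(key) // a 32-byte key selects AES-256
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}
	return gcm.Seal(nonce, nonce, plaintext, nil), nil
}

// open splits off the nonce and decrypts the remainder.
func open(key, sealed []byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	n := gcm.NonceSize()
	if len(sealed) < n {
		return nil, fmt.Errorf("ciphertext too short")
	}
	return gcm.Open(nil, sealed[:n], sealed[n:], nil)
}

func main() {
	key := sha256.Sum256([]byte("my-docker-secret")) // as loadEncryptionSecretKey does
	sealed, err := seal(key[:], []byte(`{"Settings":{}}`))
	if err != nil {
		panic(err)
	}
	plain, err := open(key[:], sealed)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain))
}

Prepending the nonce to the ciphertext, as seal does here, keeps each sealed blob self-describing, so no separate nonce bookkeeping is needed.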
@@ -514,12 +532,12 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
 	}
 	jwtService, err := initJWTService(settings.UserSessionTimeout, dataStore)
 	if err != nil {
-		log.Fatalf("failed initializing JWT service: %v", err)
+		log.Fatalf("Failed initializing JWT service: %v", err)
 	}
 
 	err = enableFeaturesFromFlags(dataStore, flags)
 	if err != nil {
-		log.Fatalf("failed enabling feature flag: %v", err)
+		log.Fatalf("Failed enabling feature flag: %v", err)
 	}
 
 	ldapService := initLDAPService()
@@ -538,12 +556,12 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
 
 	sslSettings, err := sslService.GetSSLSettings()
 	if err != nil {
-		log.Fatalf("failed to get ssl settings: %s", err)
+		log.Fatalf("Failed to get ssl settings: %s", err)
 	}
 
 	err = initKeyPair(fileService, digitalSignatureService)
 	if err != nil {
-		log.Fatalf("failed initializing key pair: %v", err)
+		log.Fatalf("Failed initializing key pair: %v", err)
 	}
 
 	reverseTunnelService := chisel.NewService(dataStore, shutdownCtx)
@@ -553,7 +571,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
 
 	snapshotService, err := initSnapshotService(*flags.SnapshotInterval, dataStore, dockerClientFactory, kubernetesClientFactory, shutdownCtx)
 	if err != nil {
-		log.Fatalf("failed initializing snapshot service: %v", err)
+		log.Fatalf("Failed initializing snapshot service: %v", err)
 	}
 	snapshotService.Start()
 
@@ -574,37 +592,37 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
 
 	swarmStackManager, err := initSwarmStackManager(*flags.Assets, dockerConfigPath, digitalSignatureService, fileService, reverseTunnelService, dataStore)
 	if err != nil {
-		log.Fatalf("failed initializing swarm stack manager: %s", err)
+		log.Fatalf("Failed initializing swarm stack manager: %v", err)
 	}
 
 	kubernetesDeployer := initKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, digitalSignatureService, proxyManager, *flags.Assets)
 
 	helmPackageManager, err := initHelmPackageManager(*flags.Assets)
 	if err != nil {
-		log.Fatalf("failed initializing helm package manager: %s", err)
+		log.Fatalf("Failed initializing helm package manager: %v", err)
 	}
 
 	err = edge.LoadEdgeJobs(dataStore, reverseTunnelService)
 	if err != nil {
-		log.Fatalf("failed loading edge jobs from database: %v", err)
+		log.Fatalf("Failed loading edge jobs from database: %v", err)
 	}
 
 	applicationStatus := initStatus(instanceID)
 
 	err = initEndpoint(flags, dataStore, snapshotService)
 	if err != nil {
-		log.Fatalf("failed initializing environment: %v", err)
+		log.Fatalf("Failed initializing environment: %v", err)
 	}
 
 	adminPasswordHash := ""
 	if *flags.AdminPasswordFile != "" {
 		content, err := fileService.GetFileContent(*flags.AdminPasswordFile, "")
 		if err != nil {
-			log.Fatalf("failed getting admin password file: %v", err)
+			log.Fatalf("Failed getting admin password file: %v", err)
 		}
 		adminPasswordHash, err = cryptoService.Hash(strings.TrimSuffix(string(content), "\n"))
 		if err != nil {
-			log.Fatalf("failed hashing admin password: %v", err)
+			log.Fatalf("Failed hashing admin password: %v", err)
 		}
 	} else if *flags.AdminPassword != "" {
 		adminPasswordHash = *flags.AdminPassword
@@ -613,7 +631,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
 	if adminPasswordHash != "" {
 		users, err := dataStore.User().UsersByRole(portainer.AdministratorRole)
 		if err != nil {
-			log.Fatalf("failed getting admin user: %v", err)
+			log.Fatalf("Failed getting admin user: %v", err)
 		}
 
 		if len(users) == 0 {
@@ -625,7 +643,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
 			}
 			err := dataStore.User().Create(user)
 			if err != nil {
-				log.Fatalf("failed creating admin user: %v", err)
+				log.Fatalf("Failed creating admin user: %v", err)
 			}
 		} else {
 			log.Println("Instance already has an administrator user defined. Skipping admin password related flags.")
@@ -634,12 +652,12 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
 
 	err = reverseTunnelService.StartTunnelServer(*flags.TunnelAddr, *flags.TunnelPort, snapshotService)
 	if err != nil {
-		log.Fatalf("failed starting tunnel server: %s", err)
+		log.Fatalf("Failed starting tunnel server: %v", err)
 	}
 
 	sslDBSettings, err := dataStore.SSLSettings().Settings()
 	if err != nil {
-		log.Fatalf("failed to fetch ssl settings from DB")
+		log.Fatalf("Failed to fetch ssl settings from DB")
 	}
 
 	scheduler := scheduler.NewScheduler(shutdownCtx)
@@ -692,6 +710,6 @@ func main() {
 		server := buildServer(flags)
 		log.Printf("[INFO] [cmd,main] Starting Portainer version %s\n", portainer.APIVersion)
 		err := server.Start()
-		log.Printf("[INFO] [cmd,main] Http server exited: %s\n", err)
+		log.Printf("[INFO] [cmd,main] Http server exited: %v\n", err)
 	}
 }