mirror of https://github.com/portainer/portainer.git synced 2025-08-07 23:05:26 +02:00

Compare commits


24 commits

Author · SHA1 · Message · Date
Steven Kang · b46bdac85b · chore: bump 2.27.1 - rel 227 (#469) · 2025-02-27 11:26:22 +13:00
Oscar Zhou · 52afa6cf67 · fix(libstack): miss to read default .env file [BE-11638] (#460) · 2025-02-26 13:00:36 +13:00
Steven Kang · 1abb77aea5 · fix: cve-2024-50338 - release 2.27 (#462) · 2025-02-25 12:55:52 +13:00
Steven Kang · ab824da5d7 · chore: bump version to 2.27.0 - release 2.27 (#446) · 2025-02-20 09:42:54 +13:00
Viktor Pettersson · ded33a33a0 · fix(edge): configure persisted mTLS certificates on start-up [BE-11622] (#440) · 2025-02-19 14:46:44 +13:00
  Co-authored-by: andres-portainer <andres-portainer@users.noreply.github.com>
  Co-authored-by: oscarzhou <oscar.zhou@portainer.io>
  Co-authored-by: Oscar Zhou <100548325+oscarzhou-portainer@users.noreply.github.com>
Steven Kang · 4bd9569e63 · version: bump version to 2.27.0-rc3 - release 2.27 (#427) · 2025-02-14 08:39:05 +13:00
LP B · 9e04145875 · fix(swarm): fix the Host field when listing images (#369) · 2025-02-12 00:47:50 +01:00
  Co-authored-by: andres-portainer <andres-portainer@users.noreply.github.com>
Oscar Zhou · 3c6f61134e · fix(platform): remove error log when local env is not found [BE-11353] (#375) · 2025-02-12 09:24:08 +13:00
Steven Kang · 9ac8641f7e · workaround: leave the globally set helm repo to empty and add disclaimer - release 2.27 (#410) · 2025-02-11 15:36:33 +13:00
Oscar Zhou · 0fddedc1a9 · fix(podman): missing filter in homepage [BE-11502] (#405) · 2025-02-10 21:08:41 +13:00
Oscar Zhou · 2e6a3a42be · fix(setting): failed to persist edge computer setting [BE-11403] (#396) · 2025-02-10 21:05:20 +13:00
Steven Kang · a245e93902 · remove deprecated api endpoints - release 2.27 [BE-11510] (#400) · 2025-02-10 10:46:48 +13:00
Steven Kang · d1f48ce043 · feat: improve diagnostics stability - release 2.27 (#398) · 2025-02-10 10:45:43 +13:00
Steven Kang · 2c1156da75 · version: bump version to 2.27.0-rc2 - release 2.27 (#403) · 2025-02-07 14:47:54 +13:00
Steven Kang · 5ed95ce714 · chore: bump go version to 1.23.5 release 2.27 (#393) · 2025-02-07 08:48:22 +13:00
viktigpetterr · 3e5ec79b21 · fix(endpoints): use the post method for batch delete API operations [BE-11573] (#397) · 2025-02-06 18:17:13 +01:00
Steven Kang · 157c83deee · security: cve-2025-21613 release 227 (#391) · 2025-02-05 15:56:35 +13:00
Oscar Zhou · 2865fd6b84 · fix(edge): check all endpoint_relation db query logic [BE-11602] (#379) · 2025-02-05 15:20:27 +13:00
Steven Kang · 96285817ab · security: cve-2024-45338 release 2.27 (#387) · 2025-02-05 15:03:42 +13:00
Oscar Zhou · c2c1ac70f8 · fix(libstack): cannot open std edge stack log page [BE-11603] (#385) · 2025-02-05 12:17:26 +13:00
James Player · b73f846397 · fix(datatables): "Select all" should select only elements of the current page (#377) · 2025-02-04 15:51:11 +13:00
Oscar Zhou · a43bb23bef · fix(edgegroup): failed to associate env to static edge group [BE-11599] (#374) · 2025-02-04 09:41:19 +13:00
LP B · c93b2fedb4 · fix(app/edge): edge stacks webhooks cannot be disabled once created (#373) · 2025-02-03 20:50:31 +01:00
LP B · 156b223287 · fix(api/edge): backend panic on edge stack removal (#370) · 2025-02-03 20:25:31 +01:00
860 changed files with 13218 additions and 39838 deletions

View file

@ -2,6 +2,7 @@ name: Bug Report
description: Create a report to help us improve.
labels: kind/bug,bug/need-confirmation
body:
- type: markdown
attributes:
value: |
@ -91,29 +92,9 @@ body:
- type: dropdown
attributes:
label: Portainer version
description: We only provide support for current versions of Portainer as per the lifecycle policy linked above. If you are on an older version of Portainer we recommend [updating first](https://docs.portainer.io/start/upgrade) in case your bug has already been fixed.
description: We only provide support for current versions of Portainer as per the lifecycle policy linked above. If you are on an older version of Portainer we recommend [upgrading first](https://docs.portainer.io/start/upgrade) in case your bug has already been fixed.
multiple: false
options:
- '2.32.0'
- '2.31.3'
- '2.31.2'
- '2.31.1'
- '2.31.0'
- '2.30.1'
- '2.30.0'
- '2.29.2'
- '2.29.1'
- '2.29.0'
- '2.28.1'
- '2.28.0'
- '2.27.9'
- '2.27.8'
- '2.27.7'
- '2.27.6'
- '2.27.5'
- '2.27.4'
- '2.27.3'
- '2.27.2'
- '2.27.1'
- '2.27.0'
- '2.26.1'
@ -130,6 +111,20 @@ body:
- '2.21.2'
- '2.21.1'
- '2.21.0'
- '2.20.3'
- '2.20.2'
- '2.20.1'
- '2.20.0'
- '2.19.5'
- '2.19.4'
- '2.19.3'
- '2.19.2'
- '2.19.1'
- '2.19.0'
- '2.18.4'
- '2.18.3'
- '2.18.2'
- '2.18.1'
validations:
required: true

View file

@ -12,18 +12,8 @@ linters:
- copyloopvar
- intrange
- perfsprint
- ineffassign
- bodyclose
- forbidigo
linters-settings:
forbidigo:
analyze-types: true
forbid:
- p: ^tls\.Config$
msg: 'Use crypto.CreateTLSConfiguration() instead'
- p: ^tls\.Config\.(InsecureSkipVerify|MinVersion|MaxVersion|CipherSuites|CurvePreferences)$
msg: 'Do not set this field directly, use crypto.CreateTLSConfiguration() instead'
depguard:
rules:
main:

View file

@ -8,9 +8,9 @@ Portainer consists of a single container that can run on any cluster. It can be
**Portainer Business Edition** builds on the open-source base and includes a range of advanced features and functions (like RBAC and Support) that are specific to the needs of business users.
- [Compare Portainer CE and Compare Portainer BE](https://www.portainer.io/features)
- [Compare Portainer CE and Compare Portainer BE](https://portainer.io/products)
- [Take3 get 3 free nodes of Portainer Business for as long as you want them](https://www.portainer.io/take-3)
- [Portainer BE install guide](https://academy.portainer.io/install/)
- [Portainer BE install guide](https://install.portainer.io)
## Latest Version
@ -20,19 +20,22 @@ Portainer CE is updated regularly. We aim to do an update release every couple o
## Getting started
- [Deploy Portainer](https://docs.portainer.io/start/install-ce)
- [Deploy Portainer](https://docs.portainer.io/start/install)
- [Documentation](https://docs.portainer.io)
- [Contribute to the project](https://docs.portainer.io/contribute/contribute)
## Features & Functions
View [this](https://www.portainer.io/features) table to see all of the Portainer CE functionality and compare to Portainer Business.
View [this](https://www.portainer.io/products) table to see all of the Portainer CE functionality and compare to Portainer Business.
- [Portainer CE for Docker / Docker Swarm](https://www.portainer.io/solutions/docker)
- [Portainer CE for Kubernetes](https://www.portainer.io/solutions/kubernetes-ui)
## Getting help
Portainer CE is an open source project and is supported by the community. You can buy a supported version of Portainer at portainer.io
Learn more about Portainer's community support channels [here.](https://www.portainer.io/resources/get-help/get-support)
Learn more about Portainer's community support channels [here.](https://www.portainer.io/get-support-for-portainer)
- Issues: https://github.com/portainer/portainer/issues
- Slack (chat): [https://portainer.io/slack](https://portainer.io/slack)
@ -50,13 +53,13 @@ You can join the Portainer Community by visiting [https://www.portainer.io/join-
## Work for us
If you are a developer, and our code in this repo makes sense to you, we would love to hear from you. We are always on the hunt for awesome devs, either freelance or employed. Drop us a line to success@portainer.io with your details and/or visit our [careers page](https://apply.workable.com/portainer/).
If you are a developer, and our code in this repo makes sense to you, we would love to hear from you. We are always on the hunt for awesome devs, either freelance or employed. Drop us a line to info@portainer.io with your details and/or visit our [careers page](https://portainer.io/careers).
## Privacy
**To make sure we focus our development effort in the right places we need to know which features get used most often. To give us this information we use [Matomo Analytics](https://matomo.org/), which is hosted in Germany and is fully GDPR compliant.**
When Portainer first starts, you are given the option to DISABLE analytics. If you **don't** choose to disable it, we collect anonymous usage as per [our privacy policy](https://www.portainer.io/legal/privacy-policy). **Please note**, there is no personally identifiable information sent or stored at any time and we only use the data to help us improve Portainer.
When Portainer first starts, you are given the option to DISABLE analytics. If you **don't** choose to disable it, we collect anonymous usage as per [our privacy policy](https://www.portainer.io/privacy-policy). **Please note**, there is no personally identifiable information sent or stored at any time and we only use the data to help us improve Portainer.
## Limitations

View file

@ -16,7 +16,7 @@ import (
// GetAgentVersionAndPlatform returns the agent version and platform
//
// it sends a ping to the agent and parses the version and platform from the headers
func GetAgentVersionAndPlatform(endpointUrl string, tlsConfig *tls.Config) (portainer.AgentPlatform, string, error) { //nolint:forbidigo
func GetAgentVersionAndPlatform(endpointUrl string, tlsConfig *tls.Config) (portainer.AgentPlatform, string, error) {
httpCli := &http.Client{
Timeout: 3 * time.Second,
}

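GetAgentVersionAndPlatform above pings an agent endpoint and parses the version and platform from the response headers. A minimal calling sketch; the import path (github.com/portainer/portainer/api/agent) and the endpoint URL are assumptions, not taken from this diff:

package main

import (
    "crypto/tls"
    "fmt"

    "github.com/portainer/portainer/api/agent" // assumed import path for the helper shown above
)

func main() {
    // A basic TLS config for the ping; real callers would build it with the project's crypto helpers.
    tlsConfig := &tls.Config{MinVersion: tls.VersionTLS12}

    platform, version, err := agent.GetAgentVersionAndPlatform("https://agent.example.local:9001", tlsConfig)
    if err != nil {
        fmt.Println("agent ping failed:", err)
        return
    }
    fmt.Printf("agent platform=%v version=%s\n", platform, version)
}
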
View file

@ -2,6 +2,7 @@ package archive
import (
"archive/zip"
"bytes"
"fmt"
"io"
"os"
@ -11,6 +12,50 @@ import (
"github.com/pkg/errors"
)
// UnzipArchive will unzip an archive from bytes into the dest destination folder on disk
func UnzipArchive(archiveData []byte, dest string) error {
zipReader, err := zip.NewReader(bytes.NewReader(archiveData), int64(len(archiveData)))
if err != nil {
return err
}
for _, zipFile := range zipReader.File {
err := extractFileFromArchive(zipFile, dest)
if err != nil {
return err
}
}
return nil
}
func extractFileFromArchive(file *zip.File, dest string) error {
f, err := file.Open()
if err != nil {
return err
}
defer f.Close()
data, err := io.ReadAll(f)
if err != nil {
return err
}
fpath := filepath.Join(dest, file.Name)
outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode())
if err != nil {
return err
}
_, err = io.Copy(outFile, bytes.NewReader(data))
if err != nil {
return err
}
return outFile.Close()
}
// UnzipFile will decompress a zip archive, moving all files and folders
// within the zip file (parameter 1) to an output directory (parameter 2).
func UnzipFile(src string, dest string) error {
@ -31,11 +76,11 @@ func UnzipFile(src string, dest string) error {
if f.FileInfo().IsDir() {
// Make Folder
os.MkdirAll(p, os.ModePerm)
continue
}
if err := unzipFile(f, p); err != nil {
err = unzipFile(f, p)
if err != nil {
return err
}
}
@ -48,20 +93,20 @@ func unzipFile(f *zip.File, p string) error {
if err := os.MkdirAll(filepath.Dir(p), os.ModePerm); err != nil {
return errors.Wrapf(err, "unzipFile: can't make a path %s", p)
}
outFile, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return errors.Wrapf(err, "unzipFile: can't create file %s", p)
}
defer outFile.Close()
rc, err := f.Open()
if err != nil {
return errors.Wrapf(err, "unzipFile: can't open zip file %s in the archive", f.Name)
}
defer rc.Close()
if _, err = io.Copy(outFile, rc); err != nil {
_, err = io.Copy(outFile, rc)
if err != nil {
return errors.Wrapf(err, "unzipFile: can't copy an archived file content")
}

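The hunk above introduces UnzipArchive, which expands an in-memory zip into a destination folder. A minimal usage sketch; the import path (github.com/portainer/portainer/api/archive) and the sample.zip input are assumptions:

package main

import (
    "log"
    "os"

    "github.com/portainer/portainer/api/archive" // assumed import path for the package in this diff
)

func main() {
    // Read a zip file fully into memory, then expand it with the new helper.
    data, err := os.ReadFile("sample.zip") // hypothetical input archive
    if err != nil {
        log.Fatal(err)
    }

    // UnzipArchive writes files directly under dest, so make sure it exists first.
    if err := os.MkdirAll("./extracted", 0o755); err != nil {
        log.Fatal(err)
    }

    if err := archive.UnzipArchive(data, "./extracted"); err != nil {
        log.Fatal(err)
    }
}
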
View file

@ -9,15 +9,10 @@ import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/datastore"
"github.com/portainer/portainer/pkg/fips"
"github.com/stretchr/testify/require"
)
func init() {
fips.InitFIPS(false)
}
func TestPingAgentPanic(t *testing.T) {
endpoint := &portainer.Endpoint{
ID: 1,

View file

@ -4,6 +4,7 @@ import (
"encoding/base64"
"errors"
"fmt"
"math/rand"
"net"
"strings"
"time"
@ -13,7 +14,6 @@ import (
"github.com/portainer/portainer/api/internal/edge/cache"
"github.com/portainer/portainer/api/internal/endpointutils"
"github.com/portainer/portainer/pkg/libcrypto"
"github.com/portainer/portainer/pkg/librand"
"github.com/dchest/uniuri"
"github.com/rs/zerolog/log"
@ -200,9 +200,7 @@ func (service *Service) getUnusedPort() int {
conn, err := net.DialTCP("tcp", nil, &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: port})
if err == nil {
if err := conn.Close(); err != nil {
log.Warn().Msg("failed to close tcp connection that checks if port is free")
}
conn.Close()
log.Debug().
Int("port", port).
@ -215,7 +213,7 @@ func (service *Service) getUnusedPort() int {
}
func randomInt(min, max int) int {
return min + librand.Intn(max-min)
return min + rand.Intn(max-min)
}
func generateRandomCredentials() (string, string) {

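getUnusedPort above picks a random port and treats a refused TCP dial on 127.0.0.1 as "free"; the deleted test that follows checks the 49152-65535 ephemeral range. A standalone sketch of that probe pattern, independent of the Portainer service types:

package main

import (
    "fmt"
    "math/rand"
    "net"
)

// freeEphemeralPort mimics the probe used above: pick a random port in the
// ephemeral range and assume it is free when a local dial is refused.
func freeEphemeralPort() int {
    for {
        port := 49152 + rand.Intn(65535-49152)

        conn, err := net.DialTCP("tcp", nil, &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: port})
        if err != nil {
            return port // nothing is listening, treat the port as unused
        }
        conn.Close() // something answered, try another port
    }
}

func main() {
    fmt.Println("candidate port:", freeEphemeralPort())
}
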
View file

@ -1,79 +0,0 @@
package chisel
import (
"net"
"strings"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
)
type testSettingsService struct {
dataservices.SettingsService
}
func (s *testSettingsService) Settings() (*portainer.Settings, error) {
return &portainer.Settings{
EdgeAgentCheckinInterval: 1,
}, nil
}
type testStore struct {
dataservices.DataStore
}
func (s *testStore) Settings() dataservices.SettingsService {
return &testSettingsService{}
}
func TestGetUnusedPort(t *testing.T) {
testCases := []struct {
name string
existingTunnels map[portainer.EndpointID]*portainer.TunnelDetails
expectedError error
}{
{
name: "simple case",
},
{
name: "existing tunnels",
existingTunnels: map[portainer.EndpointID]*portainer.TunnelDetails{
portainer.EndpointID(1): {
Port: 53072,
},
portainer.EndpointID(2): {
Port: 63072,
},
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
store := &testStore{}
s := NewService(store, nil, nil)
s.activeTunnels = tc.existingTunnels
port := s.getUnusedPort()
if port < 49152 || port > 65535 {
t.Fatalf("Expected port to be inbetween 49152 and 65535 but got %d", port)
}
for _, tun := range tc.existingTunnels {
if tun.Port == port {
t.Fatalf("returned port %d already has an existing tunnel", port)
}
}
conn, err := net.DialTCP("tcp", nil, &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: port})
if err == nil {
// Ignore error
_ = conn.Close()
t.Fatalf("expected port %d to be unused", port)
} else if !strings.Contains(err.Error(), "connection refused") {
t.Fatalf("unexpected error: %v", err)
}
})
}
}

View file

@ -60,14 +60,11 @@ func CLIFlags() *portainer.CLIFlags {
LogLevel: kingpin.Flag("log-level", "Set the minimum logging level to show").Default("INFO").Enum("DEBUG", "INFO", "WARN", "ERROR"),
LogMode: kingpin.Flag("log-mode", "Set the logging output mode").Default("PRETTY").Enum("NOCOLOR", "PRETTY", "JSON"),
KubectlShellImage: kingpin.Flag("kubectl-shell-image", "Kubectl shell image").Envar(portainer.KubectlShellImageEnvVar).Default(portainer.DefaultKubectlShellImage).String(),
PullLimitCheckDisabled: kingpin.Flag("pull-limit-check-disabled", "Pull limit check").Envar(portainer.PullLimitCheckDisabledEnvVar).Default(defaultPullLimitCheckDisabled).Bool(),
TrustedOrigins: kingpin.Flag("trusted-origins", "List of trusted origins for CSRF protection. Separate multiple origins with a comma.").Envar(portainer.TrustedOriginsEnvVar).String(),
CSP: kingpin.Flag("csp", "Content Security Policy (CSP) header").Envar(portainer.CSPEnvVar).Default("true").Bool(),
}
}
// ParseFlags parse the CLI flags and return a portainer.Flags struct
func (Service) ParseFlags(version string) (*portainer.CLIFlags, error) {
func (*Service) ParseFlags(version string) (*portainer.CLIFlags, error) {
kingpin.Version(version)
flags := CLIFlags()
@ -87,7 +84,7 @@ func (Service) ParseFlags(version string) (*portainer.CLIFlags, error) {
}
// ValidateFlags validates the values of the flags.
func (Service) ValidateFlags(flags *portainer.CLIFlags) error {
func (*Service) ValidateFlags(flags *portainer.CLIFlags) error {
displayDeprecationWarnings(flags)
if err := validateEndpointURL(*flags.EndpointURL); err != nil {

View file

@ -1,24 +0,0 @@
package cli
import (
"os"
"testing"
"github.com/stretchr/testify/require"
)
func TestOptionParser(t *testing.T) {
p := Service{}
require.NotNil(t, p)
a := os.Args
defer func() { os.Args = a }()
os.Args = []string{"portainer", "--edge-compute"}
opts, err := p.ParseFlags("2.34.5")
require.NoError(t, err)
require.False(t, *opts.HTTPDisabled)
require.True(t, *opts.EnableEdgeComputeFeatures)
}

View file

@ -4,21 +4,20 @@
package cli
const (
defaultBindAddress = ":9000"
defaultHTTPSBindAddress = ":9443"
defaultTunnelServerAddress = "0.0.0.0"
defaultTunnelServerPort = "8000"
defaultDataDirectory = "/data"
defaultAssetsDirectory = "./"
defaultTLS = "false"
defaultTLSSkipVerify = "false"
defaultTLSCACertPath = "/certs/ca.pem"
defaultTLSCertPath = "/certs/cert.pem"
defaultTLSKeyPath = "/certs/key.pem"
defaultHTTPDisabled = "false"
defaultHTTPEnabled = "false"
defaultSSL = "false"
defaultBaseURL = "/"
defaultSecretKeyName = "portainer"
defaultPullLimitCheckDisabled = "false"
defaultBindAddress = ":9000"
defaultHTTPSBindAddress = ":9443"
defaultTunnelServerAddress = "0.0.0.0"
defaultTunnelServerPort = "8000"
defaultDataDirectory = "/data"
defaultAssetsDirectory = "./"
defaultTLS = "false"
defaultTLSSkipVerify = "false"
defaultTLSCACertPath = "/certs/ca.pem"
defaultTLSCertPath = "/certs/cert.pem"
defaultTLSKeyPath = "/certs/key.pem"
defaultHTTPDisabled = "false"
defaultHTTPEnabled = "false"
defaultSSL = "false"
defaultBaseURL = "/"
defaultSecretKeyName = "portainer"
)

View file

@ -1,22 +1,21 @@
package cli
const (
defaultBindAddress = ":9000"
defaultHTTPSBindAddress = ":9443"
defaultTunnelServerAddress = "0.0.0.0"
defaultTunnelServerPort = "8000"
defaultDataDirectory = "C:\\data"
defaultAssetsDirectory = "./"
defaultTLS = "false"
defaultTLSSkipVerify = "false"
defaultTLSCACertPath = "C:\\certs\\ca.pem"
defaultTLSCertPath = "C:\\certs\\cert.pem"
defaultTLSKeyPath = "C:\\certs\\key.pem"
defaultHTTPDisabled = "false"
defaultHTTPEnabled = "false"
defaultSSL = "false"
defaultSnapshotInterval = "5m"
defaultBaseURL = "/"
defaultSecretKeyName = "portainer"
defaultPullLimitCheckDisabled = "false"
defaultBindAddress = ":9000"
defaultHTTPSBindAddress = ":9443"
defaultTunnelServerAddress = "0.0.0.0"
defaultTunnelServerPort = "8000"
defaultDataDirectory = "C:\\data"
defaultAssetsDirectory = "./"
defaultTLS = "false"
defaultTLSSkipVerify = "false"
defaultTLSCACertPath = "C:\\certs\\ca.pem"
defaultTLSCertPath = "C:\\certs\\cert.pem"
defaultTLSKeyPath = "C:\\certs\\key.pem"
defaultHTTPDisabled = "false"
defaultHTTPEnabled = "false"
defaultSSL = "false"
defaultSnapshotInterval = "5m"
defaultBaseURL = "/"
defaultSecretKeyName = "portainer"
)

api/cli/pairlistbool.go Normal file
View file

@ -0,0 +1,45 @@
package cli
import (
"strings"
portainer "github.com/portainer/portainer/api"
"gopkg.in/alecthomas/kingpin.v2"
)
type pairListBool []portainer.Pair
// Set implementation for a list of portainer.Pair
func (l *pairListBool) Set(value string) error {
p := new(portainer.Pair)
// default to true. example setting=true is equivalent to setting
parts := strings.SplitN(value, "=", 2)
if len(parts) != 2 {
p.Name = parts[0]
p.Value = "true"
} else {
p.Name = parts[0]
p.Value = parts[1]
}
*l = append(*l, *p)
return nil
}
// String implementation for a list of pair
func (l *pairListBool) String() string {
return ""
}
// IsCumulative implementation for a list of pair
func (l *pairListBool) IsCumulative() bool {
return true
}
func BoolPairs(s kingpin.Settings) (target *[]portainer.Pair) {
target = new([]portainer.Pair)
s.SetValue((*pairListBool)(target))
return
}

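The new pairlistbool.go makes a kingpin flag accept repeated name or name=value tokens, defaulting the value to "true" when the "=" is omitted. A usage sketch with a hypothetical --feature flag (the flag name and the import path are assumptions):

package main

import (
    "fmt"

    "gopkg.in/alecthomas/kingpin.v2"

    "github.com/portainer/portainer/api/cli" // assumed import path for BoolPairs above
)

func main() {
    // Hypothetical flag: each --feature occurrence becomes a portainer.Pair,
    // with the value defaulting to "true" when no "=value" is given.
    features := cli.BoolPairs(kingpin.Flag("feature", "Feature toggles, repeatable"))

    kingpin.Parse()

    for _, pair := range *features {
        fmt.Printf("%s=%s\n", pair.Name, pair.Value)
    }
}

Run as, say, "./app --feature experimental --feature telemetry=false", this would print experimental=true and telemetry=false.
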
View file

@ -1,4 +1,4 @@
package logs
package main
import (
"fmt"
@ -10,7 +10,7 @@ import (
"github.com/rs/zerolog/pkgerrors"
)
func ConfigureLogger() {
func configureLogger() {
zerolog.ErrorStackFieldName = "stack_trace"
zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
@ -21,7 +21,7 @@ func ConfigureLogger() {
log.Logger = log.Logger.With().Caller().Stack().Logger()
}
func SetLoggingLevel(level string) {
func setLoggingLevel(level string) {
switch level {
case "ERROR":
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
@ -34,7 +34,7 @@ func SetLoggingLevel(level string) {
}
}
func SetLoggingMode(mode string) {
func setLoggingMode(mode string) {
switch mode {
case "PRETTY":
log.Logger = log.Output(zerolog.ConsoleWriter{

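configureLogger, setLoggingLevel and setLoggingMode above wire zerolog's global logger from the CLI flags (the change also unexports them into package main). A minimal standalone sketch of the same zerolog setup, outside the Portainer codebase:

package main

import (
    "os"
    "time"

    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    // Roughly what setLoggingMode("PRETTY") does: human-readable console output.
    log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.RFC3339})

    // Roughly what setLoggingLevel("DEBUG") does: raise the global level.
    zerolog.SetGlobalLevel(zerolog.DebugLevel)

    log.Debug().Str("component", "example").Msg("logger configured")
}
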
View file

@ -39,7 +39,6 @@ import (
"github.com/portainer/portainer/api/kubernetes"
kubecli "github.com/portainer/portainer/api/kubernetes/cli"
"github.com/portainer/portainer/api/ldap"
"github.com/portainer/portainer/api/logs"
"github.com/portainer/portainer/api/oauth"
"github.com/portainer/portainer/api/pendingactions"
"github.com/portainer/portainer/api/pendingactions/actions"
@ -49,18 +48,15 @@ import (
"github.com/portainer/portainer/api/stacks/deployments"
"github.com/portainer/portainer/pkg/build"
"github.com/portainer/portainer/pkg/featureflags"
"github.com/portainer/portainer/pkg/fips"
"github.com/portainer/portainer/pkg/libhelm"
libhelmtypes "github.com/portainer/portainer/pkg/libhelm/types"
"github.com/portainer/portainer/pkg/libstack/compose"
"github.com/portainer/portainer/pkg/validate"
"github.com/gofrs/uuid"
"github.com/rs/zerolog/log"
)
func initCLI() *portainer.CLIFlags {
cliService := cli.Service{}
cliService := &cli.Service{}
flags, err := cliService.ParseFlags(portainer.APIVersion)
if err != nil {
@ -169,12 +165,12 @@ func checkDBSchemaServerVersionMatch(dbStore dataservices.DataStore, serverVersi
return v.SchemaVersion == serverVersion && v.Edition == serverEdition
}
func initKubernetesDeployer(kubernetesTokenCacheManager *kubeproxy.TokenCacheManager, kubernetesClientFactory *kubecli.ClientFactory, dataStore dataservices.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, proxyManager *proxy.Manager) portainer.KubernetesDeployer {
return exec.NewKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, signatureService, proxyManager)
func initKubernetesDeployer(kubernetesTokenCacheManager *kubeproxy.TokenCacheManager, kubernetesClientFactory *kubecli.ClientFactory, dataStore dataservices.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, proxyManager *proxy.Manager, assetsPath string) portainer.KubernetesDeployer {
return exec.NewKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, signatureService, proxyManager, assetsPath)
}
func initHelmPackageManager() (libhelmtypes.HelmPackageManager, error) {
return libhelm.NewHelmPackageManager()
func initHelmPackageManager(assetsPath string) (libhelm.HelmPackageManager, error) {
return libhelm.NewHelmPackageManager(libhelm.HelmConfig{BinaryPath: assetsPath})
}
func initAPIKeyService(datastore dataservices.DataStore) apikey.APIKeyService {
@ -332,21 +328,6 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
featureflags.Parse(*flags.FeatureFlags, portainer.SupportedFeatureFlags)
}
trustedOrigins := []string{}
if *flags.TrustedOrigins != "" {
// validate if the trusted origins are valid urls
for _, origin := range strings.Split(*flags.TrustedOrigins, ",") {
if !validate.IsTrustedOrigin(origin) {
log.Fatal().Str("trusted_origin", origin).Msg("invalid url for trusted origin. Please check the trusted origins flag.")
}
trustedOrigins = append(trustedOrigins, origin)
}
}
// -ce can not ever be run in FIPS mode
fips.InitFIPS(false)
fileService := initFileService(*flags.Data)
encryptionKey := loadEncryptionSecretKey(*flags.SecretKeyName)
if encryptionKey == nil {
@ -381,16 +362,15 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
log.Fatal().Err(err).Msg("failed initializing JWT service")
}
ldapService := ldap.Service{}
ldapService := &ldap.Service{}
oauthService := oauth.NewService()
gitService := git.NewService(shutdownCtx)
// Setting insecureSkipVerify to true to preserve the old behaviour.
openAMTService := openamt.NewService(true)
openAMTService := openamt.NewService()
cryptoService := crypto.Service{}
cryptoService := &crypto.Service{}
signatureService := initDigitalSignatureService()
@ -441,7 +421,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
log.Fatal().Err(err).Msg("failed initializing swarm stack manager")
}
kubernetesDeployer := initKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, signatureService, proxyManager)
kubernetesDeployer := initKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, signatureService, proxyManager, *flags.Assets)
pendingActionsService := pendingactions.NewService(dataStore, kubernetesClientFactory)
pendingActionsService.RegisterHandler(actions.CleanNAPWithOverridePolicies, handlers.NewHandlerCleanNAPWithOverridePolicies(authorizationService, dataStore))
@ -455,9 +435,9 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
snapshotService.Start()
proxyManager.NewProxyFactory(dataStore, signatureService, reverseTunnelService, dockerClientFactory, kubernetesClientFactory, kubernetesTokenCacheManager, gitService, snapshotService, jwtService)
proxyManager.NewProxyFactory(dataStore, signatureService, reverseTunnelService, dockerClientFactory, kubernetesClientFactory, kubernetesTokenCacheManager, gitService, snapshotService)
helmPackageManager, err := initHelmPackageManager()
helmPackageManager, err := initHelmPackageManager(*flags.Assets)
if err != nil {
log.Fatal().Err(err).Msg("failed initializing helm package manager")
}
@ -563,7 +543,6 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
Status: applicationStatus,
BindAddress: *flags.Addr,
BindAddressHTTPS: *flags.AddrHTTPS,
CSP: *flags.CSP,
HTTPEnabled: sslDBSettings.HTTPEnabled,
AssetsPath: *flags.Assets,
DataStore: dataStore,
@ -596,19 +575,17 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server {
AdminCreationDone: adminCreationDone,
PendingActionsService: pendingActionsService,
PlatformService: platformService,
PullLimitCheckDisabled: *flags.PullLimitCheckDisabled,
TrustedOrigins: trustedOrigins,
}
}
func main() {
logs.ConfigureLogger()
logs.SetLoggingMode("PRETTY")
configureLogger()
setLoggingMode("PRETTY")
flags := initCLI()
logs.SetLoggingLevel(*flags.LogLevel)
logs.SetLoggingMode(*flags.LogMode)
setLoggingLevel(*flags.LogLevel)
setLoggingMode(*flags.LogMode)
for {
server := buildServer(flags)

View file

@ -6,10 +6,8 @@ import (
type ReadTransaction interface {
GetObject(bucketName string, key []byte, object any) error
GetRawBytes(bucketName string, key []byte) ([]byte, error)
GetAll(bucketName string, obj any, append func(o any) (any, error)) error
GetAllWithKeyPrefix(bucketName string, keyPrefix []byte, obj any, append func(o any) (any, error)) error
KeyExists(bucketName string, key []byte) (bool, error)
}
type Transaction interface {

View file

@ -6,15 +6,11 @@ import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha256"
"errors"
"fmt"
"io"
"strings"
"github.com/portainer/portainer/pkg/fips"
"golang.org/x/crypto/argon2"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/scrypt"
)
@ -23,32 +19,20 @@ const (
aesGcmHeader = "AES256-GCM" // The encrypted file header
aesGcmBlockSize = 1024 * 1024 // 1MB block for aes gcm
aesGcmFIPSHeader = "FIPS-AES256-GCM"
aesGcmFIPSBlockSize = 16 * 1024 * 1024 // 16MB block for aes gcm
// Argon2 settings
// Recommended settings lower memory hardware according to current OWASP recommendations
// Recommded settings lower memory hardware according to current OWASP recommendations
// Considering some people run portainer on a NAS I think it's prudent not to assume we're on server grade hardware
// https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#argon2id
argon2MemoryCost = 12 * 1024
argon2TimeCost = 3
argon2Threads = 1
argon2KeyLength = 32
pbkdf2Iterations = 600_000 // use recommended iterations from https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2 a little overkill for this use
pbkdf2SaltLength = 32
)
// AesEncrypt reads from input, encrypts with AES-256 and writes to output. passphrase is used to generate an encryption key
func AesEncrypt(input io.Reader, output io.Writer, passphrase []byte) error {
if fips.FIPSMode() {
if err := aesEncryptGCMFIPS(input, output, passphrase); err != nil {
return fmt.Errorf("error encrypting file: %w", err)
}
} else {
if err := aesEncryptGCM(input, output, passphrase); err != nil {
return fmt.Errorf("error encrypting file: %w", err)
}
if err := aesEncryptGCM(input, output, passphrase); err != nil {
return fmt.Errorf("error encrypting file: %w", err)
}
return nil
@ -56,36 +40,14 @@ func AesEncrypt(input io.Reader, output io.Writer, passphrase []byte) error {
// AesDecrypt reads from input, decrypts with AES-256 and returns the reader to read the decrypted content from
func AesDecrypt(input io.Reader, passphrase []byte) (io.Reader, error) {
fipsMode := fips.FIPSMode()
return aesDecrypt(input, passphrase, fipsMode)
}
func aesDecrypt(input io.Reader, passphrase []byte, fipsMode bool) (io.Reader, error) {
// Read file header to determine how it was encrypted
inputReader := bufio.NewReader(input)
header, err := inputReader.Peek(len(aesGcmFIPSHeader))
header, err := inputReader.Peek(len(aesGcmHeader))
if err != nil {
return nil, fmt.Errorf("error reading encrypted backup file header: %w", err)
}
if strings.HasPrefix(string(header), aesGcmFIPSHeader) {
if !fipsMode {
return nil, errors.New("fips encrypted file detected but fips mode is not enabled")
}
reader, err := aesDecryptGCMFIPS(inputReader, passphrase)
if err != nil {
return nil, fmt.Errorf("error decrypting file: %w", err)
}
return reader, nil
}
if strings.HasPrefix(string(header), aesGcmHeader) {
if fipsMode {
return nil, errors.New("fips mode is enabled but non-fips encrypted file detected")
}
if string(header) == aesGcmHeader {
reader, err := aesDecryptGCM(inputReader, passphrase)
if err != nil {
return nil, fmt.Errorf("error decrypting file: %w", err)
@ -241,126 +203,6 @@ func aesDecryptGCM(input io.Reader, passphrase []byte) (io.Reader, error) {
return &buf, nil
}
// aesEncryptGCMFIPS reads from input, encrypts with AES-256 in a fips compliant
// way and writes to output. passphrase is used to generate an encryption key.
func aesEncryptGCMFIPS(input io.Reader, output io.Writer, passphrase []byte) error {
salt := make([]byte, pbkdf2SaltLength)
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
return err
}
key := pbkdf2.Key(passphrase, salt, pbkdf2Iterations, 32, sha256.New)
block, err := aes.NewCipher(key)
if err != nil {
return err
}
// write the header
if _, err := output.Write([]byte(aesGcmFIPSHeader)); err != nil {
return err
}
// Write nonce and salt to the output file
if _, err := output.Write(salt); err != nil {
return err
}
// Buffer for reading plaintext blocks
buf := make([]byte, aesGcmFIPSBlockSize)
// Encrypt plaintext in blocks
for {
// new random nonce for each block
aesgcm, err := cipher.NewGCMWithRandomNonce(block)
if err != nil {
return fmt.Errorf("error creating gcm: %w", err)
}
n, err := io.ReadFull(input, buf)
if n == 0 {
break // end of plaintext input
}
if err != nil && !(errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) {
return err
}
// Seal encrypts the plaintext
ciphertext := aesgcm.Seal(nil, nil, buf[:n], nil)
_, err = output.Write(ciphertext)
if err != nil {
return err
}
}
return nil
}
// aesDecryptGCMFIPS reads from input, decrypts with AES-256 in a fips compliant
// way and returns the reader to read the decrypted content from.
func aesDecryptGCMFIPS(input io.Reader, passphrase []byte) (io.Reader, error) {
// Reader & verify header
header := make([]byte, len(aesGcmFIPSHeader))
if _, err := io.ReadFull(input, header); err != nil {
return nil, err
}
if string(header) != aesGcmFIPSHeader {
return nil, errors.New("invalid header")
}
// Read salt
salt := make([]byte, pbkdf2SaltLength)
if _, err := io.ReadFull(input, salt); err != nil {
return nil, err
}
key := pbkdf2.Key(passphrase, salt, pbkdf2Iterations, 32, sha256.New)
// Initialize AES cipher block
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
// Initialize a buffer to store decrypted data
buf := bytes.Buffer{}
// Decrypt the ciphertext in blocks
for {
// Create GCM mode with the cipher block
aesgcm, err := cipher.NewGCMWithRandomNonce(block)
if err != nil {
return nil, err
}
// Read a block of ciphertext from the input reader
ciphertextBlock := make([]byte, aesGcmFIPSBlockSize+aesgcm.Overhead())
n, err := io.ReadFull(input, ciphertextBlock)
if n == 0 {
break // end of ciphertext
}
if err != nil && !(errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) {
return nil, err
}
// Decrypt the block of ciphertext
plaintext, err := aesgcm.Open(nil, nil, ciphertextBlock[:n], nil)
if err != nil {
return nil, err
}
if _, err := buf.Write(plaintext); err != nil {
return nil, err
}
}
return &buf, nil
}
// aesDecryptOFB reads from input, decrypts with AES-256 and returns the reader to a read decrypted content from.
// passphrase is used to generate an encryption key.
// note: This function used to decrypt files that were encrypted without a header i.e. old archives

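AesEncrypt and AesDecrypt above stream data through passphrase-derived AES-256-GCM and prefix the output with a header identifying the scheme. A roundtrip sketch using the signatures shown in this diff; the import path (github.com/portainer/portainer/api/crypto) is an assumption:

package main

import (
    "bytes"
    "io"
    "log"

    "github.com/portainer/portainer/api/crypto" // assumed import path for AesEncrypt/AesDecrypt above
)

func main() {
    passphrase := []byte("correct horse battery staple")
    plaintext := []byte("backup payload")

    // Encrypt from an in-memory reader into an in-memory buffer.
    var encrypted bytes.Buffer
    if err := crypto.AesEncrypt(bytes.NewReader(plaintext), &encrypted, passphrase); err != nil {
        log.Fatal(err)
    }

    // Decrypt returns a reader over the recovered plaintext.
    decryptedReader, err := crypto.AesDecrypt(&encrypted, passphrase)
    if err != nil {
        log.Fatal(err)
    }

    decrypted, err := io.ReadAll(decryptedReader)
    if err != nil {
        log.Fatal(err)
    }

    log.Printf("roundtrip ok: %v", bytes.Equal(plaintext, decrypted))
}
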
View file

@ -7,15 +7,9 @@ import (
"path/filepath"
"testing"
"github.com/portainer/portainer/pkg/fips"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func init() {
fips.InitFIPS(false)
}
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
func randBytes(n int) []byte {
@ -26,296 +20,198 @@ func randBytes(n int) []byte {
return b
}
type encryptFunc func(input io.Reader, output io.Writer, passphrase []byte) error
type decryptFunc func(input io.Reader, passphrase []byte) (io.Reader, error)
func Test_encryptAndDecrypt_withTheSamePassword(t *testing.T) {
const passphrase = "passphrase"
testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc, decryptShouldSucceed bool) {
tmpdir := t.TempDir()
tmpdir := t.TempDir()
var (
originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted")
)
var (
originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted")
)
content := randBytes(1024*1024*100 + 523)
os.WriteFile(originFilePath, content, 0600)
content := randBytes(1024*1024*100 + 523)
os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath)
encryptedFileWriter, _ := os.Create(encryptedFilePath)
err := encrypt(originFile, encryptedFileWriter, []byte(passphrase))
require.Nil(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
err := AesEncrypt(originFile, encryptedFileWriter, []byte(passphrase))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
encryptedContent, err := os.ReadFile(encryptedFilePath)
require.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
decryptedReader, err := decrypt(encryptedFileReader, []byte(passphrase))
if !decryptShouldSucceed {
require.Error(t, err, "Failed to decrypt file as indicated by decryptShouldSucceed")
} else {
require.NoError(t, err, "Failed to decrypt file indicated by decryptShouldSucceed")
decryptedReader, err := AesDecrypt(encryptedFileReader, []byte(passphrase))
assert.Nil(t, err, "Failed to decrypt file")
io.Copy(decryptedFileWriter, decryptedReader)
io.Copy(decryptedFileWriter, decryptedReader)
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS, true)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM, true)
})
t.Run("system_fips_mode_public_entry_points", func(t *testing.T) {
// use the init mode, public entry points
testFunc(t, AesEncrypt, AesDecrypt, true)
})
t.Run("fips_encrypted_file_header_fails_in_non_fips_mode", func(t *testing.T) {
// use aesDecrypt which checks the header, confirm that it fails
decrypt := func(input io.Reader, passphrase []byte) (io.Reader, error) {
return aesDecrypt(input, passphrase, false)
}
testFunc(t, aesEncryptGCMFIPS, decrypt, false)
})
t.Run("non_fips_encrypted_file_header_fails_in_fips_mode", func(t *testing.T) {
// use aesDecrypt which checks the header, confirm that it fails
decrypt := func(input io.Reader, passphrase []byte) (io.Reader, error) {
return aesDecrypt(input, passphrase, true)
}
testFunc(t, aesEncryptGCM, decrypt, false)
})
t.Run("fips_encrypted_file_fails_in_non_fips_mode", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCM, false)
})
t.Run("non_fips_encrypted_file_with_fips_mode_should_fail", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCMFIPS, false)
})
t.Run("fips_with_base_aesDecrypt", func(t *testing.T) {
// maximize coverage, use the base aesDecrypt function with valid fips mode
decrypt := func(input io.Reader, passphrase []byte) (io.Reader, error) {
return aesDecrypt(input, passphrase, true)
}
testFunc(t, aesEncryptGCMFIPS, decrypt, true)
})
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
func Test_encryptAndDecrypt_withStrongPassphrase(t *testing.T) {
const passphrase = "A strong passphrase with special characters: !@#$%^&*()_+"
tmpdir := t.TempDir()
testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) {
tmpdir := t.TempDir()
var (
originFilePath = filepath.Join(tmpdir, "origin2")
encryptedFilePath = filepath.Join(tmpdir, "encrypted2")
decryptedFilePath = filepath.Join(tmpdir, "decrypted2")
)
var (
originFilePath = filepath.Join(tmpdir, "origin2")
encryptedFilePath = filepath.Join(tmpdir, "encrypted2")
decryptedFilePath = filepath.Join(tmpdir, "decrypted2")
)
content := randBytes(500)
os.WriteFile(originFilePath, content, 0600)
content := randBytes(500)
os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath)
encryptedFileWriter, _ := os.Create(encryptedFilePath)
err := AesEncrypt(originFile, encryptedFileWriter, []byte(passphrase))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
err := encrypt(originFile, encryptedFileWriter, []byte(passphrase))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
decryptedReader, err := AesDecrypt(encryptedFileReader, []byte(passphrase))
assert.Nil(t, err, "Failed to decrypt file")
decryptedReader, err := decrypt(encryptedFileReader, []byte(passphrase))
assert.Nil(t, err, "Failed to decrypt file")
io.Copy(decryptedFileWriter, decryptedReader)
io.Copy(decryptedFileWriter, decryptedReader)
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM)
})
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
func Test_encryptAndDecrypt_withTheSamePasswordSmallFile(t *testing.T) {
testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) {
tmpdir := t.TempDir()
tmpdir := t.TempDir()
var (
originFilePath = filepath.Join(tmpdir, "origin2")
encryptedFilePath = filepath.Join(tmpdir, "encrypted2")
decryptedFilePath = filepath.Join(tmpdir, "decrypted2")
)
var (
originFilePath = filepath.Join(tmpdir, "origin2")
encryptedFilePath = filepath.Join(tmpdir, "encrypted2")
decryptedFilePath = filepath.Join(tmpdir, "decrypted2")
)
content := randBytes(500)
os.WriteFile(originFilePath, content, 0600)
content := randBytes(500)
os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath)
encryptedFileWriter, _ := os.Create(encryptedFilePath)
err := encrypt(originFile, encryptedFileWriter, []byte("passphrase"))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
err := AesEncrypt(originFile, encryptedFileWriter, []byte("passphrase"))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedFileWriter.Close()
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
decryptedReader, err := decrypt(encryptedFileReader, []byte("passphrase"))
assert.Nil(t, err, "Failed to decrypt file")
decryptedReader, err := AesDecrypt(encryptedFileReader, []byte("passphrase"))
assert.Nil(t, err, "Failed to decrypt file")
io.Copy(decryptedFileWriter, decryptedReader)
io.Copy(decryptedFileWriter, decryptedReader)
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM)
})
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
func Test_encryptAndDecrypt_withEmptyPassword(t *testing.T) {
testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) {
tmpdir := t.TempDir()
tmpdir := t.TempDir()
var (
originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted")
)
var (
originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted")
)
content := randBytes(1024 * 50)
os.WriteFile(originFilePath, content, 0600)
content := randBytes(1024 * 50)
os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath)
defer encryptedFileWriter.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath)
defer encryptedFileWriter.Close()
err := encrypt(originFile, encryptedFileWriter, []byte(""))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
err := AesEncrypt(originFile, encryptedFileWriter, []byte(""))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
decryptedReader, err := decrypt(encryptedFileReader, []byte(""))
assert.Nil(t, err, "Failed to decrypt file")
decryptedReader, err := AesDecrypt(encryptedFileReader, []byte(""))
assert.Nil(t, err, "Failed to decrypt file")
io.Copy(decryptedFileWriter, decryptedReader)
io.Copy(decryptedFileWriter, decryptedReader)
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM)
})
decryptedContent, _ := os.ReadFile(decryptedFilePath)
assert.Equal(t, content, decryptedContent, "Original and decrypted content should match")
}
func Test_decryptWithDifferentPassphrase_shouldProduceWrongResult(t *testing.T) {
testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) {
tmpdir := t.TempDir()
tmpdir := t.TempDir()
var (
originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted")
)
var (
originFilePath = filepath.Join(tmpdir, "origin")
encryptedFilePath = filepath.Join(tmpdir, "encrypted")
decryptedFilePath = filepath.Join(tmpdir, "decrypted")
)
content := randBytes(1034)
os.WriteFile(originFilePath, content, 0600)
content := randBytes(1034)
os.WriteFile(originFilePath, content, 0600)
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
originFile, _ := os.Open(originFilePath)
defer originFile.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath)
defer encryptedFileWriter.Close()
encryptedFileWriter, _ := os.Create(encryptedFilePath)
defer encryptedFileWriter.Close()
err := encrypt(originFile, encryptedFileWriter, []byte("passphrase"))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
err := AesEncrypt(originFile, encryptedFileWriter, []byte("passphrase"))
assert.Nil(t, err, "Failed to encrypt a file")
encryptedContent, err := os.ReadFile(encryptedFilePath)
assert.Nil(t, err, "Couldn't read encrypted file")
assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted")
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
encryptedFileReader, _ := os.Open(encryptedFilePath)
defer encryptedFileReader.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
decryptedFileWriter, _ := os.Create(decryptedFilePath)
defer decryptedFileWriter.Close()
_, err = decrypt(encryptedFileReader, []byte("garbage"))
assert.NotNil(t, err, "Should not allow decrypt with wrong passphrase")
}
t.Run("fips", func(t *testing.T) {
testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS)
})
t.Run("non_fips", func(t *testing.T) {
testFunc(t, aesEncryptGCM, aesDecryptGCM)
})
_, err = AesDecrypt(encryptedFileReader, []byte("garbage"))
assert.NotNil(t, err, "Should not allow decrypt with wrong passphrase")
}

View file

@ -112,7 +112,7 @@ func (service *ECDSAService) CreateSignature(message string) (string, error) {
message = service.secret
}
hash := libcrypto.InsecureHashFromBytes([]byte(message))
hash := libcrypto.HashFromBytes([]byte(message))
r, s, err := ecdsa.Sign(rand.Reader, service.privateKey, hash)
if err != nil {

View file

@ -1,22 +0,0 @@
package crypto
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestCreateSignature(t *testing.T) {
var s = NewECDSAService("secret")
privKey, pubKey, err := s.GenerateKeyPair()
require.NoError(t, err)
require.Greater(t, len(privKey), 0)
require.Greater(t, len(pubKey), 0)
m := "test message"
r, err := s.CreateSignature(m)
require.NoError(t, err)
require.NotEqual(t, r, m)
require.Greater(t, len(r), 0)
}

View file

@ -8,16 +8,15 @@ import (
type Service struct{}
// Hash hashes a string using the bcrypt algorithm
func (Service) Hash(data string) (string, error) {
func (*Service) Hash(data string) (string, error) {
bytes, err := bcrypt.GenerateFromPassword([]byte(data), bcrypt.DefaultCost)
if err != nil {
return "", err
}
return string(bytes), err
}
// CompareHashAndData compares a hash to clear data and returns an error if the comparison fails.
func (Service) CompareHashAndData(hash string, data string) error {
func (*Service) CompareHashAndData(hash string, data string) error {
return bcrypt.CompareHashAndPassword([]byte(hash), []byte(data))
}

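The Service above is a thin wrapper over bcrypt hashing and verification (the diff only switches between value and pointer receivers). A standalone sketch of the same pattern directly on golang.org/x/crypto/bcrypt:

package main

import (
    "fmt"

    "golang.org/x/crypto/bcrypt"
)

func main() {
    // Hash: bcrypt generates and embeds its own salt in the output string.
    hash, err := bcrypt.GenerateFromPassword([]byte("Passw0rd!"), bcrypt.DefaultCost)
    if err != nil {
        panic(err)
    }

    // Verify: a nil error means the cleartext matches the hash.
    err = bcrypt.CompareHashAndPassword(hash, []byte("Passw0rd!"))
    fmt.Println("match:", err == nil)
}
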
View file

@ -2,12 +2,10 @@ package crypto
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestService_Hash(t *testing.T) {
var s = Service{}
var s = &Service{}
type args struct {
hash string
@ -53,11 +51,3 @@ func TestService_Hash(t *testing.T) {
})
}
}
func TestHash(t *testing.T) {
s := Service{}
hash, err := s.Hash("Passw0rd!")
require.NoError(t, err)
require.NotEmpty(t, hash)
}

View file

@ -15,7 +15,7 @@ func NewNonce(size int) *Nonce {
}
// NewRandomNonce generates a new initial nonce with the lower byte set to a random value
// This ensures there are plenty of nonce values available before rolling over
// This ensures there are plenty of nonce values availble before rolling over
// Based on ideas from the Secure Programming Cookbook for C and C++ by John Viega, Matt Messier
// https://www.oreilly.com/library/view/secure-programming-cookbook/0596003943/ch04s09.html
func NewRandomNonce(size int) (*Nonce, error) {

View file

@ -1,36 +1,14 @@
package crypto
import (
"crypto/fips140"
"crypto/tls"
"crypto/x509"
"os"
portainer "github.com/portainer/portainer/api"
)
// CreateTLSConfiguration creates a basic tls.Config with recommended TLS settings
func CreateTLSConfiguration(insecureSkipVerify bool) *tls.Config { //nolint:forbidigo
// TODO: use fips.FIPSMode() instead
return createTLSConfiguration(fips140.Enabled(), insecureSkipVerify)
}
func createTLSConfiguration(fipsEnabled bool, insecureSkipVerify bool) *tls.Config { //nolint:forbidigo
if fipsEnabled {
return &tls.Config{ //nolint:forbidigo
MinVersion: tls.VersionTLS12,
MaxVersion: tls.VersionTLS13,
CipherSuites: []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
},
CurvePreferences: []tls.CurveID{tls.CurveP256, tls.CurveP384, tls.CurveP521},
}
}
return &tls.Config{ //nolint:forbidigo
func CreateTLSConfiguration() *tls.Config {
return &tls.Config{
MinVersion: tls.VersionTLS12,
CipherSuites: []uint16{
tls.TLS_AES_128_GCM_SHA256,
@ -51,34 +29,24 @@ func createTLSConfiguration(fipsEnabled bool, insecureSkipVerify bool) *tls.Conf
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
},
InsecureSkipVerify: insecureSkipVerify, //nolint:forbidigo
}
}
// CreateTLSConfigurationFromBytes initializes a tls.Config using a CA certificate, a certificate and a key
// loaded from memory.
func CreateTLSConfigurationFromBytes(useTLS bool, caCert, cert, key []byte, skipClientVerification, skipServerVerification bool) (*tls.Config, error) { //nolint:forbidigo
// TODO: use fips.FIPSMode() instead
return createTLSConfigurationFromBytes(fips140.Enabled(), useTLS, caCert, cert, key, skipClientVerification, skipServerVerification)
}
func CreateTLSConfigurationFromBytes(caCert, cert, key []byte, skipClientVerification, skipServerVerification bool) (*tls.Config, error) {
config := CreateTLSConfiguration()
config.InsecureSkipVerify = skipServerVerification
func createTLSConfigurationFromBytes(fipsEnabled, useTLS bool, caCert, cert, key []byte, skipClientVerification, skipServerVerification bool) (*tls.Config, error) { //nolint:forbidigo
if !useTLS {
return nil, nil
}
config := createTLSConfiguration(fipsEnabled, skipServerVerification)
if !skipClientVerification || fipsEnabled {
if !skipClientVerification {
certificate, err := tls.X509KeyPair(cert, key)
if err != nil {
return nil, err
}
config.Certificates = []tls.Certificate{certificate}
}
if !skipServerVerification || fipsEnabled {
if !skipServerVerification {
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
config.RootCAs = caCertPool
@ -89,37 +57,29 @@ func createTLSConfigurationFromBytes(fipsEnabled, useTLS bool, caCert, cert, key
// CreateTLSConfigurationFromDisk initializes a tls.Config using a CA certificate, a certificate and a key
// loaded from disk.
func CreateTLSConfigurationFromDisk(config portainer.TLSConfiguration) (*tls.Config, error) { //nolint:forbidigo
// TODO: use fips.FIPSMode() instead
return createTLSConfigurationFromDisk(fips140.Enabled(), config)
}
func CreateTLSConfigurationFromDisk(caCertPath, certPath, keyPath string, skipServerVerification bool) (*tls.Config, error) {
config := CreateTLSConfiguration()
config.InsecureSkipVerify = skipServerVerification
func createTLSConfigurationFromDisk(fipsEnabled bool, config portainer.TLSConfiguration) (*tls.Config, error) { //nolint:forbidigo
if !config.TLS {
return nil, nil
}
tlsConfig := createTLSConfiguration(fipsEnabled, config.TLSSkipVerify)
if config.TLSCertPath != "" && config.TLSKeyPath != "" {
cert, err := tls.LoadX509KeyPair(config.TLSCertPath, config.TLSKeyPath)
if certPath != "" && keyPath != "" {
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
if err != nil {
return nil, err
}
tlsConfig.Certificates = []tls.Certificate{cert}
config.Certificates = []tls.Certificate{cert}
}
if !tlsConfig.InsecureSkipVerify && config.TLSCACertPath != "" { //nolint:forbidigo
caCert, err := os.ReadFile(config.TLSCACertPath)
if !skipServerVerification && caCertPath != "" {
caCert, err := os.ReadFile(caCertPath)
if err != nil {
return nil, err
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
tlsConfig.RootCAs = caCertPool
config.RootCAs = caCertPool
}
return tlsConfig, nil
return config, nil
}

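This file holds the shared tls.Config constructors; one side of the diff defines a zero-argument CreateTLSConfiguration() that returns a TLS 1.2+ config with a restricted cipher list, and callers set InsecureSkipVerify on the result afterwards. A sketch of plugging that form into an HTTP client, assuming the zero-argument signature and the import path github.com/portainer/portainer/api/crypto:

package main

import (
    "fmt"
    "net/http"
    "time"

    "github.com/portainer/portainer/api/crypto" // assumed import path for CreateTLSConfiguration above
)

func main() {
    // Zero-argument form as shown on one side of the diff.
    tlsConfig := crypto.CreateTLSConfiguration()

    client := &http.Client{
        Timeout:   10 * time.Second,
        Transport: &http.Transport{TLSClientConfig: tlsConfig},
    }

    resp, err := client.Get("https://example.com") // hypothetical endpoint
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()

    fmt.Println("status:", resp.Status)
}
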
View file

@ -1,87 +0,0 @@
package crypto
import (
"crypto/tls"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/stretchr/testify/require"
)
func TestCreateTLSConfiguration(t *testing.T) {
// InsecureSkipVerify = false
config := CreateTLSConfiguration(false)
require.Equal(t, config.MinVersion, uint16(tls.VersionTLS12)) //nolint:forbidigo
require.False(t, config.InsecureSkipVerify) //nolint:forbidigo
// InsecureSkipVerify = true
config = CreateTLSConfiguration(true)
require.Equal(t, config.MinVersion, uint16(tls.VersionTLS12)) //nolint:forbidigo
require.True(t, config.InsecureSkipVerify) //nolint:forbidigo
}
func TestCreateTLSConfigurationFIPS(t *testing.T) {
fips := true
fipsCipherSuites := []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
}
fipsCurvePreferences := []tls.CurveID{tls.CurveP256, tls.CurveP384, tls.CurveP521}
config := createTLSConfiguration(fips, false)
require.Equal(t, config.MinVersion, uint16(tls.VersionTLS12)) //nolint:forbidigo
require.Equal(t, config.MaxVersion, uint16(tls.VersionTLS13)) //nolint:forbidigo
require.Equal(t, config.CipherSuites, fipsCipherSuites) //nolint:forbidigo
require.Equal(t, config.CurvePreferences, fipsCurvePreferences) //nolint:forbidigo
require.False(t, config.InsecureSkipVerify) //nolint:forbidigo
}
func TestCreateTLSConfigurationFromBytes(t *testing.T) {
// No TLS
config, err := CreateTLSConfigurationFromBytes(false, nil, nil, nil, false, false)
require.Nil(t, err)
require.Nil(t, config)
// Skip TLS client/server verifications
config, err = CreateTLSConfigurationFromBytes(true, nil, nil, nil, true, true)
require.NoError(t, err)
require.NotNil(t, config)
// Empty TLS
config, err = CreateTLSConfigurationFromBytes(true, nil, nil, nil, false, false)
require.Error(t, err)
require.Nil(t, config)
}
func TestCreateTLSConfigurationFromDisk(t *testing.T) {
// No TLS
config, err := CreateTLSConfigurationFromDisk(portainer.TLSConfiguration{})
require.Nil(t, err)
require.Nil(t, config)
// Skip TLS verifications
config, err = CreateTLSConfigurationFromDisk(portainer.TLSConfiguration{
TLS: true,
TLSSkipVerify: true,
})
require.NoError(t, err)
require.NotNil(t, config)
}
func TestCreateTLSConfigurationFromDiskFIPS(t *testing.T) {
fips := true
// Skipping TLS verifications cannot be done in FIPS mode
config, err := createTLSConfigurationFromDisk(fips, portainer.TLSConfiguration{
TLS: true,
TLSSkipVerify: true,
})
require.NoError(t, err)
require.NotNil(t, config)
require.False(t, config.InsecureSkipVerify) //nolint:forbidigo
}


@ -138,8 +138,6 @@ func (connection *DbConnection) Open() error {
db, err := bolt.Open(databasePath, 0600, &bolt.Options{
Timeout: 1 * time.Second,
InitialMmapSize: connection.InitialMmapSize,
FreelistType: bolt.FreelistMapType,
NoFreelistSync: true,
})
if err != nil {
return err
@ -246,32 +244,6 @@ func (connection *DbConnection) GetObject(bucketName string, key []byte, object
})
}
func (connection *DbConnection) GetRawBytes(bucketName string, key []byte) ([]byte, error) {
var value []byte
err := connection.ViewTx(func(tx portainer.Transaction) error {
var err error
value, err = tx.GetRawBytes(bucketName, key)
return err
})
return value, err
}
func (connection *DbConnection) KeyExists(bucketName string, key []byte) (bool, error) {
var exists bool
err := connection.ViewTx(func(tx portainer.Transaction) error {
var err error
exists, err = tx.KeyExists(bucketName, key)
return err
})
return exists, err
}
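The two wrappers above (`GetRawBytes` and `KeyExists`) follow the closure-capture pattern this layer uses throughout: run the transactional variant inside `ViewTx` and capture the result in variables declared outside the closure. Below is a reduced, self-contained sketch of the same pattern; the `Tx` interface and `Store` type are illustrative stand-ins, not the Portainer types.

```go
// Illustrative only: the closure-capture pattern behind GetRawBytes/KeyExists.
package main

import "fmt"

type Tx interface {
	Get(bucket, key string) ([]byte, error)
}

type memTx map[string][]byte

func (m memTx) Get(bucket, key string) ([]byte, error) {
	return m[bucket+"/"+key], nil
}

type Store struct {
	viewTx func(fn func(Tx) error) error // stand-in for connection.ViewTx
}

// Get exposes a non-transactional API on top of the transactional one.
func (s *Store) Get(bucket, key string) ([]byte, error) {
	var value []byte
	err := s.viewTx(func(tx Tx) error {
		var err error
		value, err = tx.Get(bucket, key) // result captured by the closure
		return err
	})
	return value, err
}

func main() {
	db := memTx{"settings/theme": []byte("dark")}
	store := &Store{viewTx: func(fn func(Tx) error) error { return fn(db) }}

	v, err := store.Get("settings", "theme")
	fmt.Println(string(v), err) // dark <nil>
}
```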
func (connection *DbConnection) getEncryptionKey() []byte {
if !connection.isEncrypted {
return nil


@ -4,6 +4,8 @@ import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"io"
"github.com/pkg/errors"
"github.com/segmentio/encoding/json"
@ -63,18 +65,18 @@ func (connection *DbConnection) UnmarshalObject(data []byte, object any) error {
// https://gist.github.com/atoponce/07d8d4c833873be2f68c34f9afc5a78a#symmetric-encryption
func encrypt(plaintext []byte, passphrase []byte) (encrypted []byte, err error) {
block, err := aes.NewCipher(passphrase)
block, _ := aes.NewCipher(passphrase)
gcm, err := cipher.NewGCM(block)
if err != nil {
return encrypted, err
}
// NewGCMWithRandomNonce in go 1.24 handles setting up the nonce and adding it to the encrypted output
gcm, err := cipher.NewGCMWithRandomNonce(block)
if err != nil {
nonce := make([]byte, gcm.NonceSize())
if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
return encrypted, err
}
return gcm.Seal(nil, nil, plaintext, nil), nil
return gcm.Seal(nonce, nonce, plaintext, nil), nil
}
func decrypt(encrypted []byte, passphrase []byte) (plaintextByte []byte, err error) {
@ -87,17 +89,19 @@ func decrypt(encrypted []byte, passphrase []byte) (plaintextByte []byte, err err
return encrypted, errors.Wrap(err, "Error creating cypher block")
}
// NewGCMWithRandomNonce in go 1.24 handles reading the nonce from the encrypted input for us
gcm, err := cipher.NewGCMWithRandomNonce(block)
gcm, err := cipher.NewGCM(block)
if err != nil {
return encrypted, errors.Wrap(err, "Error creating GCM")
}
if len(encrypted) < gcm.NonceSize() {
nonceSize := gcm.NonceSize()
if len(encrypted) < nonceSize {
return encrypted, errEncryptedStringTooShort
}
plaintextByte, err = gcm.Open(nil, nil, encrypted, nil)
nonce, ciphertextByteClean := encrypted[:nonceSize], encrypted[nonceSize:]
plaintextByte, err = gcm.Open(nil, nonce, ciphertextByteClean, nil)
if err != nil {
return encrypted, errors.Wrap(err, "Error decrypting text")
}
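A standalone round-trip of the `cipher.NewGCMWithRandomNonce` API that the new encrypt/decrypt above relies on may be useful for readers who have not used it yet. It requires Go 1.24 or later; the key size and message below are illustrative.

```go
// Round-trip with the Go 1.24 API used in the new encrypt/decrypt above.
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

func main() {
	key := make([]byte, 32) // AES-256
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}

	// NewGCMWithRandomNonce generates the nonce on Seal, prepends it to the
	// ciphertext, and reads it back transparently on Open.
	gcm, err := cipher.NewGCMWithRandomNonce(block)
	if err != nil {
		panic(err)
	}

	ciphertext := gcm.Seal(nil, nil, []byte("hello"), nil)

	plaintext, err := gcm.Open(nil, nil, ciphertext, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext)) // hello
}
```

Letting the standard library own the nonce removes the easiest way to misuse GCM (reusing or mis-slicing the nonce), which is presumably why the hand-rolled `io.ReadFull(rand.Reader, nonce)` variant is kept only on one side of this diff.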


@ -1,23 +1,16 @@
package boltdb
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"fmt"
"io"
"testing"
"github.com/gofrs/uuid"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
jsonobject = `{"LogoURL":"","BlackListedLabels":[],"AuthenticationMethod":1,"InternalAuthSettings": {"RequiredPasswordLength": 12}"LDAPSettings":{"AnonymousMode":true,"ReaderDN":"","URL":"","TLSConfig":{"TLS":false,"TLSSkipVerify":false},"StartTLS":false,"SearchSettings":[{"BaseDN":"","Filter":"","UserNameAttribute":""}],"GroupSearchSettings":[{"GroupBaseDN":"","GroupFilter":"","GroupAttribute":""}],"AutoCreateUsers":true},"OAuthSettings":{"ClientID":"","AccessTokenURI":"","AuthorizationURI":"","ResourceURI":"","RedirectURI":"","UserIdentifier":"","Scopes":"","OAuthAutoCreateUsers":false,"DefaultTeamID":0,"SSO":true,"LogoutURI":"","KubeSecretKey":"j0zLVtY/lAWBk62ByyF0uP80SOXaitsABP0TTJX8MhI="},"OpenAMTConfiguration":{"Enabled":false,"MPSServer":"","MPSUser":"","MPSPassword":"","MPSToken":"","CertFileContent":"","CertFileName":"","CertFilePassword":"","DomainName":""},"FeatureFlagSettings":{},"SnapshotInterval":"5m","TemplatesURL":"https://raw.githubusercontent.com/portainer/templates/master/templates-2.0.json","EdgeAgentCheckinInterval":5,"EnableEdgeComputeFeatures":false,"UserSessionTimeout":"8h","KubeconfigExpiry":"0","EnableTelemetry":true,"HelmRepositoryURL":"https://charts.bitnami.com/bitnami","KubectlShellImage":"portainer/kubectl-shell","DisplayDonationHeader":false,"DisplayExternalContributors":false,"EnableHostManagementFeatures":false,"AllowVolumeBrowserForRegularUsers":false,"AllowBindMountsForRegularUsers":false,"AllowPrivilegedModeForRegularUsers":false,"AllowHostNamespaceForRegularUsers":false,"AllowStackManagementForRegularUsers":false,"AllowDeviceMappingForRegularUsers":false,"AllowContainerCapabilitiesForRegularUsers":false}`
jsonobject = `{"LogoURL":"","BlackListedLabels":[],"AuthenticationMethod":1,"InternalAuthSettings": {"RequiredPasswordLength": 12}"LDAPSettings":{"AnonymousMode":true,"ReaderDN":"","URL":"","TLSConfig":{"TLS":false,"TLSSkipVerify":false},"StartTLS":false,"SearchSettings":[{"BaseDN":"","Filter":"","UserNameAttribute":""}],"GroupSearchSettings":[{"GroupBaseDN":"","GroupFilter":"","GroupAttribute":""}],"AutoCreateUsers":true},"OAuthSettings":{"ClientID":"","AccessTokenURI":"","AuthorizationURI":"","ResourceURI":"","RedirectURI":"","UserIdentifier":"","Scopes":"","OAuthAutoCreateUsers":false,"DefaultTeamID":0,"SSO":true,"LogoutURI":"","KubeSecretKey":"j0zLVtY/lAWBk62ByyF0uP80SOXaitsABP0TTJX8MhI="},"OpenAMTConfiguration":{"Enabled":false,"MPSServer":"","MPSUser":"","MPSPassword":"","MPSToken":"","CertFileContent":"","CertFileName":"","CertFilePassword":"","DomainName":""},"FeatureFlagSettings":{},"SnapshotInterval":"5m","TemplatesURL":"https://raw.githubusercontent.com/portainer/templates/master/templates-2.0.json","EdgeAgentCheckinInterval":5,"EnableEdgeComputeFeatures":false,"UserSessionTimeout":"8h","KubeconfigExpiry":"0","EnableTelemetry":true,"HelmRepositoryURL":"https://kubernetes.github.io/ingress-nginx","KubectlShellImage":"portainer/kubectl-shell","DisplayDonationHeader":false,"DisplayExternalContributors":false,"EnableHostManagementFeatures":false,"AllowVolumeBrowserForRegularUsers":false,"AllowBindMountsForRegularUsers":false,"AllowPrivilegedModeForRegularUsers":false,"AllowHostNamespaceForRegularUsers":false,"AllowStackManagementForRegularUsers":false,"AllowDeviceMappingForRegularUsers":false,"AllowContainerCapabilitiesForRegularUsers":false}`
passphrase = "my secret key"
)
@ -167,7 +160,7 @@ func Test_ObjectMarshallingEncrypted(t *testing.T) {
}
key := secretToEncryptionKey(passphrase)
conn := DbConnection{EncryptionKey: key, isEncrypted: true}
conn := DbConnection{EncryptionKey: key}
for _, test := range tests {
t.Run(fmt.Sprintf("%s -> %s", test.object, test.expected), func(t *testing.T) {
@ -182,94 +175,3 @@ func Test_ObjectMarshallingEncrypted(t *testing.T) {
})
}
}
func Test_NonceSources(t *testing.T) {
// ensure that the new go 1.24 NewGCMWithRandomNonce works correctly with
// the old way of creating and including the nonce
encryptOldFn := func(plaintext []byte, passphrase []byte) (encrypted []byte, err error) {
block, _ := aes.NewCipher(passphrase)
gcm, err := cipher.NewGCM(block)
if err != nil {
return encrypted, err
}
nonce := make([]byte, gcm.NonceSize())
if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
return encrypted, err
}
return gcm.Seal(nonce, nonce, plaintext, nil), nil
}
decryptOldFn := func(encrypted []byte, passphrase []byte) (plaintext []byte, err error) {
block, err := aes.NewCipher(passphrase)
if err != nil {
return encrypted, errors.Wrap(err, "Error creating cypher block")
}
gcm, err := cipher.NewGCM(block)
if err != nil {
return encrypted, errors.Wrap(err, "Error creating GCM")
}
nonceSize := gcm.NonceSize()
if len(encrypted) < nonceSize {
return encrypted, errEncryptedStringTooShort
}
nonce, ciphertextByteClean := encrypted[:nonceSize], encrypted[nonceSize:]
plaintext, err = gcm.Open(nil, nonce, ciphertextByteClean, nil)
if err != nil {
return encrypted, errors.Wrap(err, "Error decrypting text")
}
return plaintext, err
}
encryptNewFn := encrypt
decryptNewFn := decrypt
passphrase := make([]byte, 32)
_, err := io.ReadFull(rand.Reader, passphrase)
require.NoError(t, err)
junk := make([]byte, 1024)
_, err = io.ReadFull(rand.Reader, junk)
require.NoError(t, err)
junkEnc := make([]byte, base64.StdEncoding.EncodedLen(len(junk)))
base64.StdEncoding.Encode(junkEnc, junk)
cases := [][]byte{
[]byte("test"),
[]byte("35"),
[]byte("9ca4a1dd-a439-4593-b386-a7dfdc2e9fc6"),
[]byte(jsonobject),
passphrase,
junk,
junkEnc,
}
for _, plain := range cases {
var enc, dec []byte
var err error
enc, err = encryptOldFn(plain, passphrase)
require.NoError(t, err)
dec, err = decryptNewFn(enc, passphrase)
require.NoError(t, err)
require.Equal(t, plain, dec)
enc, err = encryptNewFn(plain, passphrase)
require.NoError(t, err)
dec, err = decryptOldFn(enc, passphrase)
require.NoError(t, err)
require.Equal(t, plain, dec)
}
}


@ -6,7 +6,6 @@ import (
dserrors "github.com/portainer/portainer/api/dataservices/errors"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
bolt "go.etcd.io/bbolt"
)
@ -32,33 +31,6 @@ func (tx *DbTransaction) GetObject(bucketName string, key []byte, object any) er
return tx.conn.UnmarshalObject(value, object)
}
func (tx *DbTransaction) GetRawBytes(bucketName string, key []byte) ([]byte, error) {
bucket := tx.tx.Bucket([]byte(bucketName))
value := bucket.Get(key)
if value == nil {
return nil, fmt.Errorf("%w (bucket=%s, key=%s)", dserrors.ErrObjectNotFound, bucketName, keyToString(key))
}
if tx.conn.getEncryptionKey() != nil {
var err error
if value, err = decrypt(value, tx.conn.getEncryptionKey()); err != nil {
return value, errors.Wrap(err, "Failed decrypting object")
}
}
return value, nil
}
func (tx *DbTransaction) KeyExists(bucketName string, key []byte) (bool, error) {
bucket := tx.tx.Bucket([]byte(bucketName))
value := bucket.Get(key)
return value != nil, nil
}
func (tx *DbTransaction) UpdateObject(bucketName string, key []byte, object any) error {
data, err := tx.conn.MarshalObject(object)
if err != nil {


@ -9,8 +9,7 @@ import (
type BaseCRUD[T any, I constraints.Integer] interface {
Create(element *T) error
Read(ID I) (*T, error)
Exists(ID I) (bool, error)
ReadAll(predicates ...func(T) bool) ([]T, error)
ReadAll() ([]T, error)
Update(ID I, element *T) error
Delete(ID I) error
}
@ -43,26 +42,12 @@ func (service BaseDataService[T, I]) Read(ID I) (*T, error) {
})
}
func (service BaseDataService[T, I]) Exists(ID I) (bool, error) {
var exists bool
err := service.Connection.ViewTx(func(tx portainer.Transaction) error {
var err error
exists, err = service.Tx(tx).Exists(ID)
return err
})
return exists, err
}
// ReadAll retrieves all the elements that satisfy all the provided predicates.
func (service BaseDataService[T, I]) ReadAll(predicates ...func(T) bool) ([]T, error) {
func (service BaseDataService[T, I]) ReadAll() ([]T, error) {
var collection = make([]T, 0)
return collection, service.Connection.ViewTx(func(tx portainer.Transaction) error {
var err error
collection, err = service.Tx(tx).ReadAll(predicates...)
collection, err = service.Tx(tx).ReadAll()
return err
})


@ -1,92 +0,0 @@
package dataservices
import (
"strconv"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/slicesx"
"github.com/stretchr/testify/require"
)
type testObject struct {
ID int
Value int
}
type mockConnection struct {
store map[int]testObject
portainer.Connection
}
func (m mockConnection) UpdateObject(bucket string, key []byte, value interface{}) error {
obj := value.(*testObject)
m.store[obj.ID] = *obj
return nil
}
func (m mockConnection) GetAll(bucketName string, obj any, appendFn func(o any) (any, error)) error {
for _, v := range m.store {
if _, err := appendFn(&v); err != nil {
return err
}
}
return nil
}
func (m mockConnection) UpdateTx(fn func(portainer.Transaction) error) error {
return fn(m)
}
func (m mockConnection) ViewTx(fn func(portainer.Transaction) error) error {
return fn(m)
}
func (m mockConnection) ConvertToKey(v int) []byte {
return []byte(strconv.Itoa(v))
}
func TestReadAll(t *testing.T) {
service := BaseDataService[testObject, int]{
Bucket: "testBucket",
Connection: mockConnection{store: make(map[int]testObject)},
}
data := []testObject{
{ID: 1, Value: 1},
{ID: 2, Value: 2},
{ID: 3, Value: 3},
{ID: 4, Value: 4},
{ID: 5, Value: 5},
}
for _, item := range data {
err := service.Update(item.ID, &item)
require.NoError(t, err)
}
// ReadAll without predicates
result, err := service.ReadAll()
require.NoError(t, err)
expected := append([]testObject{}, data...)
require.ElementsMatch(t, expected, result)
// ReadAll with predicates
hasLowID := func(obj testObject) bool { return obj.ID < 3 }
isEven := func(obj testObject) bool { return obj.Value%2 == 0 }
result, err = service.ReadAll(hasLowID, isEven)
require.NoError(t, err)
expected = slicesx.Filter(expected, hasLowID)
expected = slicesx.Filter(expected, isEven)
require.ElementsMatch(t, expected, result)
}


@ -28,38 +28,13 @@ func (service BaseDataServiceTx[T, I]) Read(ID I) (*T, error) {
return &element, nil
}
func (service BaseDataServiceTx[T, I]) Exists(ID I) (bool, error) {
identifier := service.Connection.ConvertToKey(int(ID))
return service.Tx.KeyExists(service.Bucket, identifier)
}
// ReadAll retrieves all the elements that satisfy all the provided predicates.
func (service BaseDataServiceTx[T, I]) ReadAll(predicates ...func(T) bool) ([]T, error) {
func (service BaseDataServiceTx[T, I]) ReadAll() ([]T, error) {
var collection = make([]T, 0)
if len(predicates) == 0 {
return collection, service.Tx.GetAll(
service.Bucket,
new(T),
AppendFn(&collection),
)
}
filterFn := func(element T) bool {
for _, p := range predicates {
if !p(element) {
return false
}
}
return true
}
return collection, service.Tx.GetAll(
service.Bucket,
new(T),
FilterFn(&collection, filterFn),
AppendFn(&collection),
)
}
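The predicate handling in `ReadAll` above folds an arbitrary number of filters into a single AND-ed function before handing it to `FilterFn`. Here is the same composition in isolation; the helper name and sample predicates are illustrative.

```go
// Illustrative sketch of the predicate-AND composition used by ReadAll above.
package main

import "fmt"

// allOf returns a predicate that is true only when every given predicate is true.
func allOf[T any](predicates ...func(T) bool) func(T) bool {
	return func(element T) bool {
		for _, p := range predicates {
			if !p(element) {
				return false
			}
		}
		return true
	}
}

func main() {
	isEven := func(n int) bool { return n%2 == 0 }
	isSmall := func(n int) bool { return n < 10 }

	keep := allOf(isEven, isSmall)
	for _, n := range []int{2, 4, 11, 12, 7} {
		if keep(n) {
			fmt.Println(n) // prints 2 and 4
		}
	}
}
```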


@ -17,29 +17,11 @@ func (service ServiceTx) UpdateEdgeGroupFunc(ID portainer.EdgeGroupID, updateFun
}
func (service ServiceTx) Create(group *portainer.EdgeGroup) error {
es := group.Endpoints
group.Endpoints = nil // Clear deprecated field
err := service.Tx.CreateObject(
return service.Tx.CreateObject(
BucketName,
func(id uint64) (int, any) {
group.ID = portainer.EdgeGroupID(id)
return int(group.ID), group
},
)
group.Endpoints = es // Restore endpoints after create
return err
}
func (service ServiceTx) Update(ID portainer.EdgeGroupID, group *portainer.EdgeGroup) error {
es := group.Endpoints
group.Endpoints = nil // Clear deprecated field
err := service.BaseDataServiceTx.Update(ID, group)
group.Endpoints = es // Restore endpoints after update
return err
}


@ -1,50 +0,0 @@
package edgestack
import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/database/boltdb"
"github.com/stretchr/testify/require"
)
func TestUpdate(t *testing.T) {
var conn portainer.Connection = &boltdb.DbConnection{Path: t.TempDir()}
err := conn.Open()
require.NoError(t, err)
defer conn.Close()
service, err := NewService(conn, func(portainer.Transaction, portainer.EdgeStackID) {})
require.NoError(t, err)
const edgeStackID = 1
edgeStack := &portainer.EdgeStack{
ID: edgeStackID,
Name: "Test Stack",
}
err = service.Create(edgeStackID, edgeStack)
require.NoError(t, err)
err = service.UpdateEdgeStackFunc(edgeStackID, func(edgeStack *portainer.EdgeStack) {
edgeStack.Name = "Updated Stack"
})
require.NoError(t, err)
updatedStack, err := service.EdgeStack(edgeStackID)
require.NoError(t, err)
require.Equal(t, "Updated Stack", updatedStack.Name)
err = conn.UpdateTx(func(tx portainer.Transaction) error {
return service.UpdateEdgeStackFuncTx(tx, edgeStackID, func(edgeStack *portainer.EdgeStack) {
edgeStack.Name = "Updated Stack Again"
})
})
require.NoError(t, err)
updatedStack, err = service.EdgeStack(edgeStackID)
require.NoError(t, err)
require.Equal(t, "Updated Stack Again", updatedStack.Name)
}


@ -1,89 +0,0 @@
package edgestackstatus
import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
)
var _ dataservices.EdgeStackStatusService = &Service{}
const BucketName = "edge_stack_status"
type Service struct {
conn portainer.Connection
}
func (service *Service) BucketName() string {
return BucketName
}
func NewService(connection portainer.Connection) (*Service, error) {
if err := connection.SetServiceName(BucketName); err != nil {
return nil, err
}
return &Service{conn: connection}, nil
}
func (s *Service) Tx(tx portainer.Transaction) ServiceTx {
return ServiceTx{
service: s,
tx: tx,
}
}
func (s *Service) Create(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).Create(edgeStackID, endpointID, status)
})
}
func (s *Service) Read(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) (*portainer.EdgeStackStatusForEnv, error) {
var element *portainer.EdgeStackStatusForEnv
return element, s.conn.ViewTx(func(tx portainer.Transaction) error {
var err error
element, err = s.Tx(tx).Read(edgeStackID, endpointID)
return err
})
}
func (s *Service) ReadAll(edgeStackID portainer.EdgeStackID) ([]portainer.EdgeStackStatusForEnv, error) {
var collection = make([]portainer.EdgeStackStatusForEnv, 0)
return collection, s.conn.ViewTx(func(tx portainer.Transaction) error {
var err error
collection, err = s.Tx(tx).ReadAll(edgeStackID)
return err
})
}
func (s *Service) Update(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).Update(edgeStackID, endpointID, status)
})
}
func (s *Service) Delete(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).Delete(edgeStackID, endpointID)
})
}
func (s *Service) DeleteAll(edgeStackID portainer.EdgeStackID) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).DeleteAll(edgeStackID)
})
}
func (s *Service) Clear(edgeStackID portainer.EdgeStackID, relatedEnvironmentsIDs []portainer.EndpointID) error {
return s.conn.UpdateTx(func(tx portainer.Transaction) error {
return s.Tx(tx).Clear(edgeStackID, relatedEnvironmentsIDs)
})
}
func (s *Service) key(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) []byte {
return append(s.conn.ConvertToKey(int(edgeStackID)), s.conn.ConvertToKey(int(endpointID))...)
}
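The `key` helper above concatenates the converted edge stack ID and endpoint ID into one composite bucket key, which is what lets `ReadAll` and `DeleteAll` in the transactional service scan all statuses of a stack via `GetAllWithKeyPrefix`. A minimal sketch of the idea follows; the 4-byte big-endian encoding is illustrative and may differ from what `ConvertToKey` actually produces.

```go
// Illustrative only: the composite-key idea behind key(edgeStackID, endpointID).
// Encoding both IDs big-endian means every status of a stack shares the same
// fixed-size prefix, which is what a prefix scan relies on.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func itob(v int) []byte {
	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, uint32(v))
	return b
}

func compositeKey(stackID, endpointID int) []byte {
	return append(itob(stackID), itob(endpointID)...)
}

func main() {
	keys := [][]byte{
		compositeKey(1, 10),
		compositeKey(1, 11),
		compositeKey(2, 10),
	}

	prefix := itob(1) // all statuses of stack 1
	for _, k := range keys {
		if bytes.HasPrefix(k, prefix) {
			fmt.Printf("%x\n", k) // matches the two stack-1 keys
		}
	}
}
```

Because big-endian integers sort lexicographically in key order, a prefix scan over the bucket returns exactly the per-environment statuses of one stack without a secondary index.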


@ -1,95 +0,0 @@
package edgestackstatus
import (
"fmt"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
)
var _ dataservices.EdgeStackStatusService = &Service{}
type ServiceTx struct {
service *Service
tx portainer.Transaction
}
func (service ServiceTx) Create(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error {
identifier := service.service.key(edgeStackID, endpointID)
return service.tx.CreateObjectWithStringId(BucketName, identifier, status)
}
func (s ServiceTx) Read(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) (*portainer.EdgeStackStatusForEnv, error) {
var status portainer.EdgeStackStatusForEnv
identifier := s.service.key(edgeStackID, endpointID)
if err := s.tx.GetObject(BucketName, identifier, &status); err != nil {
return nil, err
}
return &status, nil
}
func (s ServiceTx) ReadAll(edgeStackID portainer.EdgeStackID) ([]portainer.EdgeStackStatusForEnv, error) {
keyPrefix := s.service.conn.ConvertToKey(int(edgeStackID))
statuses := make([]portainer.EdgeStackStatusForEnv, 0)
if err := s.tx.GetAllWithKeyPrefix(BucketName, keyPrefix, &portainer.EdgeStackStatusForEnv{}, dataservices.AppendFn(&statuses)); err != nil {
return nil, fmt.Errorf("unable to retrieve EdgeStackStatus for EdgeStack %d: %w", edgeStackID, err)
}
return statuses, nil
}
func (s ServiceTx) Update(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error {
identifier := s.service.key(edgeStackID, endpointID)
return s.tx.UpdateObject(BucketName, identifier, status)
}
func (s ServiceTx) Delete(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) error {
identifier := s.service.key(edgeStackID, endpointID)
return s.tx.DeleteObject(BucketName, identifier)
}
func (s ServiceTx) DeleteAll(edgeStackID portainer.EdgeStackID) error {
keyPrefix := s.service.conn.ConvertToKey(int(edgeStackID))
statuses := make([]portainer.EdgeStackStatusForEnv, 0)
if err := s.tx.GetAllWithKeyPrefix(BucketName, keyPrefix, &portainer.EdgeStackStatusForEnv{}, dataservices.AppendFn(&statuses)); err != nil {
return fmt.Errorf("unable to retrieve EdgeStackStatus for EdgeStack %d: %w", edgeStackID, err)
}
for _, status := range statuses {
if err := s.tx.DeleteObject(BucketName, s.service.key(edgeStackID, status.EndpointID)); err != nil {
return fmt.Errorf("unable to delete EdgeStackStatus for EdgeStack %d and Endpoint %d: %w", edgeStackID, status.EndpointID, err)
}
}
return nil
}
func (s ServiceTx) Clear(edgeStackID portainer.EdgeStackID, relatedEnvironmentsIDs []portainer.EndpointID) error {
for _, envID := range relatedEnvironmentsIDs {
existingStatus, err := s.Read(edgeStackID, envID)
if err != nil && !dataservices.IsErrObjectNotFound(err) {
return fmt.Errorf("unable to retrieve status for environment %d: %w", envID, err)
}
var deploymentInfo portainer.StackDeploymentInfo
if existingStatus != nil {
deploymentInfo = existingStatus.DeploymentInfo
}
if err := s.Update(edgeStackID, envID, &portainer.EdgeStackStatusForEnv{
EndpointID: envID,
Status: []portainer.EdgeStackDeploymentStatus{},
DeploymentInfo: deploymentInfo,
}); err != nil {
return err
}
}
return nil
}


@ -6,6 +6,8 @@ import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/portainer/portainer/api/internal/edge/cache"
"github.com/rs/zerolog/log"
)
// BucketName represents the name of the bucket where this service stores data.
@ -14,20 +16,21 @@ const BucketName = "endpoint_relations"
// Service represents a service for managing environment(endpoint) relation data.
type Service struct {
connection portainer.Connection
updateStackFn func(ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error
updateStackFnTx func(tx portainer.Transaction, ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error
endpointRelationsCache []portainer.EndpointRelation
mu sync.Mutex
}
var _ dataservices.EndpointRelationService = &Service{}
func (service *Service) BucketName() string {
return BucketName
}
func (service *Service) RegisterUpdateStackFunction(
updateFunc func(portainer.EdgeStackID, func(*portainer.EdgeStack)) error,
updateFuncTx func(portainer.Transaction, portainer.EdgeStackID, func(*portainer.EdgeStack)) error,
) {
service.updateStackFn = updateFunc
service.updateStackFnTx = updateFuncTx
}
@ -86,26 +89,94 @@ func (service *Service) Create(endpointRelation *portainer.EndpointRelation) err
// UpdateEndpointRelation updates an Environment(Endpoint) relation object
func (service *Service) UpdateEndpointRelation(endpointID portainer.EndpointID, endpointRelation *portainer.EndpointRelation) error {
return service.connection.UpdateTx(func(tx portainer.Transaction) error {
return service.Tx(tx).UpdateEndpointRelation(endpointID, endpointRelation)
})
}
previousRelationState, _ := service.EndpointRelation(endpointID)
func (service *Service) AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error {
return service.connection.UpdateTx(func(tx portainer.Transaction) error {
return service.Tx(tx).AddEndpointRelationsForEdgeStack(endpointIDs, edgeStackID)
})
}
identifier := service.connection.ConvertToKey(int(endpointID))
err := service.connection.UpdateObject(BucketName, identifier, endpointRelation)
cache.Del(endpointID)
if err != nil {
return err
}
func (service *Service) RemoveEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error {
return service.connection.UpdateTx(func(tx portainer.Transaction) error {
return service.Tx(tx).RemoveEndpointRelationsForEdgeStack(endpointIDs, edgeStackID)
})
updatedRelationState, _ := service.EndpointRelation(endpointID)
service.mu.Lock()
service.endpointRelationsCache = nil
service.mu.Unlock()
service.updateEdgeStacksAfterRelationChange(previousRelationState, updatedRelationState)
return nil
}
// DeleteEndpointRelation deletes an Environment(Endpoint) relation object
func (service *Service) DeleteEndpointRelation(endpointID portainer.EndpointID) error {
return service.connection.UpdateTx(func(tx portainer.Transaction) error {
return service.Tx(tx).DeleteEndpointRelation(endpointID)
})
deletedRelation, _ := service.EndpointRelation(endpointID)
identifier := service.connection.ConvertToKey(int(endpointID))
err := service.connection.DeleteObject(BucketName, identifier)
cache.Del(endpointID)
if err != nil {
return err
}
service.mu.Lock()
service.endpointRelationsCache = nil
service.mu.Unlock()
service.updateEdgeStacksAfterRelationChange(deletedRelation, nil)
return nil
}
func (service *Service) updateEdgeStacksAfterRelationChange(previousRelationState *portainer.EndpointRelation, updatedRelationState *portainer.EndpointRelation) {
relations, _ := service.EndpointRelations()
stacksToUpdate := map[portainer.EdgeStackID]bool{}
if previousRelationState != nil {
for stackId, enabled := range previousRelationState.EdgeStacks {
// flag stack for update if stack is not in the updated relation state
// = stack has been removed for this relation
// or this relation has been deleted
if enabled && (updatedRelationState == nil || !updatedRelationState.EdgeStacks[stackId]) {
stacksToUpdate[stackId] = true
}
}
}
if updatedRelationState != nil {
for stackId, enabled := range updatedRelationState.EdgeStacks {
// flag stack for update if stack is not in the previous relation state
// = stack has been added for this relation
if enabled && (previousRelationState == nil || !previousRelationState.EdgeStacks[stackId]) {
stacksToUpdate[stackId] = true
}
}
}
// for each stack referenced by the updated relation
// list how many time this stack is referenced in all relations
// in order to update the stack deployments count
for refStackId, refStackEnabled := range stacksToUpdate {
if !refStackEnabled {
continue
}
numDeployments := 0
for _, r := range relations {
for sId, enabled := range r.EdgeStacks {
if enabled && sId == refStackId {
numDeployments += 1
}
}
}
if err := service.updateStackFn(refStackId, func(edgeStack *portainer.EdgeStack) {
edgeStack.NumDeployments = numDeployments
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
}
}
}
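The recount above can be read as a small pure function: a stack's `NumDeployments` is simply the number of relations that still reference it with a true flag. A simplified sketch with stand-in types:

```go
// Simplified sketch of the recount performed by updateEdgeStacksAfterRelationChange.
package main

import "fmt"

type relation struct {
	EdgeStacks map[int]bool
}

func numDeployments(stackID int, relations []relation) int {
	count := 0
	for _, r := range relations {
		if r.EdgeStacks[stackID] {
			count++
		}
	}
	return count
}

func main() {
	relations := []relation{
		{EdgeStacks: map[int]bool{1: true, 2: true}},
		{EdgeStacks: map[int]bool{1: true}},
		{EdgeStacks: map[int]bool{2: false}},
	}
	fmt.Println(numDeployments(1, relations)) // 2
	fmt.Println(numDeployments(2, relations)) // 1
}
```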


@ -1,104 +0,0 @@
package endpointrelation
import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/database/boltdb"
"github.com/portainer/portainer/api/internal/edge/cache"
"github.com/stretchr/testify/require"
)
func TestUpdateRelation(t *testing.T) {
const endpointID = 1
const edgeStackID1 = 1
const edgeStackID2 = 2
var conn portainer.Connection = &boltdb.DbConnection{Path: t.TempDir()}
err := conn.Open()
require.NoError(t, err)
defer conn.Close()
service, err := NewService(conn)
require.NoError(t, err)
updateStackFnTxCalled := false
edgeStacks := make(map[portainer.EdgeStackID]portainer.EdgeStack)
edgeStacks[edgeStackID1] = portainer.EdgeStack{ID: edgeStackID1}
edgeStacks[edgeStackID2] = portainer.EdgeStack{ID: edgeStackID2}
service.RegisterUpdateStackFunction(func(tx portainer.Transaction, ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error {
updateStackFnTxCalled = true
s, ok := edgeStacks[ID]
require.True(t, ok)
updateFunc(&s)
edgeStacks[ID] = s
return nil
})
// Nil relation
cache.Set(endpointID, []byte("value"))
err = service.UpdateEndpointRelation(endpointID, nil)
_, cacheKeyExists := cache.Get(endpointID)
require.NoError(t, err)
require.False(t, updateStackFnTxCalled)
require.False(t, cacheKeyExists)
// Add a relation to two edge stacks
cache.Set(endpointID, []byte("value"))
err = service.UpdateEndpointRelation(endpointID, &portainer.EndpointRelation{
EndpointID: endpointID,
EdgeStacks: map[portainer.EdgeStackID]bool{
edgeStackID1: true,
edgeStackID2: true,
},
})
_, cacheKeyExists = cache.Get(endpointID)
require.NoError(t, err)
require.True(t, updateStackFnTxCalled)
require.False(t, cacheKeyExists)
require.Equal(t, 1, edgeStacks[edgeStackID1].NumDeployments)
require.Equal(t, 1, edgeStacks[edgeStackID2].NumDeployments)
// Remove a relation to one edge stack
updateStackFnTxCalled = false
cache.Set(endpointID, []byte("value"))
err = service.UpdateEndpointRelation(endpointID, &portainer.EndpointRelation{
EndpointID: endpointID,
EdgeStacks: map[portainer.EdgeStackID]bool{
2: true,
},
})
_, cacheKeyExists = cache.Get(endpointID)
require.NoError(t, err)
require.True(t, updateStackFnTxCalled)
require.False(t, cacheKeyExists)
require.Equal(t, 0, edgeStacks[edgeStackID1].NumDeployments)
require.Equal(t, 1, edgeStacks[edgeStackID2].NumDeployments)
// Delete the relation
updateStackFnTxCalled = false
cache.Set(endpointID, []byte("value"))
err = service.DeleteEndpointRelation(endpointID)
_, cacheKeyExists = cache.Get(endpointID)
require.NoError(t, err)
require.True(t, updateStackFnTxCalled)
require.False(t, cacheKeyExists)
require.Equal(t, 0, edgeStacks[edgeStackID1].NumDeployments)
require.Equal(t, 0, edgeStacks[edgeStackID2].NumDeployments)
}


@ -13,8 +13,6 @@ type ServiceTx struct {
tx portainer.Transaction
}
var _ dataservices.EndpointRelationService = &ServiceTx{}
func (service ServiceTx) BucketName() string {
return BucketName
}
@ -76,66 +74,6 @@ func (service ServiceTx) UpdateEndpointRelation(endpointID portainer.EndpointID,
return nil
}
func (service ServiceTx) AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error {
for _, endpointID := range endpointIDs {
rel, err := service.EndpointRelation(endpointID)
if err != nil {
return err
}
rel.EdgeStacks[edgeStackID] = true
identifier := service.service.connection.ConvertToKey(int(endpointID))
err = service.tx.UpdateObject(BucketName, identifier, rel)
cache.Del(endpointID)
if err != nil {
return err
}
}
service.service.mu.Lock()
service.service.endpointRelationsCache = nil
service.service.mu.Unlock()
if err := service.service.updateStackFnTx(service.tx, edgeStackID, func(edgeStack *portainer.EdgeStack) {
edgeStack.NumDeployments += len(endpointIDs)
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
}
return nil
}
func (service ServiceTx) RemoveEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error {
for _, endpointID := range endpointIDs {
rel, err := service.EndpointRelation(endpointID)
if err != nil {
return err
}
delete(rel.EdgeStacks, edgeStackID)
identifier := service.service.connection.ConvertToKey(int(endpointID))
err = service.tx.UpdateObject(BucketName, identifier, rel)
cache.Del(endpointID)
if err != nil {
return err
}
}
service.service.mu.Lock()
service.service.endpointRelationsCache = nil
service.service.mu.Unlock()
if err := service.service.updateStackFnTx(service.tx, edgeStackID, func(edgeStack *portainer.EdgeStack) {
edgeStack.NumDeployments -= len(endpointIDs)
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
}
return nil
}
// DeleteEndpointRelation deletes an Environment(Endpoint) relation object
func (service ServiceTx) DeleteEndpointRelation(endpointID portainer.EndpointID) error {
deletedRelation, _ := service.EndpointRelation(endpointID)
@ -186,49 +124,53 @@ func (service ServiceTx) cachedEndpointRelations() ([]portainer.EndpointRelation
}
func (service ServiceTx) updateEdgeStacksAfterRelationChange(previousRelationState *portainer.EndpointRelation, updatedRelationState *portainer.EndpointRelation) {
relations, _ := service.EndpointRelations()
stacksToUpdate := map[portainer.EdgeStackID]bool{}
if previousRelationState != nil {
for stackId, enabled := range previousRelationState.EdgeStacks {
// flag stack for update if stack is not in the updated relation state
// = stack has been removed for this relation
// or this relation has been deleted
if enabled && (updatedRelationState == nil || !updatedRelationState.EdgeStacks[stackId]) {
if err := service.service.updateStackFnTx(service.tx, stackId, func(edgeStack *portainer.EdgeStack) {
// Sanity check
if edgeStack.NumDeployments <= 0 {
log.Error().
Int("edgestack_id", int(edgeStack.ID)).
Int("endpoint_id", int(previousRelationState.EndpointID)).
Int("num_deployments", edgeStack.NumDeployments).
Msg("cannot decrement the number of deployments for an edge stack with zero deployments")
return
}
edgeStack.NumDeployments--
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
}
cache.Del(previousRelationState.EndpointID)
stacksToUpdate[stackId] = true
}
}
}
if updatedRelationState == nil {
return
if updatedRelationState != nil {
for stackId, enabled := range updatedRelationState.EdgeStacks {
// flag stack for update if stack is not in the previous relation state
// = stack has been added for this relation
if enabled && (previousRelationState == nil || !previousRelationState.EdgeStacks[stackId]) {
stacksToUpdate[stackId] = true
}
}
}
for stackId, enabled := range updatedRelationState.EdgeStacks {
// flag stack for update if stack is not in the previous relation state
// = stack has been added for this relation
if enabled && (previousRelationState == nil || !previousRelationState.EdgeStacks[stackId]) {
if err := service.service.updateStackFnTx(service.tx, stackId, func(edgeStack *portainer.EdgeStack) {
edgeStack.NumDeployments++
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
}
// for each stack referenced by the updated relation
// list how many time this stack is referenced in all relations
// in order to update the stack deployments count
for refStackId, refStackEnabled := range stacksToUpdate {
if !refStackEnabled {
continue
}
cache.Del(updatedRelationState.EndpointID)
numDeployments := 0
for _, r := range relations {
for sId, enabled := range r.EdgeStacks {
if enabled && sId == refStackId {
numDeployments += 1
}
}
}
if err := service.service.updateStackFnTx(service.tx, refStackId, func(edgeStack *portainer.EdgeStack) {
edgeStack.NumDeployments = numDeployments
}); err != nil {
log.Error().Err(err).Msg("could not update the number of deployments")
}
}
}


@ -12,7 +12,6 @@ type (
EdgeGroup() EdgeGroupService
EdgeJob() EdgeJobService
EdgeStack() EdgeStackService
EdgeStackStatus() EdgeStackStatusService
Endpoint() EndpointService
EndpointGroup() EndpointGroupService
EndpointRelation() EndpointRelationService
@ -40,8 +39,8 @@ type (
Open() (newStore bool, err error)
Init() error
Close() error
UpdateTx(func(tx DataStoreTx) error) error
ViewTx(func(tx DataStoreTx) error) error
UpdateTx(func(DataStoreTx) error) error
ViewTx(func(DataStoreTx) error) error
MigrateData() error
Rollback(force bool) error
CheckCurrentEdition() error
@ -90,16 +89,6 @@ type (
BucketName() string
}
EdgeStackStatusService interface {
Create(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error
Read(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) (*portainer.EdgeStackStatusForEnv, error)
ReadAll(edgeStackID portainer.EdgeStackID) ([]portainer.EdgeStackStatusForEnv, error)
Update(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error
Delete(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) error
DeleteAll(edgeStackID portainer.EdgeStackID) error
Clear(edgeStackID portainer.EdgeStackID, relatedEnvironmentsIDs []portainer.EndpointID) error
}
// EndpointService represents a service for managing environment(endpoint) data
EndpointService interface {
Endpoint(ID portainer.EndpointID) (*portainer.Endpoint, error)
@ -126,8 +115,6 @@ type (
EndpointRelation(EndpointID portainer.EndpointID) (*portainer.EndpointRelation, error)
Create(endpointRelation *portainer.EndpointRelation) error
UpdateEndpointRelation(EndpointID portainer.EndpointID, endpointRelation *portainer.EndpointRelation) error
AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error
RemoveEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error
DeleteEndpointRelation(EndpointID portainer.EndpointID) error
BucketName() string
}
@ -170,7 +157,6 @@ type (
SnapshotService interface {
BaseCRUD[portainer.Snapshot, portainer.EndpointID]
ReadWithoutSnapshotRaw(ID portainer.EndpointID) (*portainer.Snapshot, error)
}
// SSLSettingsService represents a service for managing application settings


@ -38,33 +38,3 @@ func (service *Service) Tx(tx portainer.Transaction) ServiceTx {
func (service *Service) Create(snapshot *portainer.Snapshot) error {
return service.Connection.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot)
}
func (service *Service) ReadWithoutSnapshotRaw(ID portainer.EndpointID) (*portainer.Snapshot, error) {
var snapshot *portainer.Snapshot
err := service.Connection.ViewTx(func(tx portainer.Transaction) error {
var err error
snapshot, err = service.Tx(tx).ReadWithoutSnapshotRaw(ID)
return err
})
return snapshot, err
}
func (service *Service) ReadRawMessage(ID portainer.EndpointID) (*portainer.SnapshotRawMessage, error) {
var snapshot *portainer.SnapshotRawMessage
err := service.Connection.ViewTx(func(tx portainer.Transaction) error {
var err error
snapshot, err = service.Tx(tx).ReadRawMessage(ID)
return err
})
return snapshot, err
}
func (service *Service) CreateRawMessage(snapshot *portainer.SnapshotRawMessage) error {
return service.Connection.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot)
}


@ -12,42 +12,3 @@ type ServiceTx struct {
func (service ServiceTx) Create(snapshot *portainer.Snapshot) error {
return service.Tx.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot)
}
func (service ServiceTx) ReadWithoutSnapshotRaw(ID portainer.EndpointID) (*portainer.Snapshot, error) {
var snapshot struct {
Docker *struct {
X struct{} `json:"DockerSnapshotRaw"`
*portainer.DockerSnapshot
} `json:"Docker"`
portainer.Snapshot
}
identifier := service.Connection.ConvertToKey(int(ID))
if err := service.Tx.GetObject(service.Bucket, identifier, &snapshot); err != nil {
return nil, err
}
if snapshot.Docker != nil {
snapshot.Snapshot.Docker = snapshot.Docker.DockerSnapshot
}
return &snapshot.Snapshot, nil
}
func (service ServiceTx) ReadRawMessage(ID portainer.EndpointID) (*portainer.SnapshotRawMessage, error) {
var snapshot = portainer.SnapshotRawMessage{}
identifier := service.Connection.ConvertToKey(int(ID))
if err := service.Tx.GetObject(service.Bucket, identifier, &snapshot); err != nil {
return nil, err
}
return &snapshot, nil
}
func (service ServiceTx) CreateRawMessage(snapshot *portainer.SnapshotRawMessage) error {
return service.Tx.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot)
}
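`ReadWithoutSnapshotRaw` above relies on a JSON field-shadowing trick: the outer struct re-declares a field with the same JSON tag at a shallower depth, so `encoding/json` decodes the heavy payload into an empty struct and effectively drops it. A minimal, self-contained illustration with made-up types:

```go
// Illustrative only: the field-shadowing trick used by ReadWithoutSnapshotRaw.
package main

import (
	"encoding/json"
	"fmt"
)

type Inner struct {
	Name string          `json:"Name"`
	Raw  json.RawMessage `json:"Raw"` // the heavy field we want to skip
}

type Outer struct {
	Inner
	// Shadows Inner.Raw for the decoder only: the same JSON key now maps to a
	// shallower field, so its bytes are decoded into an empty struct and dropped.
	RawShadow struct{} `json:"Raw"`
}

func main() {
	data := []byte(`{"Name":"snapshot-1","Raw":{"huge":"payload"}}`)

	var o Outer
	if err := json.Unmarshal(data, &o); err != nil {
		panic(err)
	}
	fmt.Println(o.Name, len(o.Raw)) // snapshot-1 0
}
```

The trick works because `encoding/json` resolves conflicting tags by picking the shallowest field, so the large raw snapshot never gets allocated or parsed into the inner type.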


@ -232,7 +232,7 @@ func (store *Store) createAccount(username, password string, role portainer.User
user := &portainer.User{Username: username, Role: role}
// encrypt the password
cs := crypto.Service{}
cs := &crypto.Service{}
user.Password, err = cs.Hash(password)
if err != nil {
return err
@ -259,7 +259,7 @@ func (store *Store) checkAccount(username, expectPassword string, expectRole por
}
// Check the password
cs := crypto.Service{}
cs := &crypto.Service{}
expectPasswordHash, err := cs.Hash(expectPassword)
if err != nil {
return errors.Wrap(err, "hash failed")


@ -40,11 +40,13 @@ func (store *Store) MigrateData() error {
}
// before we alter anything in the DB, create a backup
if _, err := store.Backup(""); err != nil {
_, err = store.Backup("")
if err != nil {
return errors.Wrap(err, "while backing up database")
}
if err := store.FailSafeMigrate(migrator, version); err != nil {
err = store.FailSafeMigrate(migrator, version)
if err != nil {
err = errors.Wrap(err, "failed to migrate database")
log.Warn().Err(err).Msg("migration failed, restoring database to previous version")
@ -83,9 +85,7 @@ func (store *Store) newMigratorParameters(version *models.Version, flags *portai
DockerhubService: store.DockerHubService,
AuthorizationService: authorization.NewService(store),
EdgeStackService: store.EdgeStackService,
EdgeStackStatusService: store.EdgeStackStatusService,
EdgeJobService: store.EdgeJobService,
EdgeGroupService: store.EdgeGroupService,
TunnelServerService: store.TunnelServerService,
PendingActionsService: store.PendingActionsService,
}
@ -140,7 +140,8 @@ func (store *Store) connectionRollback(force bool) error {
}
}
if err := store.Restore(); err != nil {
err := store.Restore()
if err != nil {
return err
}


@ -1,31 +0,0 @@
package migrator
import portainer "github.com/portainer/portainer/api"
func (m *Migrator) migrateEdgeStacksStatuses_2_31_0() error {
edgeStacks, err := m.edgeStackService.EdgeStacks()
if err != nil {
return err
}
for _, edgeStack := range edgeStacks {
for envID, status := range edgeStack.Status {
if err := m.edgeStackStatusService.Create(edgeStack.ID, envID, &portainer.EdgeStackStatusForEnv{
EndpointID: envID,
Status: status.Status,
DeploymentInfo: status.DeploymentInfo,
ReadyRePullImage: status.ReadyRePullImage,
}); err != nil {
return err
}
}
edgeStack.Status = nil
if err := m.edgeStackService.UpdateEdgeStack(edgeStack.ID, &edgeStack); err != nil {
return err
}
}
return nil
}


@ -1,33 +0,0 @@
package migrator
import (
"github.com/pkg/errors"
portainer "github.com/portainer/portainer/api"
perrors "github.com/portainer/portainer/api/dataservices/errors"
"github.com/portainer/portainer/api/internal/endpointutils"
)
func (m *Migrator) addEndpointRelationForEdgeAgents_2_32_0() error {
endpoints, err := m.endpointService.Endpoints()
if err != nil {
return err
}
for _, endpoint := range endpoints {
if endpointutils.IsEdgeEndpoint(&endpoint) {
_, err := m.endpointRelationService.EndpointRelation(endpoint.ID)
if err != nil && errors.Is(err, perrors.ErrObjectNotFound) {
relation := &portainer.EndpointRelation{
EndpointID: endpoint.ID,
EdgeStacks: make(map[portainer.EdgeStackID]bool),
}
if err := m.endpointRelationService.Create(relation); err != nil {
return err
}
}
}
}
return nil
}


@ -1,23 +0,0 @@
package migrator
import (
"github.com/portainer/portainer/api/roar"
)
func (m *Migrator) migrateEdgeGroupEndpointsToRoars_2_33_0() error {
egs, err := m.edgeGroupService.ReadAll()
if err != nil {
return err
}
for _, eg := range egs {
eg.EndpointIDs = roar.FromSlice(eg.Endpoints)
eg.Endpoints = nil
if err := m.edgeGroupService.Update(eg.ID, &eg); err != nil {
return err
}
}
return nil
}


@ -94,10 +94,6 @@ func (m *Migrator) updateEdgeStackStatusForDB100() error {
continue
}
if environmentStatus.Details == nil {
continue
}
statusArray := []portainer.EdgeStackDeploymentStatus{}
if environmentStatus.Details.Pending {
statusArray = append(statusArray, portainer.EdgeStackDeploymentStatus{


@ -18,7 +18,8 @@ func (m *Migrator) updateResourceControlsToDBVersion22() error {
for _, resourceControl := range legacyResourceControls {
resourceControl.AdministratorsOnly = false
if err := m.resourceControlService.Update(resourceControl.ID, &resourceControl); err != nil {
err := m.resourceControlService.Update(resourceControl.ID, &resourceControl)
if err != nil {
return err
}
}
@ -41,8 +42,8 @@ func (m *Migrator) updateUsersAndRolesToDBVersion22() error {
for _, user := range legacyUsers {
user.PortainerAuthorizations = authorization.DefaultPortainerAuthorizations()
if err := m.userService.Update(user.ID, &user); err != nil {
err = m.userService.Update(user.ID, &user)
if err != nil {
return err
}
}
@ -51,47 +52,38 @@ func (m *Migrator) updateUsersAndRolesToDBVersion22() error {
if err != nil {
return err
}
endpointAdministratorRole.Priority = 1
endpointAdministratorRole.Authorizations = authorization.DefaultEndpointAuthorizationsForEndpointAdministratorRole()
if err := m.roleService.Update(endpointAdministratorRole.ID, endpointAdministratorRole); err != nil {
return err
}
err = m.roleService.Update(endpointAdministratorRole.ID, endpointAdministratorRole)
helpDeskRole, err := m.roleService.Read(portainer.RoleID(2))
if err != nil {
return err
}
helpDeskRole.Priority = 2
helpDeskRole.Authorizations = authorization.DefaultEndpointAuthorizationsForHelpDeskRole(settings.AllowVolumeBrowserForRegularUsers)
if err := m.roleService.Update(helpDeskRole.ID, helpDeskRole); err != nil {
return err
}
err = m.roleService.Update(helpDeskRole.ID, helpDeskRole)
standardUserRole, err := m.roleService.Read(portainer.RoleID(3))
if err != nil {
return err
}
standardUserRole.Priority = 3
standardUserRole.Authorizations = authorization.DefaultEndpointAuthorizationsForStandardUserRole(settings.AllowVolumeBrowserForRegularUsers)
if err := m.roleService.Update(standardUserRole.ID, standardUserRole); err != nil {
return err
}
err = m.roleService.Update(standardUserRole.ID, standardUserRole)
readOnlyUserRole, err := m.roleService.Read(portainer.RoleID(4))
if err != nil {
return err
}
readOnlyUserRole.Priority = 4
readOnlyUserRole.Authorizations = authorization.DefaultEndpointAuthorizationsForReadOnlyUserRole(settings.AllowVolumeBrowserForRegularUsers)
if err := m.roleService.Update(readOnlyUserRole.ID, readOnlyUserRole); err != nil {
err = m.roleService.Update(readOnlyUserRole.ID, readOnlyUserRole)
if err != nil {
return err
}


@ -75,10 +75,6 @@ func (m *Migrator) updateEdgeStackStatusForDB80() error {
for _, edgeStack := range edgeStacks {
for endpointId, status := range edgeStack.Status {
if status.Details == nil {
status.Details = &portainer.EdgeStackStatusDetails{}
}
switch status.Type {
case portainer.EdgeStackStatusPending:
status.Details.Pending = true
@ -97,10 +93,10 @@ func (m *Migrator) updateEdgeStackStatusForDB80() error {
edgeStack.Status[endpointId] = status
}
if err := m.edgeStackService.UpdateEdgeStack(edgeStack.ID, &edgeStack); err != nil {
err = m.edgeStackService.UpdateEdgeStack(edgeStack.ID, &edgeStack)
if err != nil {
return err
}
}
return nil
}


@ -3,13 +3,12 @@ package migrator
import (
"errors"
"github.com/Masterminds/semver"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/database/models"
"github.com/portainer/portainer/api/dataservices/dockerhub"
"github.com/portainer/portainer/api/dataservices/edgegroup"
"github.com/portainer/portainer/api/dataservices/edgejob"
"github.com/portainer/portainer/api/dataservices/edgestack"
"github.com/portainer/portainer/api/dataservices/edgestackstatus"
"github.com/portainer/portainer/api/dataservices/endpoint"
"github.com/portainer/portainer/api/dataservices/endpointgroup"
"github.com/portainer/portainer/api/dataservices/endpointrelation"
@ -28,8 +27,6 @@ import (
"github.com/portainer/portainer/api/dataservices/user"
"github.com/portainer/portainer/api/dataservices/version"
"github.com/portainer/portainer/api/internal/authorization"
"github.com/Masterminds/semver"
"github.com/rs/zerolog/log"
)
@ -59,9 +56,7 @@ type (
authorizationService *authorization.Service
dockerhubService *dockerhub.Service
edgeStackService *edgestack.Service
edgeStackStatusService *edgestackstatus.Service
edgeJobService *edgejob.Service
edgeGroupService *edgegroup.Service
TunnelServerService *tunnelserver.Service
pendingActionsService *pendingactions.Service
}
@ -89,9 +84,7 @@ type (
AuthorizationService *authorization.Service
DockerhubService *dockerhub.Service
EdgeStackService *edgestack.Service
EdgeStackStatusService *edgestackstatus.Service
EdgeJobService *edgejob.Service
EdgeGroupService *edgegroup.Service
TunnelServerService *tunnelserver.Service
PendingActionsService *pendingactions.Service
}
@ -121,15 +114,12 @@ func NewMigrator(parameters *MigratorParameters) *Migrator {
authorizationService: parameters.AuthorizationService,
dockerhubService: parameters.DockerhubService,
edgeStackService: parameters.EdgeStackService,
edgeStackStatusService: parameters.EdgeStackStatusService,
edgeJobService: parameters.EdgeJobService,
edgeGroupService: parameters.EdgeGroupService,
TunnelServerService: parameters.TunnelServerService,
pendingActionsService: parameters.PendingActionsService,
}
migrator.initMigrations()
return migrator
}
@ -252,12 +242,6 @@ func (m *Migrator) initMigrations() {
m.migratePendingActionsDataForDB130,
)
m.addMigrations("2.31.0", m.migrateEdgeStacksStatuses_2_31_0)
m.addMigrations("2.32.0", m.addEndpointRelationForEdgeAgents_2_32_0)
m.addMigrations("2.33.0", m.migrateEdgeGroupEndpointsToRoars_2_33_0)
// Add new migrations above...
// One function per migration, each versions migration funcs in the same file.
}
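The `addMigrations` calls above register migration functions keyed by the version that introduces them. As a rough sketch of how such a registry can be drained (an illustration of the general idea only, not Portainer's actual `Migrator` logic), migrations registered for versions newer than the stored schema version are run in ascending semver order:

```go
// Hedged sketch of a version-keyed migration registry; names are illustrative.
package main

import (
	"fmt"
	"sort"

	"github.com/Masterminds/semver"
)

type migration func() error

func runPending(current string, registry map[string][]migration) error {
	cur, err := semver.NewVersion(current)
	if err != nil {
		return err
	}

	versions := make([]*semver.Version, 0, len(registry))
	for v := range registry {
		parsed, err := semver.NewVersion(v)
		if err != nil {
			return err
		}
		versions = append(versions, parsed)
	}
	sort.Sort(semver.Collection(versions))

	for _, v := range versions {
		if !v.GreaterThan(cur) {
			continue // already applied
		}
		for _, m := range registry[v.String()] {
			if err := m(); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	registry := map[string][]migration{
		"2.31.0": {func() error { fmt.Println("migrate statuses"); return nil }},
		"2.32.0": {func() error { fmt.Println("add relations"); return nil }},
	}
	// With a stored schema version of 2.31.0, only the 2.32.0 migration runs.
	if err := runPending("2.31.0", registry); err != nil {
		panic(err)
	}
}
```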


@ -13,7 +13,6 @@ import (
"github.com/portainer/portainer/api/dataservices/edgegroup"
"github.com/portainer/portainer/api/dataservices/edgejob"
"github.com/portainer/portainer/api/dataservices/edgestack"
"github.com/portainer/portainer/api/dataservices/edgestackstatus"
"github.com/portainer/portainer/api/dataservices/endpoint"
"github.com/portainer/portainer/api/dataservices/endpointgroup"
"github.com/portainer/portainer/api/dataservices/endpointrelation"
@ -40,8 +39,6 @@ import (
"github.com/segmentio/encoding/json"
)
var _ dataservices.DataStore = &Store{}
// Store defines the implementation of portainer.DataStore using
// BoltDB as the storage system.
type Store struct {
@ -54,7 +51,6 @@ type Store struct {
EdgeGroupService *edgegroup.Service
EdgeJobService *edgejob.Service
EdgeStackService *edgestack.Service
EdgeStackStatusService *edgestackstatus.Service
EndpointGroupService *endpointgroup.Service
EndpointService *endpoint.Service
EndpointRelationService *endpointrelation.Service
@ -111,13 +107,7 @@ func (store *Store) initServices() error {
return err
}
store.EdgeStackService = edgeStackService
endpointRelationService.RegisterUpdateStackFunction(edgeStackService.UpdateEdgeStackFuncTx)
edgeStackStatusService, err := edgestackstatus.NewService(store.connection)
if err != nil {
return err
}
store.EdgeStackStatusService = edgeStackStatusService
endpointRelationService.RegisterUpdateStackFunction(edgeStackService.UpdateEdgeStackFunc, edgeStackService.UpdateEdgeStackFuncTx)
edgeGroupService, err := edgegroup.NewService(store.connection)
if err != nil {
@ -279,10 +269,6 @@ func (store *Store) EdgeStack() dataservices.EdgeStackService {
return store.EdgeStackService
}
func (store *Store) EdgeStackStatus() dataservices.EdgeStackStatusService {
return store.EdgeStackStatusService
}
// Environment(Endpoint) gives access to the Environment(Endpoint) data management layer
func (store *Store) Endpoint() dataservices.EndpointService {
return store.EndpointService


@ -32,10 +32,6 @@ func (tx *StoreTx) EdgeStack() dataservices.EdgeStackService {
return tx.store.EdgeStackService.Tx(tx.tx)
}
func (tx *StoreTx) EdgeStackStatus() dataservices.EdgeStackStatusService {
return tx.store.EdgeStackStatusService.Tx(tx.tx)
}
func (tx *StoreTx) Endpoint() dataservices.EndpointService {
return tx.store.EndpointService.Tx(tx.tx)
}


@ -8,7 +8,6 @@
}
],
"edge_stack": null,
"edge_stack_status": null,
"edgegroups": null,
"edgejobs": null,
"endpoint_groups": [
@ -121,10 +120,6 @@
"Ecr": {
"Region": ""
},
"Github": {
"OrganisationName": "",
"UseOrganisation": false
},
"Gitlab": {
"InstanceURL": "",
"ProjectId": 0,
@ -610,12 +605,12 @@
"GlobalDeploymentOptions": {
"hideStacksFunctionality": false
},
"HelmRepositoryURL": "https://charts.bitnami.com/bitnami",
"HelmRepositoryURL": "",
"InternalAuthSettings": {
"RequiredPasswordLength": 12
},
"KubeconfigExpiry": "0",
"KubectlShellImage": "portainer/kubectl-shell:2.32.0",
"KubectlShellImage": "portainer/kubectl-shell:2.27.1",
"LDAPSettings": {
"AnonymousMode": true,
"AutoCreateUsers": true,
@ -683,11 +678,14 @@
"Images": null,
"Info": {
"Architecture": "",
"BridgeNfIp6tables": false,
"BridgeNfIptables": false,
"CDISpecDirs": null,
"CPUSet": false,
"CPUShares": false,
"CgroupDriver": "",
"ContainerdCommit": {
"Expected": "",
"ID": ""
},
"Containers": 0,
@ -711,6 +709,7 @@
"IndexServerAddress": "",
"InitBinary": "",
"InitCommit": {
"Expected": "",
"ID": ""
},
"Isolation": "",
@ -739,6 +738,7 @@
},
"RegistryConfig": null,
"RuncCommit": {
"Expected": "",
"ID": ""
},
"Runtimes": null,
@ -780,7 +780,6 @@
"ImageCount": 9,
"IsPodman": false,
"NodeCount": 0,
"PerformanceMetrics": null,
"RunningContainerCount": 5,
"ServiceCount": 0,
"StackCount": 2,
@ -944,7 +943,7 @@
}
],
"version": {
"VERSION": "{\"SchemaVersion\":\"2.32.0\",\"MigratorCount\":1,\"Edition\":1,\"InstanceID\":\"463d5c47-0ea5-4aca-85b1-405ceefee254\"}"
"VERSION": "{\"SchemaVersion\":\"2.27.1\",\"MigratorCount\":0,\"Edition\":1,\"InstanceID\":\"463d5c47-0ea5-4aca-85b1-405ceefee254\"}"
},
"webhooks": null
}


@ -0,0 +1,15 @@
package validate
import (
"github.com/go-playground/validator/v10"
portainer "github.com/portainer/portainer/api"
)
var validate *validator.Validate
func ValidateLDAPSettings(ldp *portainer.LDAPSettings) error {
validate = validator.New()
registerValidationMethods(validate)
return validate.Struct(ldp)
}


@ -0,0 +1,61 @@
package validate
import (
"testing"
portainer "github.com/portainer/portainer/api"
)
func TestValidateLDAPSettings(t *testing.T) {
tests := []struct {
name string
ldap portainer.LDAPSettings
wantErr bool
}{
{
name: "Empty LDAP Settings",
ldap: portainer.LDAPSettings{},
wantErr: true,
},
{
name: "With URL",
ldap: portainer.LDAPSettings{
AnonymousMode: true,
URL: "192.168.0.1:323",
},
wantErr: false,
},
{
name: "Validate URL and URLs",
ldap: portainer.LDAPSettings{
AnonymousMode: true,
URL: "192.168.0.1:323",
},
wantErr: false,
},
{
name: "validate client ldap",
ldap: portainer.LDAPSettings{
AnonymousMode: false,
ReaderDN: "CN=LDAP API Service Account",
Password: "Qu**dfUUU**",
URL: "aukdc15.pgc.co:389",
TLSConfig: portainer.TLSConfiguration{
TLS: false,
TLSSkipVerify: false,
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := ValidateLDAPSettings(&tt.ldap)
if (err == nil) == tt.wantErr {
t.Errorf("No error expected but got %s", err)
}
})
}
}


@ -0,0 +1,17 @@
package validate
import (
"github.com/go-playground/validator/v10"
)
func registerValidationMethods(v *validator.Validate) {
v.RegisterValidation("validate_bool", ValidateBool)
}
/**
* Validation methods below are being used for custom validation
*/
func ValidateBool(fl validator.FieldLevel) bool {
_, ok := fl.Field().Interface().(bool)
return ok
}
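For readers new to go-playground/validator, here is a self-contained sketch of the pattern added above: register a custom tag, then validate a struct that uses it. The struct and field names are illustrative, not taken from the Portainer settings types.

```go
// Register a custom validation tag and validate a struct with it.
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type settings struct {
	URL       string `validate:"required"`
	Anonymous bool   `validate:"validate_bool"`
}

// validateBool mirrors the custom validator shown in the diff above.
func validateBool(fl validator.FieldLevel) bool {
	_, ok := fl.Field().Interface().(bool)
	return ok
}

func main() {
	v := validator.New()
	if err := v.RegisterValidation("validate_bool", validateBool); err != nil {
		panic(err)
	}

	if err := v.Struct(settings{URL: "ldap.example.com:389", Anonymous: true}); err != nil {
		fmt.Println("invalid:", err)
		return
	}
	fmt.Println("valid")
}
```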


@ -73,6 +73,19 @@ func createLocalClient(endpoint *portainer.Endpoint) (*client.Client, error) {
)
}
func CreateClientFromEnv() (*client.Client, error) {
return client.NewClientWithOpts(
client.FromEnv,
client.WithAPIVersionNegotiation(),
)
}
func CreateSimpleClient() (*client.Client, error) {
return client.NewClientWithOpts(
client.WithAPIVersionNegotiation(),
)
}
func createTCPClient(endpoint *portainer.Endpoint, timeout *time.Duration) (*client.Client, error) {
httpCli, err := httpClient(endpoint, timeout)
if err != nil {
@ -181,11 +194,10 @@ func httpClient(endpoint *portainer.Endpoint, timeout *time.Duration) (*http.Cli
}
if endpoint.TLSConfig.TLS {
tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig)
tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig.TLSCACertPath, endpoint.TLSConfig.TLSCertPath, endpoint.TLSConfig.TLSKeyPath, endpoint.TLSConfig.TLSSkipVerify)
if err != nil {
return nil, err
}
transport.TLSClientConfig = tlsConfig
}
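
As a rough, standalone usage sketch (not taken from the diff), the environment-based constructor added above boils down to the following; the ping call and error handling are illustrative.

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Mirrors CreateClientFromEnv: honour DOCKER_HOST and friends, negotiate the API version.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		fmt.Println("could not create docker client:", err)
		return
	}
	defer cli.Close()

	ping, err := cli.Ping(context.Background())
	if err != nil {
		fmt.Println("daemon unreachable:", err)
		return
	}
	fmt.Println("docker API version:", ping.APIVersion)
}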

View file

@ -1,26 +0,0 @@
package client
import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/stretchr/testify/require"
)
func TestHttpClient(t *testing.T) {
// Valid TLS configuration
endpoint := &portainer.Endpoint{}
endpoint.TLSConfig = portainer.TLSConfiguration{TLS: true}
cli, err := httpClient(endpoint, nil)
require.NoError(t, err)
require.NotNil(t, cli)
// Invalid TLS configuration
endpoint.TLSConfig.TLSCertPath = "/invalid/path/client.crt"
endpoint.TLSConfig.TLSKeyPath = "/invalid/path/client.key"
cli, err = httpClient(endpoint, nil)
require.Error(t, err)
require.Nil(t, cli)
}

View file

@ -38,10 +38,10 @@ func NewClientWithRegistry(registryClient *RegistryClient, clientFactory *docker
func (c *DigestClient) RemoteDigest(image Image) (digest.Digest, error) {
ctx, cancel := c.timeoutContext()
defer cancel()
// Docker references with both a tag and digest are currently not supported
if image.Tag != "" && image.Digest != "" {
if err := image.TrimDigest(); err != nil {
err := image.trimDigest()
if err != nil {
return "", err
}
}
@ -69,7 +69,7 @@ func (c *DigestClient) RemoteDigest(image Image) (digest.Digest, error) {
// Retrieve remote digest through HEAD request
rmDigest, err := docker.GetDigest(ctx, sysCtx, rmRef)
if err != nil {
// Fallback to public registry for hub
// fallback to public registry for hub
if image.HubLink != "" {
rmDigest, err = docker.GetDigest(ctx, c.sysCtx, rmRef)
if err == nil {
@ -131,7 +131,8 @@ func ParseRepoDigests(repoDigests []string) []digest.Digest {
func ParseRepoTags(repoTags []string) []*Image {
images := make([]*Image, 0)
for _, repoTag := range repoTags {
if image := ParseRepoTag(repoTag); image != nil {
image := ParseRepoTag(repoTag)
if image != nil {
images = append(images, image)
}
}
@ -146,7 +147,7 @@ func ParseRepoDigest(repoDigest string) digest.Digest {
d, err := digest.Parse(strings.Split(repoDigest, "@")[1])
if err != nil {
log.Warn().Err(err).Str("digest", repoDigest).Msg("skip invalid repo item")
log.Warn().Msgf("Skip invalid repo digest item: %s [error: %v]", repoDigest, err)
return ""
}
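
For illustration only, the digest-splitting logic above amounts to this standalone sketch built on the opencontainers go-digest package; the repo digest string is a made-up example.

package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	// A repo digest as reported by the Docker API: "<name>@<algorithm>:<hex>" (example value).
	repoDigest := "portainer/portainer-ce@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"

	parts := strings.SplitN(repoDigest, "@", 2)
	if len(parts) != 2 {
		fmt.Println("no digest part present")
		return
	}

	d, err := digest.Parse(parts[1])
	if err != nil {
		fmt.Println("skip invalid repo digest item:", err)
		return
	}
	fmt.Println("algorithm:", d.Algorithm(), "encoded:", d.Encoded())
}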

View file

@ -26,7 +26,7 @@ type Image struct {
Digest digest.Digest
HubLink string
named reference.Named
Opts ParseImageOptions `json:"-"`
opts ParseImageOptions
}
// ParseImageOptions holds image options for parsing.
@ -43,10 +43,9 @@ func (i *Image) Name() string {
// FullName returns the real full name, which may include the Tag or Digest of the image, Tag first.
func (i *Image) FullName() string {
if i.Tag == "" {
return i.Name() + "@" + i.Digest.String()
return fmt.Sprintf("%s@%s", i.Name(), i.Digest)
}
return i.Name() + ":" + i.Tag
return fmt.Sprintf("%s:%s", i.Name(), i.Tag)
}
// String returns the string representation of an image, including the Tag and Digest if they exist.
@ -67,25 +66,22 @@ func (i *Image) Reference() string {
func (i *Image) WithDigest(digest digest.Digest) (err error) {
i.Digest = digest
i.named, err = reference.WithDigest(i.named, digest)
return err
}
func (i *Image) WithTag(tag string) (err error) {
i.Tag = tag
i.named, err = reference.WithTag(i.named, tag)
return err
}
func (i *Image) TrimDigest() error {
func (i *Image) trimDigest() error {
i.Digest = ""
named, err := ParseImage(ParseImageOptions{Name: i.FullName()})
if err != nil {
return err
}
i.named = &named
return nil
}
@ -96,12 +92,11 @@ func ParseImage(parseOpts ParseImageOptions) (Image, error) {
if err != nil {
return Image{}, errors.Wrapf(err, "parsing image %s failed", parseOpts.Name)
}
// Add the latest tag if they did not provide one.
named = reference.TagNameOnly(named)
i := Image{
Opts: parseOpts,
opts: parseOpts,
named: named,
Domain: reference.Domain(named),
Path: reference.Path(named),
@ -127,16 +122,15 @@ func ParseImage(parseOpts ParseImageOptions) (Image, error) {
}
func (i *Image) hubLink() (string, error) {
if i.Opts.HubTpl != "" {
if i.opts.HubTpl != "" {
var out bytes.Buffer
tmpl, err := template.New("tmpl").
Option("missingkey=error").
Parse(i.Opts.HubTpl)
Parse(i.opts.HubTpl)
if err != nil {
return "", err
}
err = tmpl.Execute(&out, i)
return out.String(), err
}
@ -148,7 +142,6 @@ func (i *Image) hubLink() (string, error) {
prefix = "_"
path = strings.Replace(i.Path, "library/", "", 1)
}
return "https://hub.docker.com/" + prefix + "/" + path, nil
case "docker.bintray.io", "jfrog-docker-reg2.bintray.io":
return "https://bintray.com/jfrog/reg2/" + strings.ReplaceAll(i.Path, "/", "%3A"), nil

View file

@ -16,7 +16,7 @@ func TestImageParser(t *testing.T) {
})
is.NoError(err, "")
is.Equal("docker.io/portainer/portainer-ee:latest", image.FullName())
is.Equal("portainer/portainer-ee", image.Opts.Name)
is.Equal("portainer/portainer-ee", image.opts.Name)
is.Equal("latest", image.Tag)
is.Equal("portainer/portainer-ee", image.Path)
is.Equal("docker.io", image.Domain)
@ -32,7 +32,7 @@ func TestImageParser(t *testing.T) {
})
is.NoError(err, "")
is.Equal("gcr.io/k8s-minikube/kicbase@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("gcr.io/k8s-minikube/kicbase@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name)
is.Equal("", image.Tag)
is.Equal("k8s-minikube/kicbase", image.Path)
is.Equal("gcr.io", image.Domain)
@ -49,7 +49,7 @@ func TestImageParser(t *testing.T) {
})
is.NoError(err, "")
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name)
is.Equal("v0.0.30", image.Tag)
is.Equal("k8s-minikube/kicbase", image.Path)
is.Equal("gcr.io", image.Domain)
@ -71,7 +71,7 @@ func TestUpdateParsedImage(t *testing.T) {
is.NoError(err, "")
_ = image.WithTag("v0.0.31")
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.31", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name)
is.Equal("v0.0.31", image.Tag)
is.Equal("k8s-minikube/kicbase", image.Path)
is.Equal("gcr.io", image.Domain)
@ -89,7 +89,7 @@ func TestUpdateParsedImage(t *testing.T) {
is.NoError(err, "")
_ = image.WithDigest("sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b3")
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name)
is.Equal("v0.0.30", image.Tag)
is.Equal("k8s-minikube/kicbase", image.Path)
is.Equal("gcr.io", image.Domain)
@ -105,9 +105,9 @@ func TestUpdateParsedImage(t *testing.T) {
Name: "gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2",
})
is.NoError(err, "")
_ = image.TrimDigest()
_ = image.trimDigest()
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName())
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name)
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name)
is.Equal("v0.0.30", image.Tag)
is.Equal("k8s-minikube/kicbase", image.Path)
is.Equal("gcr.io", image.Domain)

View file

@ -29,7 +29,7 @@ func (c *RegistryClient) RegistryAuth(image Image) (string, string, error) {
return "", "", err
}
registry, err := findBestMatchRegistry(image.Opts.Name, registries)
registry, err := findBestMatchRegistry(image.opts.Name, registries)
if err != nil {
return "", "", err
}
@ -59,7 +59,7 @@ func (c *RegistryClient) EncodedRegistryAuth(image Image) (string, error) {
return "", err
}
registry, err := findBestMatchRegistry(image.Opts.Name, registries)
registry, err := findBestMatchRegistry(image.opts.Name, registries)
if err != nil {
return "", err
}

View file

@ -4,12 +4,10 @@ import (
portainer "github.com/portainer/portainer/api"
)
type kubernetesMockDeployer struct {
portainer.KubernetesDeployer
}
type kubernetesMockDeployer struct{}
// NewKubernetesDeployer creates a mock kubernetes deployer
func NewKubernetesDeployer() *kubernetesMockDeployer {
func NewKubernetesDeployer() portainer.KubernetesDeployer {
return &kubernetesMockDeployer{}
}
@ -20,7 +18,3 @@ func (deployer *kubernetesMockDeployer) Deploy(userID portainer.UserID, endpoint
func (deployer *kubernetesMockDeployer) Remove(userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) {
return "", nil
}
func (deployer *kubernetesMockDeployer) Restart(userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) {
return "", nil
}

View file

@ -1,8 +1,13 @@
package exec
import (
"context"
"bytes"
"fmt"
"os"
"os/exec"
"path"
"runtime"
"strings"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
@ -10,17 +15,13 @@ import (
"github.com/portainer/portainer/api/http/proxy/factory"
"github.com/portainer/portainer/api/http/proxy/factory/kubernetes"
"github.com/portainer/portainer/api/kubernetes/cli"
"github.com/portainer/portainer/pkg/libkubectl"
"github.com/pkg/errors"
)
const (
defaultServerURL = "https://kubernetes.default.svc"
)
// KubernetesDeployer represents a service to deploy resources inside a Kubernetes environment(endpoint).
type KubernetesDeployer struct {
binaryPath string
dataStore dataservices.DataStore
reverseTunnelService portainer.ReverseTunnelService
signatureService portainer.DigitalSignatureService
@ -30,8 +31,9 @@ type KubernetesDeployer struct {
}
// NewKubernetesDeployer initializes a new KubernetesDeployer service.
func NewKubernetesDeployer(kubernetesTokenCacheManager *kubernetes.TokenCacheManager, kubernetesClientFactory *cli.ClientFactory, datastore dataservices.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, proxyManager *proxy.Manager) *KubernetesDeployer {
func NewKubernetesDeployer(kubernetesTokenCacheManager *kubernetes.TokenCacheManager, kubernetesClientFactory *cli.ClientFactory, datastore dataservices.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, proxyManager *proxy.Manager, binaryPath string) *KubernetesDeployer {
return &KubernetesDeployer{
binaryPath: binaryPath,
dataStore: datastore,
reverseTunnelService: reverseTunnelService,
signatureService: signatureService,
@ -76,56 +78,63 @@ func (deployer *KubernetesDeployer) getToken(userID portainer.UserID, endpoint *
}
// Deploy upserts Kubernetes resources defined in manifest(s)
func (deployer *KubernetesDeployer) Deploy(userID portainer.UserID, endpoint *portainer.Endpoint, resources []string, namespace string) (string, error) {
return deployer.command("apply", userID, endpoint, resources, namespace)
func (deployer *KubernetesDeployer) Deploy(userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) {
return deployer.command("apply", userID, endpoint, manifestFiles, namespace)
}
// Remove deletes Kubernetes resources defined in manifest(s)
func (deployer *KubernetesDeployer) Remove(userID portainer.UserID, endpoint *portainer.Endpoint, resources []string, namespace string) (string, error) {
return deployer.command("delete", userID, endpoint, resources, namespace)
func (deployer *KubernetesDeployer) Remove(userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) {
return deployer.command("delete", userID, endpoint, manifestFiles, namespace)
}
func (deployer *KubernetesDeployer) command(operation string, userID portainer.UserID, endpoint *portainer.Endpoint, resources []string, namespace string) (string, error) {
func (deployer *KubernetesDeployer) command(operation string, userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) {
token, err := deployer.getToken(userID, endpoint, endpoint.Type == portainer.KubernetesLocalEnvironment)
if err != nil {
return "", errors.Wrap(err, "failed generating a user token")
}
serverURL := defaultServerURL
command := path.Join(deployer.binaryPath, "kubectl")
if runtime.GOOS == "windows" {
command = path.Join(deployer.binaryPath, "kubectl.exe")
}
args := []string{"--token", token}
if namespace != "" {
args = append(args, "--namespace", namespace)
}
if endpoint.Type == portainer.AgentOnKubernetesEnvironment || endpoint.Type == portainer.EdgeAgentOnKubernetesEnvironment {
url, proxy, err := deployer.getAgentURL(endpoint)
if err != nil {
return "", errors.WithMessage(err, "failed generating endpoint URL")
}
defer proxy.Close()
serverURL = url
args = append(args, "--server", url)
args = append(args, "--insecure-skip-tls-verify")
}
client, err := libkubectl.NewClient(&libkubectl.ClientAccess{
Token: token,
ServerUrl: serverURL,
}, namespace, "", true)
if operation == "delete" {
args = append(args, "--ignore-not-found=true")
}
args = append(args, operation)
for _, path := range manifestFiles {
args = append(args, "-f", strings.TrimSpace(path))
}
var stderr bytes.Buffer
cmd := exec.Command(command, args...)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, "POD_NAMESPACE=default")
cmd.Stderr = &stderr
output, err := cmd.Output()
if err != nil {
return "", errors.Wrap(err, "failed to create kubectl client")
return "", errors.Wrapf(err, "failed to execute kubectl command: %q", stderr.String())
}
operations := map[string]func(context.Context, []string) (string, error){
"apply": client.Apply,
"delete": client.Delete,
}
operationFunc, ok := operations[operation]
if !ok {
return "", errors.Errorf("unsupported operation: %s", operation)
}
output, err := operationFunc(context.Background(), resources)
if err != nil {
return "", errors.Wrapf(err, "failed to execute kubectl %s command", operation)
}
return output, nil
return string(output), nil
}
func (deployer *KubernetesDeployer) getAgentURL(endpoint *portainer.Endpoint) (string, *factory.ProxyServer, error) {
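
The newer side of this hunk dispatches on the operation name through a map of functions. A self-contained sketch of that pattern follows; the client type and its methods are stand-ins, not the real libkubectl API.

package main

import (
	"context"
	"errors"
	"fmt"
)

// fakeClient stands in for the kubectl client; only the shape of the calls matters here.
type fakeClient struct{}

func (fakeClient) Apply(ctx context.Context, files []string) (string, error)  { return "applied", nil }
func (fakeClient) Delete(ctx context.Context, files []string) (string, error) { return "deleted", nil }

func run(c fakeClient, operation string, resources []string) (string, error) {
	operations := map[string]func(context.Context, []string) (string, error){
		"apply":  c.Apply,
		"delete": c.Delete,
	}

	operationFunc, ok := operations[operation]
	if !ok {
		return "", errors.New("unsupported operation: " + operation)
	}
	return operationFunc(context.Background(), resources)
}

func main() {
	out, err := run(fakeClient{}, "apply", []string{"manifest.yaml"})
	fmt.Println(out, err) // applied <nil>

	_, err = run(fakeClient{}, "rollout-restart", nil)
	fmt.Println(err) // unsupported operation: rollout-restart
}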

View file

@ -1,173 +0,0 @@
package exec
import (
"context"
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
type mockKubectlClient struct {
applyFunc func(ctx context.Context, files []string) error
deleteFunc func(ctx context.Context, files []string) error
rolloutRestartFunc func(ctx context.Context, resources []string) error
}
func (m *mockKubectlClient) Apply(ctx context.Context, files []string) error {
if m.applyFunc != nil {
return m.applyFunc(ctx, files)
}
return nil
}
func (m *mockKubectlClient) Delete(ctx context.Context, files []string) error {
if m.deleteFunc != nil {
return m.deleteFunc(ctx, files)
}
return nil
}
func (m *mockKubectlClient) RolloutRestart(ctx context.Context, resources []string) error {
if m.rolloutRestartFunc != nil {
return m.rolloutRestartFunc(ctx, resources)
}
return nil
}
func testExecuteKubectlOperation(client *mockKubectlClient, operation string, manifestFiles []string) error {
operations := map[string]func(context.Context, []string) error{
"apply": client.Apply,
"delete": client.Delete,
"rollout-restart": client.RolloutRestart,
}
operationFunc, ok := operations[operation]
if !ok {
return fmt.Errorf("unsupported operation: %s", operation)
}
if err := operationFunc(context.Background(), manifestFiles); err != nil {
return fmt.Errorf("failed to execute kubectl %s command: %w", operation, err)
}
return nil
}
func TestExecuteKubectlOperation_Apply_Success(t *testing.T) {
called := false
mockClient := &mockKubectlClient{
applyFunc: func(ctx context.Context, files []string) error {
called = true
assert.Equal(t, []string{"manifest1.yaml", "manifest2.yaml"}, files)
return nil
},
}
manifests := []string{"manifest1.yaml", "manifest2.yaml"}
err := testExecuteKubectlOperation(mockClient, "apply", manifests)
assert.NoError(t, err)
assert.True(t, called)
}
func TestExecuteKubectlOperation_Apply_Error(t *testing.T) {
expectedErr := errors.New("kubectl apply failed")
called := false
mockClient := &mockKubectlClient{
applyFunc: func(ctx context.Context, files []string) error {
called = true
assert.Equal(t, []string{"error.yaml"}, files)
return expectedErr
},
}
manifests := []string{"error.yaml"}
err := testExecuteKubectlOperation(mockClient, "apply", manifests)
assert.Error(t, err)
assert.Contains(t, err.Error(), expectedErr.Error())
assert.True(t, called)
}
func TestExecuteKubectlOperation_Delete_Success(t *testing.T) {
called := false
mockClient := &mockKubectlClient{
deleteFunc: func(ctx context.Context, files []string) error {
called = true
assert.Equal(t, []string{"manifest1.yaml"}, files)
return nil
},
}
manifests := []string{"manifest1.yaml"}
err := testExecuteKubectlOperation(mockClient, "delete", manifests)
assert.NoError(t, err)
assert.True(t, called)
}
func TestExecuteKubectlOperation_Delete_Error(t *testing.T) {
expectedErr := errors.New("kubectl delete failed")
called := false
mockClient := &mockKubectlClient{
deleteFunc: func(ctx context.Context, files []string) error {
called = true
assert.Equal(t, []string{"error.yaml"}, files)
return expectedErr
},
}
manifests := []string{"error.yaml"}
err := testExecuteKubectlOperation(mockClient, "delete", manifests)
assert.Error(t, err)
assert.Contains(t, err.Error(), expectedErr.Error())
assert.True(t, called)
}
func TestExecuteKubectlOperation_RolloutRestart_Success(t *testing.T) {
called := false
mockClient := &mockKubectlClient{
rolloutRestartFunc: func(ctx context.Context, resources []string) error {
called = true
assert.Equal(t, []string{"deployment/nginx"}, resources)
return nil
},
}
resources := []string{"deployment/nginx"}
err := testExecuteKubectlOperation(mockClient, "rollout-restart", resources)
assert.NoError(t, err)
assert.True(t, called)
}
func TestExecuteKubectlOperation_RolloutRestart_Error(t *testing.T) {
expectedErr := errors.New("kubectl rollout restart failed")
called := false
mockClient := &mockKubectlClient{
rolloutRestartFunc: func(ctx context.Context, resources []string) error {
called = true
assert.Equal(t, []string{"deployment/error"}, resources)
return expectedErr
},
}
resources := []string{"deployment/error"}
err := testExecuteKubectlOperation(mockClient, "rollout-restart", resources)
assert.Error(t, err)
assert.Contains(t, err.Error(), expectedErr.Error())
assert.True(t, called)
}
func TestExecuteKubectlOperation_UnsupportedOperation(t *testing.T) {
mockClient := &mockKubectlClient{}
err := testExecuteKubectlOperation(mockClient, "unsupported", []string{})
assert.Error(t, err)
assert.Contains(t, err.Error(), "unsupported operation")
}

View file

@ -127,7 +127,7 @@ func (manager *SwarmStackManager) Remove(stack *portainer.Stack, endpoint *porta
return err
}
args = append(args, "stack", "rm", "--detach=false", stack.Name)
args = append(args, "stack", "rm", stack.Name)
return runCommandAndCaptureStdErr(command, args, nil, "")
}

View file

@ -68,7 +68,7 @@ func copyFile(src, dst string) error {
defer from.Close()
// has to include 'execute' bit, otherwise fails. MkdirAll follows `mkdir -m` restrictions
if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
if err := os.MkdirAll(filepath.Dir(dst), 0744); err != nil {
return err
}
to, err := os.Create(dst)
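
As a standalone illustration of the permission note in the comment above (created directories need the execute bit to be traversable), a minimal copy helper might look like this; the paths in main are examples.

package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
)

func copyFile(src, dst string) error {
	from, err := os.Open(src)
	if err != nil {
		return err
	}
	defer from.Close()

	// 0755 keeps the execute (traverse) bit on each created directory; without it the
	// directories cannot be entered and the subsequent Create fails.
	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
		return err
	}

	to, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer to.Close()

	_, err = io.Copy(to, from)
	return err
}

func main() {
	fmt.Println(copyFile("/tmp/example-src.txt", "/tmp/example-dir/sub/example-dst.txt"))
}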

View file

@ -15,19 +15,15 @@ type MultiFilterArgs []struct {
}
// MultiFilterDirForPerDevConfigs filters the given dirEntries with multiple filter args, returns the merged entries for the given device
func MultiFilterDirForPerDevConfigs(dirEntries []DirEntry, configPath string, multiFilterArgs MultiFilterArgs) ([]DirEntry, []string) {
func MultiFilterDirForPerDevConfigs(dirEntries []DirEntry, configPath string, multiFilterArgs MultiFilterArgs) []DirEntry {
var filteredDirEntries []DirEntry
var envFiles []string
for _, multiFilterArg := range multiFilterArgs {
tmp, efs := FilterDirForPerDevConfigs(dirEntries, multiFilterArg.FilterKey, configPath, multiFilterArg.FilterType)
tmp := FilterDirForPerDevConfigs(dirEntries, multiFilterArg.FilterKey, configPath, multiFilterArg.FilterType)
filteredDirEntries = append(filteredDirEntries, tmp...)
envFiles = append(envFiles, efs...)
}
return deduplicate(filteredDirEntries), envFiles
return deduplicate(filteredDirEntries)
}
func deduplicate(dirEntries []DirEntry) []DirEntry {
@ -36,7 +32,8 @@ func deduplicate(dirEntries []DirEntry) []DirEntry {
marks := make(map[string]struct{})
for _, dirEntry := range dirEntries {
if _, ok := marks[dirEntry.Name]; !ok {
_, ok := marks[dirEntry.Name]
if !ok {
marks[dirEntry.Name] = struct{}{}
deduplicatedDirEntries = append(deduplicatedDirEntries, dirEntry)
}
@ -47,33 +44,34 @@ func deduplicate(dirEntries []DirEntry) []DirEntry {
// FilterDirForPerDevConfigs filters the given dirEntries, returns entries for the given device
// For given configPath A/B/C, return entries:
// 1. all entries outside of dir A/B/C
// 2. For filterType file:
// 1. all entries outside of dir A
// 2. dir entries A, A/B, A/B/C
// 3. For filterType file:
// file entries: A/B/C/<deviceName> and A/B/C/<deviceName>.*
// 3. For filterType dir:
// 4. For filterType dir:
// dir entry: A/B/C/<deviceName>
// all entries: A/B/C/<deviceName>/*
func FilterDirForPerDevConfigs(dirEntries []DirEntry, deviceName, configPath string, filterType portainer.PerDevConfigsFilterType) ([]DirEntry, []string) {
func FilterDirForPerDevConfigs(dirEntries []DirEntry, deviceName, configPath string, filterType portainer.PerDevConfigsFilterType) []DirEntry {
var filteredDirEntries []DirEntry
var envFiles []string
for _, dirEntry := range dirEntries {
if shouldIncludeEntry(dirEntry, deviceName, configPath, filterType) {
filteredDirEntries = append(filteredDirEntries, dirEntry)
if shouldParseEnvVars(dirEntry, deviceName, configPath, filterType) {
envFiles = append(envFiles, dirEntry.Name)
}
}
}
return filteredDirEntries, envFiles
return filteredDirEntries
}
func shouldIncludeEntry(dirEntry DirEntry, deviceName, configPath string, filterType portainer.PerDevConfigsFilterType) bool {
// Include all entries outside of dir A
if !isInConfigDir(dirEntry, configPath) {
if !isInConfigRootDir(dirEntry, configPath) {
return true
}
// Include dir entries A, A/B, A/B/C
if isParentDir(dirEntry, configPath) {
return true
}
@ -92,9 +90,21 @@ func shouldIncludeEntry(dirEntry DirEntry, deviceName, configPath string, filter
return false
}
func isInConfigDir(dirEntry DirEntry, configPath string) bool {
// return true if entry name starts with "A/B"
return strings.HasPrefix(dirEntry.Name, appendTailSeparator(configPath))
func isInConfigRootDir(dirEntry DirEntry, configPath string) bool {
// get the first element of the configPath
rootDir := strings.Split(configPath, string(os.PathSeparator))[0]
// return true if entry name starts with "A/"
return strings.HasPrefix(dirEntry.Name, appendTailSeparator(rootDir))
}
func isParentDir(dirEntry DirEntry, configPath string) bool {
if dirEntry.IsFile {
return false
}
// return true for dir entries A, A/B, A/B/C
return strings.HasPrefix(appendTailSeparator(configPath), appendTailSeparator(dirEntry.Name))
}
func shouldIncludeFile(dirEntry DirEntry, deviceName, configPath string) bool {
@ -128,15 +138,6 @@ func shouldIncludeDir(dirEntry DirEntry, deviceName, configPath string) bool {
return strings.HasPrefix(dirEntry.Name, filterPrefix)
}
func shouldParseEnvVars(dirEntry DirEntry, deviceName, configPath string, filterType portainer.PerDevConfigsFilterType) bool {
if !dirEntry.IsFile {
return false
}
return isInConfigDir(dirEntry, configPath) &&
filepath.Base(dirEntry.Name) == deviceName+".env"
}
func appendTailSeparator(path string) string {
return fmt.Sprintf("%s%c", path, os.PathSeparator)
}
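
The core of the path checks above is a prefix comparison against the config path with a trailing separator appended. A tiny standalone sketch, with made-up entry names and output comments assuming a '/'-separated OS:

package main

import (
	"fmt"
	"os"
	"strings"
)

func appendTailSeparator(p string) string {
	return fmt.Sprintf("%s%c", p, os.PathSeparator)
}

// inConfigDir reports whether an entry lives strictly below configPath,
// mirroring the prefix check used by the filters above.
func inConfigDir(entryName, configPath string) bool {
	return strings.HasPrefix(entryName, appendTailSeparator(configPath))
}

func main() {
	fmt.Println(inConfigDir("edge-configs/device-a/app.conf", "edge-configs")) // true
	fmt.Println(inConfigDir("edge-configs_backup", "edge-configs"))            // false: sibling, not inside
	fmt.Println(inConfigDir("edge-configs", "edge-configs"))                   // false: the directory itself
}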

View file

@ -4,17 +4,14 @@ import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMultiFilterDirForPerDevConfigs(t *testing.T) {
f := func(dirEntries []DirEntry, configPath string, multiFilterArgs MultiFilterArgs, wantDirEntries []DirEntry) {
t.Helper()
dirEntries, _ = MultiFilterDirForPerDevConfigs(dirEntries, configPath, multiFilterArgs)
require.Equal(t, wantDirEntries, dirEntries)
type args struct {
dirEntries []DirEntry
configPath string
multiFilterArgs MultiFilterArgs
}
baseDirEntries := []DirEntry{
@ -29,94 +26,67 @@ func TestMultiFilterDirForPerDevConfigs(t *testing.T) {
{"configs/folder2/config2", "", true, 420},
}
// Filter file1
f(
baseDirEntries,
"configs",
MultiFilterArgs{{"file1", portainer.PerDevConfigsTypeFile}},
[]DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[3]},
)
// Filter folder1
f(
baseDirEntries,
"configs",
MultiFilterArgs{{"folder1", portainer.PerDevConfigsTypeDir}},
[]DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6]},
)
// Filter file1 and folder1
f(
baseDirEntries,
"configs",
MultiFilterArgs{{"folder1", portainer.PerDevConfigsTypeDir}},
[]DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6]},
)
// Filter file1 and file2
f(
baseDirEntries,
"configs",
MultiFilterArgs{
{"file1", portainer.PerDevConfigsTypeFile},
{"file2", portainer.PerDevConfigsTypeFile},
tests := []struct {
name string
args args
want []DirEntry
}{
{
name: "filter file1",
args: args{
baseDirEntries,
"configs",
MultiFilterArgs{{"file1", portainer.PerDevConfigsTypeFile}},
},
want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[3]},
},
[]DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[3], baseDirEntries[4]},
)
// Filter folder1 and folder2
f(
baseDirEntries,
"configs",
MultiFilterArgs{
{"folder1", portainer.PerDevConfigsTypeDir},
{"folder2", portainer.PerDevConfigsTypeDir},
{
name: "filter folder1",
args: args{
baseDirEntries,
"configs",
MultiFilterArgs{{"folder1", portainer.PerDevConfigsTypeDir}},
},
want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6]},
},
{
name: "filter file1 and folder1",
args: args{
baseDirEntries,
"configs",
MultiFilterArgs{{"folder1", portainer.PerDevConfigsTypeDir}},
},
want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6]},
},
{
name: "filter file1 and file2",
args: args{
baseDirEntries,
"configs",
MultiFilterArgs{
{"file1", portainer.PerDevConfigsTypeFile},
{"file2", portainer.PerDevConfigsTypeFile},
},
},
want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[3], baseDirEntries[4]},
},
{
name: "filter folder1 and folder2",
args: args{
baseDirEntries,
"configs",
MultiFilterArgs{
{"folder1", portainer.PerDevConfigsTypeDir},
{"folder2", portainer.PerDevConfigsTypeDir},
},
},
want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6], baseDirEntries[7], baseDirEntries[8]},
},
[]DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6], baseDirEntries[7], baseDirEntries[8]},
)
}
func TestMultiFilterDirForPerDevConfigsEnvFiles(t *testing.T) {
f := func(dirEntries []DirEntry, configPath string, multiFilterArgs MultiFilterArgs, wantEnvFiles []string) {
t.Helper()
_, envFiles := MultiFilterDirForPerDevConfigs(dirEntries, configPath, multiFilterArgs)
require.Equal(t, wantEnvFiles, envFiles)
}
baseDirEntries := []DirEntry{
{".env", "", true, 420},
{"docker-compose.yaml", "", true, 420},
{"configs", "", false, 420},
{"configs/edge-id/edge-id.env", "", true, 420},
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equalf(t, tt.want, MultiFilterDirForPerDevConfigs(tt.args.dirEntries, tt.args.configPath, tt.args.multiFilterArgs), "MultiFilterDirForPerDevConfigs(%v, %v, %v)", tt.args.dirEntries, tt.args.configPath, tt.args.multiFilterArgs)
})
}
f(
baseDirEntries,
"configs",
MultiFilterArgs{{"edge-id", portainer.PerDevConfigsTypeDir}},
[]string{"configs/edge-id/edge-id.env"},
)
}
func TestIsInConfigDir(t *testing.T) {
f := func(dirEntry DirEntry, configPath string, expect bool) {
t.Helper()
actual := isInConfigDir(dirEntry, configPath)
assert.Equal(t, expect, actual)
}
f(DirEntry{Name: "edge-configs"}, "edge-configs", false)
f(DirEntry{Name: "edge-configs_backup"}, "edge-configs", false)
f(DirEntry{Name: "edge-configs/standalone-edge-agent-standard"}, "edge-configs", true)
f(DirEntry{Name: "parent/edge-configs/"}, "edge-configs", false)
f(DirEntry{Name: "edgestacktest"}, "edgestacktest/edge-configs", false)
f(DirEntry{Name: "edgestacktest/edgeconfigs-test.yaml"}, "edgestacktest/edge-configs", false)
f(DirEntry{Name: "edgestacktest/file1.conf"}, "edgestacktest/edge-configs", false)
f(DirEntry{Name: "edgeconfigs-test.yaml"}, "edgestacktest/edge-configs", false)
f(DirEntry{Name: "edgestacktest/edge-configs"}, "edgestacktest/edge-configs", false)
f(DirEntry{Name: "edgestacktest/edge-configs/standalone-edge-agent-async"}, "edgestacktest/edge-configs", true)
f(DirEntry{Name: "edgestacktest/edge-configs/abc.txt"}, "edgestacktest/edge-configs", true)
}

View file

@ -60,9 +60,15 @@ func NewAzureClient() *azureClient {
}
func newHttpClientForAzure(insecureSkipVerify bool) *http.Client {
tlsConfig := crypto.CreateTLSConfiguration()
if insecureSkipVerify {
tlsConfig.InsecureSkipVerify = true
}
httpsCli := &http.Client{
Transport: &http.Transport{
TLSClientConfig: crypto.CreateTLSConfiguration(insecureSkipVerify),
TLSClientConfig: tlsConfig,
Proxy: http.ProxyFromEnvironment,
},
Timeout: 300 * time.Second,

View file

@ -58,15 +58,7 @@ func TestService_ClonePublicRepository_Azure(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
dst := t.TempDir()
repositoryUrl := fmt.Sprintf(tt.args.repositoryURLFormat, tt.args.password)
err := service.CloneRepository(
dst,
repositoryUrl,
tt.args.referenceName,
"",
"",
gittypes.GitCredentialAuthType_Basic,
false,
)
err := service.CloneRepository(dst, repositoryUrl, tt.args.referenceName, "", "", false)
assert.NoError(t, err)
assert.FileExists(t, filepath.Join(dst, "README.md"))
})
@ -81,15 +73,7 @@ func TestService_ClonePrivateRepository_Azure(t *testing.T) {
dst := t.TempDir()
err := service.CloneRepository(
dst,
privateAzureRepoURL,
"refs/heads/main",
"",
pat,
gittypes.GitCredentialAuthType_Basic,
false,
)
err := service.CloneRepository(dst, privateAzureRepoURL, "refs/heads/main", "", pat, false)
assert.NoError(t, err)
assert.FileExists(t, filepath.Join(dst, "README.md"))
}
@ -100,14 +84,7 @@ func TestService_LatestCommitID_Azure(t *testing.T) {
pat := getRequiredValue(t, "AZURE_DEVOPS_PAT")
service := NewService(context.TODO())
id, err := service.LatestCommitID(
privateAzureRepoURL,
"refs/heads/main",
"",
pat,
gittypes.GitCredentialAuthType_Basic,
false,
)
id, err := service.LatestCommitID(privateAzureRepoURL, "refs/heads/main", "", pat, false)
assert.NoError(t, err)
assert.NotEmpty(t, id, "cannot guarantee commit id, but it should be not empty")
}
@ -119,14 +96,7 @@ func TestService_ListRefs_Azure(t *testing.T) {
username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME")
service := NewService(context.TODO())
refs, err := service.ListRefs(
privateAzureRepoURL,
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
)
refs, err := service.ListRefs(privateAzureRepoURL, username, accessToken, false, false)
assert.NoError(t, err)
assert.GreaterOrEqual(t, len(refs), 1)
}
@ -138,8 +108,8 @@ func TestService_ListRefs_Azure_Concurrently(t *testing.T) {
username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME")
service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond)
go service.ListRefs(privateAzureRepoURL, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
service.ListRefs(privateAzureRepoURL, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
go service.ListRefs(privateAzureRepoURL, username, accessToken, false, false)
service.ListRefs(privateAzureRepoURL, username, accessToken, false, false)
time.Sleep(2 * time.Second)
}
@ -277,17 +247,7 @@ func TestService_ListFiles_Azure(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
paths, err := service.ListFiles(
tt.args.repositoryUrl,
tt.args.referenceName,
tt.args.username,
tt.args.password,
gittypes.GitCredentialAuthType_Basic,
false,
false,
tt.extensions,
false,
)
paths, err := service.ListFiles(tt.args.repositoryUrl, tt.args.referenceName, tt.args.username, tt.args.password, false, false, tt.extensions, false)
if tt.expect.shouldFail {
assert.Error(t, err)
if tt.expect.err != nil {
@ -310,28 +270,8 @@ func TestService_ListFiles_Azure_Concurrently(t *testing.T) {
username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME")
service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond)
go service.ListFiles(
privateAzureRepoURL,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
service.ListFiles(
privateAzureRepoURL,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
go service.ListFiles(privateAzureRepoURL, "refs/heads/main", username, accessToken, false, false, []string{}, false)
service.ListFiles(privateAzureRepoURL, "refs/heads/main", username, accessToken, false, false, []string{}, false)
time.Sleep(2 * time.Second)
}

View file

@ -19,7 +19,6 @@ type CloneOptions struct {
ReferenceName string
Username string
Password string
AuthType gittypes.GitCredentialAuthType
// TLSSkipVerify skips SSL verification when cloning the Git repository
TLSSkipVerify bool `example:"false"`
}
@ -43,15 +42,7 @@ func CloneWithBackup(gitService portainer.GitService, fileService portainer.File
cleanUp = true
if err := gitService.CloneRepository(
options.ProjectPath,
options.URL,
options.ReferenceName,
options.Username,
options.Password,
options.AuthType,
options.TLSSkipVerify,
); err != nil {
if err := gitService.CloneRepository(options.ProjectPath, options.URL, options.ReferenceName, options.Username, options.Password, options.TLSSkipVerify); err != nil {
cleanUp = false
if err := filesystem.MoveDirectory(backupProjectPath, options.ProjectPath, false); err != nil {
log.Warn().Err(err).Msg("failed restoring backup folder")

View file

@ -7,14 +7,12 @@ import (
"strings"
gittypes "github.com/portainer/portainer/api/git/types"
"github.com/rs/zerolog/log"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/filemode"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/transport"
githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/go-git/go-git/v5/storage/memory"
"github.com/pkg/errors"
@ -35,7 +33,7 @@ func (c *gitClient) download(ctx context.Context, dst string, opt cloneOption) e
URL: opt.repositoryUrl,
Depth: opt.depth,
InsecureSkipTLS: opt.tlsSkipVerify,
Auth: getAuth(opt.authType, opt.username, opt.password),
Auth: getAuth(opt.username, opt.password),
Tags: git.NoTags,
}
@ -53,10 +51,7 @@ func (c *gitClient) download(ctx context.Context, dst string, opt cloneOption) e
}
if !c.preserveGitDirectory {
err := os.RemoveAll(filepath.Join(dst, ".git"))
if err != nil {
log.Error().Err(err).Msg("failed to remove .git directory")
}
os.RemoveAll(filepath.Join(dst, ".git"))
}
return nil
@ -69,7 +64,7 @@ func (c *gitClient) latestCommitID(ctx context.Context, opt fetchOption) (string
})
listOptions := &git.ListOptions{
Auth: getAuth(opt.authType, opt.username, opt.password),
Auth: getAuth(opt.username, opt.password),
InsecureSkipTLS: opt.tlsSkipVerify,
}
@ -99,23 +94,7 @@ func (c *gitClient) latestCommitID(ctx context.Context, opt fetchOption) (string
return "", errors.Errorf("could not find ref %q in the repository", opt.referenceName)
}
func getAuth(authType gittypes.GitCredentialAuthType, username, password string) transport.AuthMethod {
if password == "" {
return nil
}
switch authType {
case gittypes.GitCredentialAuthType_Basic:
return getBasicAuth(username, password)
case gittypes.GitCredentialAuthType_Token:
return getTokenAuth(password)
default:
log.Warn().Msg("unknown git credentials authorization type, defaulting to None")
return nil
}
}
func getBasicAuth(username, password string) *githttp.BasicAuth {
func getAuth(username, password string) *githttp.BasicAuth {
if password != "" {
if username == "" {
username = "token"
@ -129,15 +108,6 @@ func getBasicAuth(username, password string) *githttp.BasicAuth {
return nil
}
func getTokenAuth(token string) *githttp.TokenAuth {
if token != "" {
return &githttp.TokenAuth{
Token: token,
}
}
return nil
}
func (c *gitClient) listRefs(ctx context.Context, opt baseOption) ([]string, error) {
rem := git.NewRemote(memory.NewStorage(), &config.RemoteConfig{
Name: "origin",
@ -145,7 +115,7 @@ func (c *gitClient) listRefs(ctx context.Context, opt baseOption) ([]string, err
})
listOptions := &git.ListOptions{
Auth: getAuth(opt.authType, opt.username, opt.password),
Auth: getAuth(opt.username, opt.password),
InsecureSkipTLS: opt.tlsSkipVerify,
}
@ -173,7 +143,7 @@ func (c *gitClient) listFiles(ctx context.Context, opt fetchOption) ([]string, e
Depth: 1,
SingleBranch: true,
ReferenceName: plumbing.ReferenceName(opt.referenceName),
Auth: getAuth(opt.authType, opt.username, opt.password),
Auth: getAuth(opt.username, opt.password),
InsecureSkipTLS: opt.tlsSkipVerify,
Tags: git.NoTags,
}
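
For reference, the two go-git authentication shapes that appear on either side of this hunk can be sketched as below; the helper is a simplified stand-in (not the project's getAuth) and the credential values are placeholders. The Bearer behaviour of TokenAuth matches the expectation asserted in the token-auth test elsewhere in this diff.

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/transport"
	githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
)

// basicOrTokenAuth picks between the two auth types; simplified stand-in for illustration.
func basicOrTokenAuth(useToken bool, username, secret string) transport.AuthMethod {
	if secret == "" {
		return nil
	}
	if useToken {
		// Sends "Authorization: Bearer <token>".
		return &githttp.TokenAuth{Token: secret}
	}
	if username == "" {
		username = "token" // same fallback as the basic-auth path above
	}
	return &githttp.BasicAuth{Username: username, Password: secret}
}

func main() {
	fmt.Println(basicOrTokenAuth(false, "alice", "s3cret")) // basic auth for alice
	fmt.Println(basicOrTokenAuth(true, "", "my-pat"))       // token (Bearer) auth
}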

View file

@ -2,8 +2,6 @@ package git
import (
"context"
"net/http"
"net/http/httptest"
"path/filepath"
"testing"
"time"
@ -26,15 +24,7 @@ func TestService_ClonePrivateRepository_GitHub(t *testing.T) {
dst := t.TempDir()
repositoryUrl := privateGitRepoURL
err := service.CloneRepository(
dst,
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
)
err := service.CloneRepository(dst, repositoryUrl, "refs/heads/main", username, accessToken, false)
assert.NoError(t, err)
assert.FileExists(t, filepath.Join(dst, "README.md"))
}
@ -47,14 +37,7 @@ func TestService_LatestCommitID_GitHub(t *testing.T) {
service := newService(context.TODO(), 0, 0)
repositoryUrl := privateGitRepoURL
id, err := service.LatestCommitID(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
)
id, err := service.LatestCommitID(repositoryUrl, "refs/heads/main", username, accessToken, false)
assert.NoError(t, err)
assert.NotEmpty(t, id, "cannot guarantee commit id, but it should be not empty")
}
@ -67,7 +50,7 @@ func TestService_ListRefs_GitHub(t *testing.T) {
service := newService(context.TODO(), 0, 0)
repositoryUrl := privateGitRepoURL
refs, err := service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
refs, err := service.ListRefs(repositoryUrl, username, accessToken, false, false)
assert.NoError(t, err)
assert.GreaterOrEqual(t, len(refs), 1)
}
@ -80,8 +63,8 @@ func TestService_ListRefs_Github_Concurrently(t *testing.T) {
service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond)
repositoryUrl := privateGitRepoURL
go service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
go service.ListRefs(repositoryUrl, username, accessToken, false, false)
service.ListRefs(repositoryUrl, username, accessToken, false, false)
time.Sleep(2 * time.Second)
}
@ -219,17 +202,7 @@ func TestService_ListFiles_GitHub(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
paths, err := service.ListFiles(
tt.args.repositoryUrl,
tt.args.referenceName,
tt.args.username,
tt.args.password,
gittypes.GitCredentialAuthType_Basic,
false,
false,
tt.extensions,
false,
)
paths, err := service.ListFiles(tt.args.repositoryUrl, tt.args.referenceName, tt.args.username, tt.args.password, false, false, tt.extensions, false)
if tt.expect.shouldFail {
assert.Error(t, err)
if tt.expect.err != nil {
@ -253,28 +226,8 @@ func TestService_ListFiles_Github_Concurrently(t *testing.T) {
username := getRequiredValue(t, "GITHUB_USERNAME")
service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond)
go service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
go service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false)
service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false)
time.Sleep(2 * time.Second)
}
@ -287,18 +240,8 @@ func TestService_purgeCache_Github(t *testing.T) {
username := getRequiredValue(t, "GITHUB_USERNAME")
service := NewService(context.TODO())
service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
service.ListRefs(repositoryUrl, username, accessToken, false, false)
service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false)
assert.Equal(t, 1, service.repoRefCache.Len())
assert.Equal(t, 1, service.repoFileCache.Len())
@ -318,18 +261,8 @@ func TestService_purgeCacheByTTL_Github(t *testing.T) {
// 40*timeout is designed for giving enough time for ListRefs and ListFiles to cache the result
service := newService(context.TODO(), 2, 40*timeout)
service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
service.ListRefs(repositoryUrl, username, accessToken, false, false)
service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false)
assert.Equal(t, 1, service.repoRefCache.Len())
assert.Equal(t, 1, service.repoFileCache.Len())
@ -360,12 +293,12 @@ func TestService_HardRefresh_ListRefs_GitHub(t *testing.T) {
service := newService(context.TODO(), 2, 0)
repositoryUrl := privateGitRepoURL
refs, err := service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
refs, err := service.ListRefs(repositoryUrl, username, accessToken, false, false)
assert.NoError(t, err)
assert.GreaterOrEqual(t, len(refs), 1)
assert.Equal(t, 1, service.repoRefCache.Len())
_, err = service.ListRefs(repositoryUrl, username, "fake-token", gittypes.GitCredentialAuthType_Basic, false, false)
_, err = service.ListRefs(repositoryUrl, username, "fake-token", false, false)
assert.Error(t, err)
assert.Equal(t, 1, service.repoRefCache.Len())
}
@ -378,46 +311,26 @@ func TestService_HardRefresh_ListRefs_And_RemoveAllCaches_GitHub(t *testing.T) {
service := newService(context.TODO(), 2, 0)
repositoryUrl := privateGitRepoURL
refs, err := service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false)
refs, err := service.ListRefs(repositoryUrl, username, accessToken, false, false)
assert.NoError(t, err)
assert.GreaterOrEqual(t, len(refs), 1)
assert.Equal(t, 1, service.repoRefCache.Len())
files, err := service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
files, err := service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false)
assert.NoError(t, err)
assert.GreaterOrEqual(t, len(files), 1)
assert.Equal(t, 1, service.repoFileCache.Len())
files, err = service.ListFiles(
repositoryUrl,
"refs/heads/test",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
files, err = service.ListFiles(repositoryUrl, "refs/heads/test", username, accessToken, false, false, []string{}, false)
assert.NoError(t, err)
assert.GreaterOrEqual(t, len(files), 1)
assert.Equal(t, 2, service.repoFileCache.Len())
_, err = service.ListRefs(repositoryUrl, username, "fake-token", gittypes.GitCredentialAuthType_Basic, false, false)
_, err = service.ListRefs(repositoryUrl, username, "fake-token", false, false)
assert.Error(t, err)
assert.Equal(t, 1, service.repoRefCache.Len())
_, err = service.ListRefs(repositoryUrl, username, "fake-token", gittypes.GitCredentialAuthType_Basic, true, false)
_, err = service.ListRefs(repositoryUrl, username, "fake-token", true, false)
assert.Error(t, err)
assert.Equal(t, 1, service.repoRefCache.Len())
// The relevant file caches should be removed too
@ -431,72 +344,12 @@ func TestService_HardRefresh_ListFiles_GitHub(t *testing.T) {
accessToken := getRequiredValue(t, "GITHUB_PAT")
username := getRequiredValue(t, "GITHUB_USERNAME")
repositoryUrl := privateGitRepoURL
files, err := service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{},
false,
)
files, err := service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false)
assert.NoError(t, err)
assert.GreaterOrEqual(t, len(files), 1)
assert.Equal(t, 1, service.repoFileCache.Len())
_, err = service.ListFiles(
repositoryUrl,
"refs/heads/main",
username,
"fake-token",
gittypes.GitCredentialAuthType_Basic,
false,
true,
[]string{},
false,
)
_, err = service.ListFiles(repositoryUrl, "refs/heads/main", username, "fake-token", false, true, []string{}, false)
assert.Error(t, err)
assert.Equal(t, 0, service.repoFileCache.Len())
}
func TestService_CloneRepository_TokenAuth(t *testing.T) {
ensureIntegrationTest(t)
service := newService(context.TODO(), 2, 0)
var requests []*http.Request
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requests = append(requests, r)
}))
accessToken := "test_access_token"
username := "test_username"
repositoryUrl := testServer.URL
// Since we aren't hitting a real git server we ignore the error
_ = service.CloneRepository(
"test_dir",
repositoryUrl,
"refs/heads/main",
username,
accessToken,
gittypes.GitCredentialAuthType_Token,
false,
)
testServer.Close()
if len(requests) != 1 {
t.Fatalf("expected 1 request sent but got %d", len(requests))
}
gotAuthHeader := requests[0].Header.Get("Authorization")
if gotAuthHeader == "" {
t.Fatal("no Authorization header in git request")
}
expectedAuthHeader := "Bearer test_access_token"
if gotAuthHeader != expectedAuthHeader {
t.Fatalf("expected Authorization header %q but got %q", expectedAuthHeader, gotAuthHeader)
}
}

View file

@ -38,7 +38,7 @@ func Test_ClonePublicRepository_Shallow(t *testing.T) {
dir := t.TempDir()
t.Logf("Cloning into %s", dir)
err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", gittypes.GitCredentialAuthType_Basic, false)
err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", false)
assert.NoError(t, err)
assert.Equal(t, 1, getCommitHistoryLength(t, err, dir), "cloned repo has incorrect depth")
}
@ -50,7 +50,7 @@ func Test_ClonePublicRepository_NoGitDirectory(t *testing.T) {
dir := t.TempDir()
t.Logf("Cloning into %s", dir)
err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", gittypes.GitCredentialAuthType_Basic, false)
err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", false)
assert.NoError(t, err)
assert.NoDirExists(t, filepath.Join(dir, ".git"))
}
@ -84,7 +84,7 @@ func Test_latestCommitID(t *testing.T) {
repositoryURL := setup(t)
referenceName := "refs/heads/main"
id, err := service.LatestCommitID(repositoryURL, referenceName, "", "", gittypes.GitCredentialAuthType_Basic, false)
id, err := service.LatestCommitID(repositoryURL, referenceName, "", "", false)
assert.NoError(t, err)
assert.Equal(t, "68dcaa7bd452494043c64252ab90db0f98ecf8d2", id)
@ -95,7 +95,7 @@ func Test_ListRefs(t *testing.T) {
repositoryURL := setup(t)
fs, err := service.ListRefs(repositoryURL, "", "", gittypes.GitCredentialAuthType_Basic, false, false)
fs, err := service.ListRefs(repositoryURL, "", "", false, false)
assert.NoError(t, err)
assert.Equal(t, []string{"refs/heads/main"}, fs)
@ -107,17 +107,7 @@ func Test_ListFiles(t *testing.T) {
repositoryURL := setup(t)
referenceName := "refs/heads/main"
fs, err := service.ListFiles(
repositoryURL,
referenceName,
"",
"",
gittypes.GitCredentialAuthType_Basic,
false,
false,
[]string{".yml"},
false,
)
fs, err := service.ListFiles(repositoryURL, referenceName, "", "", false, false, []string{".yml"}, false)
assert.NoError(t, err)
assert.Equal(t, []string{"docker-compose.yml"}, fs)
@ -265,7 +255,7 @@ func Test_listFilesPrivateRepository(t *testing.T) {
name: "list tree with real repository and head ref but no credential",
args: fetchOption{
baseOption: baseOption{
repositoryUrl: privateGitRepoURL,
repositoryUrl: privateGitRepoURL + "fake",
username: "",
password: "",
},

View file

@ -8,7 +8,6 @@ import (
"time"
lru "github.com/hashicorp/golang-lru"
gittypes "github.com/portainer/portainer/api/git/types"
"github.com/rs/zerolog/log"
"golang.org/x/sync/singleflight"
)
@ -23,7 +22,6 @@ type baseOption struct {
repositoryUrl string
username string
password string
authType gittypes.GitCredentialAuthType
tlsSkipVerify bool
}
@ -125,22 +123,13 @@ func (service *Service) timerHasStopped() bool {
// CloneRepository clones a git repository using the specified URL in the specified
// destination folder.
func (service *Service) CloneRepository(
destination,
repositoryURL,
referenceName,
username,
password string,
authType gittypes.GitCredentialAuthType,
tlsSkipVerify bool,
) error {
func (service *Service) CloneRepository(destination, repositoryURL, referenceName, username, password string, tlsSkipVerify bool) error {
options := cloneOption{
fetchOption: fetchOption{
baseOption: baseOption{
repositoryUrl: repositoryURL,
username: username,
password: password,
authType: authType,
tlsSkipVerify: tlsSkipVerify,
},
referenceName: referenceName,
@ -166,20 +155,12 @@ func (service *Service) cloneRepository(destination string, options cloneOption)
}
// LatestCommitID returns SHA1 of the latest commit of the specified reference
func (service *Service) LatestCommitID(
repositoryURL,
referenceName,
username,
password string,
authType gittypes.GitCredentialAuthType,
tlsSkipVerify bool,
) (string, error) {
func (service *Service) LatestCommitID(repositoryURL, referenceName, username, password string, tlsSkipVerify bool) (string, error) {
options := fetchOption{
baseOption: baseOption{
repositoryUrl: repositoryURL,
username: username,
password: password,
authType: authType,
tlsSkipVerify: tlsSkipVerify,
},
referenceName: referenceName,
@ -189,14 +170,7 @@ func (service *Service) LatestCommitID(
}
// ListRefs will list target repository's references without cloning the repository
func (service *Service) ListRefs(
repositoryURL,
username,
password string,
authType gittypes.GitCredentialAuthType,
hardRefresh bool,
tlsSkipVerify bool,
) ([]string, error) {
func (service *Service) ListRefs(repositoryURL, username, password string, hardRefresh bool, tlsSkipVerify bool) ([]string, error) {
refCacheKey := generateCacheKey(repositoryURL, username, password, strconv.FormatBool(tlsSkipVerify))
if service.cacheEnabled && hardRefresh {
// Should remove the cache explicitly, so that the following normal list can show the correct result
@ -222,7 +196,6 @@ func (service *Service) ListRefs(
repositoryUrl: repositoryURL,
username: username,
password: password,
authType: authType,
tlsSkipVerify: tlsSkipVerify,
}
@ -242,62 +215,18 @@ var singleflightGroup = &singleflight.Group{}
// ListFiles will list all the files of the target repository with specific extensions.
// If extension is not provided, it will list all the files under the target repository
func (service *Service) ListFiles(
repositoryURL,
referenceName,
username,
password string,
authType gittypes.GitCredentialAuthType,
dirOnly,
hardRefresh bool,
includedExts []string,
tlsSkipVerify bool,
) ([]string, error) {
repoKey := generateCacheKey(
repositoryURL,
referenceName,
username,
password,
strconv.FormatBool(tlsSkipVerify),
strconv.Itoa(int(authType)),
strconv.FormatBool(dirOnly),
)
func (service *Service) ListFiles(repositoryURL, referenceName, username, password string, dirOnly, hardRefresh bool, includedExts []string, tlsSkipVerify bool) ([]string, error) {
repoKey := generateCacheKey(repositoryURL, referenceName, username, password, strconv.FormatBool(tlsSkipVerify), strconv.FormatBool(dirOnly))
fs, err, _ := singleflightGroup.Do(repoKey, func() (any, error) {
return service.listFiles(
repositoryURL,
referenceName,
username,
password,
authType,
dirOnly,
hardRefresh,
tlsSkipVerify,
)
return service.listFiles(repositoryURL, referenceName, username, password, dirOnly, hardRefresh, tlsSkipVerify)
})
return filterFiles(fs.([]string), includedExts), err
}
func (service *Service) listFiles(
repositoryURL,
referenceName,
username,
password string,
authType gittypes.GitCredentialAuthType,
dirOnly,
hardRefresh bool,
tlsSkipVerify bool,
) ([]string, error) {
repoKey := generateCacheKey(
repositoryURL,
referenceName,
username,
password,
strconv.FormatBool(tlsSkipVerify),
strconv.Itoa(int(authType)),
strconv.FormatBool(dirOnly),
)
func (service *Service) listFiles(repositoryURL, referenceName, username, password string, dirOnly, hardRefresh bool, tlsSkipVerify bool) ([]string, error) {
repoKey := generateCacheKey(repositoryURL, referenceName, username, password, strconv.FormatBool(tlsSkipVerify), strconv.FormatBool(dirOnly))
if service.cacheEnabled && hardRefresh {
// Should remove the cache explicitly, so that the following normal list can show the correct result
@ -318,7 +247,6 @@ func (service *Service) listFiles(
repositoryUrl: repositoryURL,
username: username,
password: password,
authType: authType,
tlsSkipVerify: tlsSkipVerify,
},
referenceName: referenceName,
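
A hedged, self-contained sketch of the singleflight pattern used by ListFiles above: concurrent callers that share a cache key also share one underlying fetch. The key format and the fake fetch below are illustrative, not the project's implementation.

package main

import (
	"fmt"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/sync/singleflight"
)

var group singleflight.Group

func generateCacheKey(parts ...string) string {
	return strings.Join(parts, "-")
}

func main() {
	var fetches int32

	slowFetch := func() (interface{}, error) {
		atomic.AddInt32(&fetches, 1)
		time.Sleep(100 * time.Millisecond) // stands in for a remote git listing
		return []string{"docker-compose.yml"}, nil
	}

	key := generateCacheKey("https://example.com/repo.git", "refs/heads/main", "false")

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			files, _, _ := group.Do(key, slowFetch)
			fmt.Println(files)
		}()
	}
	wg.Wait()

	fmt.Println("underlying fetches:", atomic.LoadInt32(&fetches)) // typically 1, not 5
}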

View file

@ -1,21 +1,12 @@
package gittypes
import (
"errors"
)
import "errors"
var (
ErrIncorrectRepositoryURL = errors.New("git repository could not be found, please ensure that the URL is correct")
ErrAuthenticationFailure = errors.New("authentication failed, please ensure that the git credentials are correct")
)
type GitCredentialAuthType int
const (
GitCredentialAuthType_Basic GitCredentialAuthType = iota
GitCredentialAuthType_Token
)
// RepoConfig represents a configuration for a repo
type RepoConfig struct {
// The repo url
@ -33,11 +24,10 @@ type RepoConfig struct {
}
type GitAuthentication struct {
Username string
Password string
AuthorizationType GitCredentialAuthType
Username string
Password string
// Git credentials identifier when the value is not 0
// When the value is 0, Username, Password, and Authtype are set without using saved credential
// When the value is 0, Username and Password are set without using saved credential
// This is introduced since 2.15.0
GitCredentialID int `example:"0"`
}

View file

@ -29,14 +29,7 @@ func UpdateGitObject(gitService portainer.GitService, objId string, gitConfig *g
return false, "", errors.WithMessagef(err, "failed to get credentials for %v", objId)
}
newHash, err := gitService.LatestCommitID(
gitConfig.URL,
gitConfig.ReferenceName,
username,
password,
gittypes.GitCredentialAuthType_Basic,
gitConfig.TLSSkipVerify,
)
newHash, err := gitService.LatestCommitID(gitConfig.URL, gitConfig.ReferenceName, username, password, gitConfig.TLSSkipVerify)
if err != nil {
return false, "", errors.WithMessagef(err, "failed to fetch latest commit id of %v", objId)
}
@ -69,7 +62,6 @@ func UpdateGitObject(gitService portainer.GitService, objId string, gitConfig *g
cloneParams.auth = &gitAuth{
username: username,
password: password,
authType: gitConfig.Authentication.AuthorizationType,
}
}
@ -97,31 +89,14 @@ type cloneRepositoryParameters struct {
}
type gitAuth struct {
authType gittypes.GitCredentialAuthType
username string
password string
}
func cloneGitRepository(gitService portainer.GitService, cloneParams *cloneRepositoryParameters) error {
if cloneParams.auth != nil {
return gitService.CloneRepository(
cloneParams.toDir,
cloneParams.url,
cloneParams.ref,
cloneParams.auth.username,
cloneParams.auth.password,
cloneParams.auth.authType,
cloneParams.tlsSkipVerify,
)
return gitService.CloneRepository(cloneParams.toDir, cloneParams.url, cloneParams.ref, cloneParams.auth.username, cloneParams.auth.password, cloneParams.tlsSkipVerify)
}
return gitService.CloneRepository(
cloneParams.toDir,
cloneParams.url,
cloneParams.ref,
"",
"",
gittypes.GitCredentialAuthType_Basic,
cloneParams.tlsSkipVerify,
)
return gitService.CloneRepository(cloneParams.toDir, cloneParams.url, cloneParams.ref, "", "", cloneParams.tlsSkipVerify)
}


@ -3,9 +3,9 @@ package update
import (
"time"
"github.com/asaskevich/govalidator"
portainer "github.com/portainer/portainer/api"
httperrors "github.com/portainer/portainer/api/http/errors"
"github.com/portainer/portainer/pkg/validate"
)
func ValidateAutoUpdateSettings(autoUpdate *portainer.AutoUpdateSettings) error {
@ -17,7 +17,7 @@ func ValidateAutoUpdateSettings(autoUpdate *portainer.AutoUpdateSettings) error
return httperrors.NewInvalidPayloadError("Webhook or Interval must be provided")
}
if autoUpdate.Webhook != "" && !validate.IsUUID(autoUpdate.Webhook) {
if autoUpdate.Webhook != "" && !govalidator.IsUUID(autoUpdate.Webhook) {
return httperrors.NewInvalidPayloadError("invalid Webhook format")
}
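The check above only accepts a webhook that is either empty or a UUID. A tiny standalone illustration using the govalidator variant shown in the hunk (sample values are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Mirrors the guard in ValidateAutoUpdateSettings: a non-empty webhook
	// must look like a UUID.
	for _, webhook := range []string{
		"64bd9a83-2a42-4aa1-8b32-35dd83f2a423", // accepted
		"not-a-uuid",                           // rejected
	} {
		fmt.Println(webhook, "valid:", govalidator.IsUUID(webhook))
	}
}
```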


@ -1,17 +1,19 @@
package git
import (
"github.com/asaskevich/govalidator"
gittypes "github.com/portainer/portainer/api/git/types"
httperrors "github.com/portainer/portainer/api/http/errors"
"github.com/portainer/portainer/pkg/validate"
)
func ValidateRepoConfig(repoConfig *gittypes.RepoConfig) error {
if len(repoConfig.URL) == 0 || !validate.IsURL(repoConfig.URL) {
if len(repoConfig.URL) == 0 || !govalidator.IsURL(repoConfig.URL) {
return httperrors.NewInvalidPayloadError("Invalid repository URL. Must correspond to a valid URL format")
}
return ValidateRepoAuthentication(repoConfig.Authentication)
}
func ValidateRepoAuthentication(auth *gittypes.GitAuthentication) error {


@ -32,12 +32,15 @@ type Service struct {
}
// NewService initializes a new service.
func NewService(insecureSkipVerify bool) *Service {
func NewService() *Service {
tlsConfig := crypto.CreateTLSConfiguration()
tlsConfig.InsecureSkipVerify = true
return &Service{
httpsClient: &http.Client{
Timeout: httpClientTimeout,
Transport: &http.Transport{
TLSClientConfig: crypto.CreateTLSConfiguration(insecureSkipVerify),
TLSClientConfig: tlsConfig,
},
},
}


@ -1,14 +0,0 @@
package openamt
import (
"net/http"
"testing"
"github.com/stretchr/testify/require"
)
func TestNewService(t *testing.T) {
service := NewService(true)
require.NotNil(t, service)
require.True(t, service.httpsClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify) //nolint:forbidigo
}


@ -1,6 +1,7 @@
package client
import (
"crypto/tls"
"errors"
"fmt"
"io"
@ -10,7 +11,6 @@ import (
"time"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/crypto"
"github.com/rs/zerolog/log"
"github.com/segmentio/encoding/json"
@ -105,28 +105,21 @@ func Get(url string, timeout int) ([]byte, error) {
// ExecutePingOperation will send a SystemPing operation HTTP request to a Docker environment(endpoint)
// using the specified host and optional TLS configuration.
// It uses a new Http.Client for each operation.
func ExecutePingOperation(host string, tlsConfiguration portainer.TLSConfiguration) (bool, error) {
func ExecutePingOperation(host string, tlsConfig *tls.Config) (bool, error) {
transport := &http.Transport{}
scheme := "http"
if tlsConfiguration.TLS {
tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(tlsConfiguration)
if err != nil {
return false, err
}
if tlsConfig != nil {
transport.TLSClientConfig = tlsConfig
scheme = "https"
}
client := &http.Client{
Timeout: 3 * time.Second,
Timeout: time.Second * 3,
Transport: transport,
}
target := strings.Replace(host, "tcp://", scheme+"://", 1)
return pingOperation(client, target)
}
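The ping helper above swaps the Docker-style tcp:// prefix for http:// or https:// before issuing the request. A trivial standalone illustration of that rewrite (host values are made up):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, host := range []string{"tcp://192.168.1.10:2375", "tcp://10.0.0.5:2376"} {
		scheme := "http" // becomes "https" when a TLS config is supplied
		fmt.Println(strings.Replace(host, "tcp://", scheme+"://", 1))
	}
}
```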


@ -1,31 +0,0 @@
package client
import (
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/stretchr/testify/require"
)
func TestExecutePingOperationFailure(t *testing.T) {
host := "http://localhost:1"
config := portainer.TLSConfiguration{
TLS: true,
TLSSkipVerify: true,
}
// Invalid host
ok, err := ExecutePingOperation(host, config)
require.False(t, ok)
require.Error(t, err)
// Invalid TLS configuration
config.TLSCertPath = "/invalid/path/to/cert"
config.TLSKeyPath = "/invalid/path/to/key"
ok, err = ExecutePingOperation(host, config)
require.False(t, ok)
require.Error(t, err)
}


@ -2,7 +2,6 @@ package csrf
import (
"crypto/rand"
"errors"
"fmt"
"net/http"
"os"
@ -10,8 +9,7 @@ import (
"github.com/portainer/portainer/api/http/security"
httperror "github.com/portainer/portainer/pkg/libhttp/error"
gcsrf "github.com/gorilla/csrf"
"github.com/rs/zerolog/log"
gorillacsrf "github.com/gorilla/csrf"
"github.com/urfave/negroni"
)
@ -21,7 +19,7 @@ func SkipCSRFToken(w http.ResponseWriter) {
w.Header().Set(csrfSkipHeader, "1")
}
func WithProtect(handler http.Handler, trustedOrigins []string) (http.Handler, error) {
func WithProtect(handler http.Handler) (http.Handler, error) {
// IsDockerDesktopExtension is used to check if we should skip csrf checks in the request bouncer (ShouldSkipCSRFCheck)
// DOCKER_EXTENSION is set to '1' in build/docker-extension/docker-compose.yml
isDockerDesktopExtension := false
@ -36,12 +34,10 @@ func WithProtect(handler http.Handler, trustedOrigins []string) (http.Handler, e
return nil, fmt.Errorf("failed to generate CSRF token: %w", err)
}
handler = gcsrf.Protect(
handler = gorillacsrf.Protect(
token,
gcsrf.Path("/"),
gcsrf.Secure(false),
gcsrf.TrustedOrigins(trustedOrigins),
gcsrf.ErrorHandler(withErrorHandler(trustedOrigins)),
gorillacsrf.Path("/"),
gorillacsrf.Secure(false),
)(handler)
return withSkipCSRF(handler, isDockerDesktopExtension), nil
@ -59,7 +55,7 @@ func withSendCSRFToken(handler http.Handler) http.Handler {
}
if statusCode := sw.Status(); statusCode >= 200 && statusCode < 300 {
sw.Header().Set("X-CSRF-Token", gcsrf.Token(r))
sw.Header().Set("X-CSRF-Token", gorillacsrf.Token(r))
}
})
@ -77,33 +73,9 @@ func withSkipCSRF(handler http.Handler, isDockerDesktopExtension bool) http.Hand
}
if skip {
r = gcsrf.UnsafeSkipCheck(r)
r = gorillacsrf.UnsafeSkipCheck(r)
}
handler.ServeHTTP(w, r)
})
}
func withErrorHandler(trustedOrigins []string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
err := gcsrf.FailureReason(r)
if errors.Is(err, gcsrf.ErrBadOrigin) || errors.Is(err, gcsrf.ErrBadReferer) || errors.Is(err, gcsrf.ErrNoReferer) {
log.Error().Err(err).
Str("request_url", r.URL.String()).
Str("host", r.Host).
Str("x_forwarded_proto", r.Header.Get("X-Forwarded-Proto")).
Str("forwarded", r.Header.Get("Forwarded")).
Str("origin", r.Header.Get("Origin")).
Str("referer", r.Header.Get("Referer")).
Strs("trusted_origins", trustedOrigins).
Msg("Failed to validate Origin or Referer")
}
http.Error(
w,
http.StatusText(http.StatusForbidden)+" - "+err.Error(),
http.StatusForbidden,
)
})
}
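For context, a minimal standalone sketch of the gorilla/csrf wiring this hunk adjusts: generate a 32-byte key, wrap the router with Protect, and expose the per-request token. The key, route, and trusted origin below are placeholders, and error handling is trimmed:

```go
package main

import (
	"crypto/rand"
	"net/http"

	gcsrf "github.com/gorilla/csrf"
)

func main() {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Hand the per-request token to the client so it can echo it back
		// in the X-CSRF-Token header on unsafe methods.
		w.Header().Set("X-CSRF-Token", gcsrf.Token(r))
		w.Write([]byte("ok"))
	})

	protected := gcsrf.Protect(
		key,
		gcsrf.Path("/"),
		gcsrf.Secure(false),
		gcsrf.TrustedOrigins([]string{"example.com"}), // placeholder origin
	)(mux)

	http.ListenAndServe(":8080", protected)
}
```

This mirrors the handler chain in the hunk, minus the Docker Desktop skip logic and the logging error handler.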


@ -2,7 +2,6 @@ package auth
import (
"net/http"
"strconv"
"strings"
portainer "github.com/portainer/portainer/api"
@ -83,11 +82,6 @@ func (handler *Handler) authenticate(rw http.ResponseWriter, r *http.Request) *h
}
}
// Clear any existing user caches
if user != nil {
handler.KubernetesClientFactory.ClearUserClientCache(strconv.Itoa(int(user.ID)))
}
if user != nil && isUserInitialAdmin(user) || settings.AuthenticationMethod == portainer.AuthenticationInternal {
return handler.authenticateInternal(rw, user, payload.Password)
}


@ -8,7 +8,6 @@ import (
"github.com/portainer/portainer/api/http/proxy"
"github.com/portainer/portainer/api/http/proxy/factory/kubernetes"
"github.com/portainer/portainer/api/http/security"
"github.com/portainer/portainer/api/kubernetes/cli"
httperror "github.com/portainer/portainer/pkg/libhttp/error"
"github.com/gorilla/mux"
@ -24,18 +23,16 @@ type Handler struct {
OAuthService portainer.OAuthService
ProxyManager *proxy.Manager
KubernetesTokenCacheManager *kubernetes.TokenCacheManager
KubernetesClientFactory *cli.ClientFactory
passwordStrengthChecker security.PasswordStrengthChecker
bouncer security.BouncerService
}
// NewHandler creates a handler to manage authentication operations.
func NewHandler(bouncer security.BouncerService, rateLimiter *security.RateLimiter, passwordStrengthChecker security.PasswordStrengthChecker, kubernetesClientFactory *cli.ClientFactory) *Handler {
func NewHandler(bouncer security.BouncerService, rateLimiter *security.RateLimiter, passwordStrengthChecker security.PasswordStrengthChecker) *Handler {
h := &Handler{
Router: mux.NewRouter(),
passwordStrengthChecker: passwordStrengthChecker,
bouncer: bouncer,
KubernetesClientFactory: kubernetesClientFactory,
}
h.Handle("/auth/oauth/validate",


@ -2,7 +2,6 @@ package auth
import (
"net/http"
"strconv"
"github.com/portainer/portainer/api/http/security"
"github.com/portainer/portainer/api/logoutcontext"
@ -24,7 +23,6 @@ func (handler *Handler) logout(w http.ResponseWriter, r *http.Request) *httperro
if tokenData != nil {
handler.KubernetesTokenCacheManager.RemoveUserFromCache(tokenData.ID)
handler.KubernetesClientFactory.ClearUserClientCache(strconv.Itoa(int(tokenData.ID)))
logoutcontext.Cancel(tokenData.Token)
}


@ -18,15 +18,10 @@ import (
"github.com/portainer/portainer/api/crypto"
"github.com/portainer/portainer/api/http/offlinegate"
"github.com/portainer/portainer/api/internal/testhelpers"
"github.com/portainer/portainer/pkg/fips"
"github.com/stretchr/testify/assert"
)
func init() {
fips.InitFIPS(false)
}
func listFiles(dir string) []string {
items := make([]string, 0)
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {


@ -16,8 +16,8 @@ import (
httperror "github.com/portainer/portainer/pkg/libhttp/error"
"github.com/portainer/portainer/pkg/libhttp/request"
"github.com/portainer/portainer/pkg/libhttp/response"
"github.com/portainer/portainer/pkg/validate"
"github.com/asaskevich/govalidator"
"github.com/rs/zerolog/log"
"github.com/segmentio/encoding/json"
)
@ -228,7 +228,7 @@ func (payload *customTemplateFromGitRepositoryPayload) Validate(r *http.Request)
if len(payload.Description) == 0 {
return errors.New("Invalid custom template description")
}
if len(payload.RepositoryURL) == 0 || !validate.IsURL(payload.RepositoryURL) {
if len(payload.RepositoryURL) == 0 || !govalidator.IsURL(payload.RepositoryURL) {
return errors.New("Invalid repository URL. Must correspond to a valid URL format")
}
if payload.RepositoryAuthentication && (len(payload.RepositoryUsername) == 0 || len(payload.RepositoryPassword) == 0) {


@ -20,17 +20,12 @@ import (
"github.com/portainer/portainer/api/internal/authorization"
"github.com/portainer/portainer/api/internal/testhelpers"
"github.com/portainer/portainer/api/jwt"
"github.com/portainer/portainer/pkg/fips"
httperror "github.com/portainer/portainer/pkg/libhttp/error"
"github.com/segmentio/encoding/json"
"github.com/stretchr/testify/assert"
)
func init() {
fips.InitFIPS(false)
}
var testFileContent = "abcdefg"
type TestGitService struct {
@ -38,28 +33,13 @@ type TestGitService struct {
targetFilePath string
}
func (g *TestGitService) CloneRepository(
destination string,
repositoryURL,
referenceName string,
username,
password string,
authType gittypes.GitCredentialAuthType,
tlsSkipVerify bool,
) error {
func (g *TestGitService) CloneRepository(destination string, repositoryURL, referenceName string, username, password string, tlsSkipVerify bool) error {
time.Sleep(100 * time.Millisecond)
return createTestFile(g.targetFilePath)
}
func (g *TestGitService) LatestCommitID(
repositoryURL,
referenceName,
username,
password string,
authType gittypes.GitCredentialAuthType,
tlsSkipVerify bool,
) (string, error) {
func (g *TestGitService) LatestCommitID(repositoryURL, referenceName, username, password string, tlsSkipVerify bool) (string, error) {
return "", nil
}
@ -76,26 +56,11 @@ type InvalidTestGitService struct {
targetFilePath string
}
func (g *InvalidTestGitService) CloneRepository(
dest,
repoUrl,
refName,
username,
password string,
authType gittypes.GitCredentialAuthType,
tlsSkipVerify bool,
) error {
func (g *InvalidTestGitService) CloneRepository(dest, repoUrl, refName, username, password string, tlsSkipVerify bool) error {
return errors.New("simulate network error")
}
func (g *InvalidTestGitService) LatestCommitID(
repositoryURL,
referenceName,
username,
password string,
authType gittypes.GitCredentialAuthType,
tlsSkipVerify bool,
) (string, error) {
func (g *InvalidTestGitService) LatestCommitID(repositoryURL, referenceName, username, password string, tlsSkipVerify bool) (string, error) {
return "", nil
}


@ -71,7 +71,7 @@ func (handler *Handler) customTemplateList(w http.ResponseWriter, r *http.Reques
customTemplates = filterByType(customTemplates, templateTypes)
if edge != nil {
customTemplates = slicesx.FilterInPlace(customTemplates, func(customTemplate portainer.CustomTemplate) bool {
customTemplates = slicesx.Filter(customTemplates, func(customTemplate portainer.CustomTemplate) bool {
return customTemplate.EdgeTemplate == *edge
})
}


@ -15,7 +15,8 @@ import (
httperror "github.com/portainer/portainer/pkg/libhttp/error"
"github.com/portainer/portainer/pkg/libhttp/request"
"github.com/portainer/portainer/pkg/libhttp/response"
"github.com/portainer/portainer/pkg/validate"
"github.com/asaskevich/govalidator"
)
type customTemplateUpdatePayload struct {
@ -37,16 +38,14 @@ type customTemplateUpdatePayload struct {
RepositoryURL string `example:"https://github.com/openfaas/faas" validate:"required"`
// Reference name of a Git repository hosting the Stack file
RepositoryReferenceName string `example:"refs/heads/master"`
// Use authentication to clone the Git repository
// Use basic authentication to clone the Git repository
RepositoryAuthentication bool `example:"true"`
// Username used in basic authentication. Required when RepositoryAuthentication is true
// and RepositoryGitCredentialID is 0. Ignored if RepositoryAuthType is token
// and RepositoryGitCredentialID is 0
RepositoryUsername string `example:"myGitUsername"`
// Password used in basic authentication or token used in token authentication.
// Required when RepositoryAuthentication is true and RepositoryGitCredentialID is 0
// Password used in basic authentication. Required when RepositoryAuthentication is true
// and RepositoryGitCredentialID is 0
RepositoryPassword string `example:"myGitPassword"`
// RepositoryAuthorizationType is the authorization type to use
RepositoryAuthorizationType gittypes.GitCredentialAuthType `example:"0"`
// GitCredentialID used to identify the bound git credential. Required when RepositoryAuthentication
// is true and RepositoryUsername/RepositoryPassword are not provided
RepositoryGitCredentialID int `example:"0"`
@ -171,7 +170,7 @@ func (handler *Handler) customTemplateUpdate(w http.ResponseWriter, r *http.Requ
customTemplate.EdgeTemplate = payload.EdgeTemplate
if payload.RepositoryURL != "" {
if !validate.IsURL(payload.RepositoryURL) {
if !govalidator.IsURL(payload.RepositoryURL) {
return httperror.BadRequest("Invalid repository URL. Must correspond to a valid URL format", err)
}
@ -184,15 +183,12 @@ func (handler *Handler) customTemplateUpdate(w http.ResponseWriter, r *http.Requ
repositoryUsername := ""
repositoryPassword := ""
repositoryAuthType := gittypes.GitCredentialAuthType_Basic
if payload.RepositoryAuthentication {
repositoryUsername = payload.RepositoryUsername
repositoryPassword = payload.RepositoryPassword
repositoryAuthType = payload.RepositoryAuthorizationType
gitConfig.Authentication = &gittypes.GitAuthentication{
Username: payload.RepositoryUsername,
Password: payload.RepositoryPassword,
AuthorizationType: payload.RepositoryAuthorizationType,
Username: payload.RepositoryUsername,
Password: payload.RepositoryPassword,
}
}
@ -202,7 +198,6 @@ func (handler *Handler) customTemplateUpdate(w http.ResponseWriter, r *http.Requ
ReferenceName: gitConfig.ReferenceName,
Username: repositoryUsername,
Password: repositoryPassword,
AuthType: repositoryAuthType,
TLSSkipVerify: gitConfig.TLSSkipVerify,
})
if err != nil {
@ -211,14 +206,7 @@ func (handler *Handler) customTemplateUpdate(w http.ResponseWriter, r *http.Requ
defer cleanBackup()
commitHash, err := handler.GitService.LatestCommitID(
gitConfig.URL,
gitConfig.ReferenceName,
repositoryUsername,
repositoryPassword,
repositoryAuthType,
gitConfig.TLSSkipVerify,
)
commitHash, err := handler.GitService.LatestCommitID(gitConfig.URL, gitConfig.ReferenceName, repositoryUsername, repositoryPassword, gitConfig.TLSSkipVerify)
if err != nil {
return httperror.InternalServerError("Unable get latest commit id", fmt.Errorf("failed to fetch latest commit id of the template %v: %w", customTemplate.ID, err))
}


@ -6,7 +6,6 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/volume"
portainer "github.com/portainer/portainer/api"
@ -117,12 +116,12 @@ func (h *Handler) dashboard(w http.ResponseWriter, r *http.Request) *httperror.H
return err
}
networks, err := cli.NetworkList(r.Context(), network.ListOptions{})
networks, err := cli.NetworkList(r.Context(), types.NetworkListOptions{})
if err != nil {
return httperror.InternalServerError("Unable to retrieve Docker networks", err)
}
networks, err = utils.FilterByResourceControl(tx, networks, portainer.NetworkResourceControl, context, func(c network.Summary) string {
networks, err = utils.FilterByResourceControl(tx, networks, portainer.NetworkResourceControl, context, func(c types.NetworkResource) string {
return c.Name
})
if err != nil {


@ -4,7 +4,6 @@ import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/portainer/portainer/api/internal/endpointutils"
"github.com/portainer/portainer/api/roar"
)
type endpointSetType map[portainer.EndpointID]bool
@ -50,29 +49,22 @@ func GetEndpointsByTags(tx dataservices.DataStoreTx, tagIDs []portainer.TagID, p
return results, nil
}
func getTrustedEndpoints(tx dataservices.DataStoreTx, endpointIDs roar.Roar[portainer.EndpointID]) ([]portainer.EndpointID, error) {
var innerErr error
func getTrustedEndpoints(tx dataservices.DataStoreTx, endpointIDs []portainer.EndpointID) ([]portainer.EndpointID, error) {
results := []portainer.EndpointID{}
endpointIDs.Iterate(func(endpointID portainer.EndpointID) bool {
for _, endpointID := range endpointIDs {
endpoint, err := tx.Endpoint().Endpoint(endpointID)
if err != nil {
innerErr = err
return false
return nil, err
}
if !endpoint.UserTrusted {
return true
continue
}
results = append(results, endpoint.ID)
}
return true
})
return results, innerErr
return results, nil
}
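The rewritten getTrustedEndpoints above walks a callback-based set where returning false stops the iteration, capturing the first error in a closure. A generic, dependency-free sketch of that visitor pattern (the types and data are illustrative, not the roar package's API):

```go
package main

import (
	"errors"
	"fmt"
)

// iterate visits each id and stops as soon as visit returns false, mirroring
// the callback style used on the bitmap-backed EndpointIDs field above.
func iterate(ids []int, visit func(id int) bool) {
	for _, id := range ids {
		if !visit(id) {
			return
		}
	}
}

func main() {
	var innerErr error
	trusted := []int{}

	iterate([]int{1, 2, 3, 4}, func(id int) bool {
		if id == 3 {
			innerErr = errors.New("lookup failed") // first error aborts the walk
			return false
		}
		trusted = append(trusted, id)
		return true
	})

	fmt.Println(trusted, innerErr)
}
```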
func mapEndpointGroupToEndpoints(endpoints []portainer.Endpoint) map[portainer.EndpointGroupID]endpointSetType {


@ -7,7 +7,6 @@ import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/portainer/portainer/api/internal/endpointutils"
"github.com/portainer/portainer/api/roar"
httperror "github.com/portainer/portainer/pkg/libhttp/error"
"github.com/portainer/portainer/pkg/libhttp/request"
)
@ -53,7 +52,6 @@ func calculateEndpointsOrTags(tx dataservices.DataStoreTx, edgeGroup *portainer.
}
edgeGroup.Endpoints = endpointIDs
edgeGroup.EndpointIDs = roar.FromSlice(endpointIDs)
return nil
}
@ -96,7 +94,6 @@ func (handler *Handler) edgeGroupCreate(w http.ResponseWriter, r *http.Request)
Dynamic: payload.Dynamic,
TagIDs: []portainer.TagID{},
Endpoints: []portainer.EndpointID{},
EndpointIDs: roar.Roar[portainer.EndpointID]{},
PartialMatch: payload.PartialMatch,
}
@ -111,5 +108,5 @@ func (handler *Handler) edgeGroupCreate(w http.ResponseWriter, r *http.Request)
return nil
})
return txResponse(w, shadowedEdgeGroup{EdgeGroup: *edgeGroup}, err)
return txResponse(w, edgeGroup, err)
}


@ -1,62 +0,0 @@
package edgegroups
import (
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/datastore"
"github.com/portainer/portainer/api/internal/testhelpers"
"github.com/segmentio/encoding/json"
"github.com/stretchr/testify/require"
)
func TestEdgeGroupCreateHandler(t *testing.T) {
_, store := datastore.MustNewTestStore(t, true, true)
handler := NewHandler(testhelpers.NewTestRequestBouncer())
handler.DataStore = store
err := store.EndpointGroup().Create(&portainer.EndpointGroup{
ID: 1,
Name: "Test Group",
})
require.NoError(t, err)
for i := range 3 {
err = store.Endpoint().Create(&portainer.Endpoint{
ID: portainer.EndpointID(i + 1),
Name: "Test Endpoint " + strconv.Itoa(i+1),
Type: portainer.EdgeAgentOnDockerEnvironment,
GroupID: 1,
})
require.NoError(t, err)
err = store.EndpointRelation().Create(&portainer.EndpointRelation{
EndpointID: portainer.EndpointID(i + 1),
EdgeStacks: map[portainer.EdgeStackID]bool{},
})
require.NoError(t, err)
}
rr := httptest.NewRecorder()
req := httptest.NewRequest(
http.MethodPost,
"/edge_groups",
strings.NewReader(`{"Name": "New Edge Group", "Endpoints": [1, 2, 3]}`),
)
handler.ServeHTTP(rr, req)
require.Equal(t, http.StatusOK, rr.Result().StatusCode)
var responseGroup portainer.EdgeGroup
err = json.NewDecoder(rr.Body).Decode(&responseGroup)
require.NoError(t, err)
require.ElementsMatch(t, []portainer.EndpointID{1, 2, 3}, responseGroup.Endpoints)
}


@ -5,7 +5,6 @@ import (
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/dataservices"
"github.com/portainer/portainer/api/roar"
httperror "github.com/portainer/portainer/pkg/libhttp/error"
"github.com/portainer/portainer/pkg/libhttp/request"
)
@ -34,9 +33,7 @@ func (handler *Handler) edgeGroupInspect(w http.ResponseWriter, r *http.Request)
return err
})
edgeGroup.Endpoints = edgeGroup.EndpointIDs.ToSlice()
return txResponse(w, shadowedEdgeGroup{EdgeGroup: *edgeGroup}, err)
return txResponse(w, edgeGroup, err)
}
func getEdgeGroup(tx dataservices.DataStoreTx, ID portainer.EdgeGroupID) (*portainer.EdgeGroup, error) {
@ -53,7 +50,7 @@ func getEdgeGroup(tx dataservices.DataStoreTx, ID portainer.EdgeGroupID) (*porta
return nil, httperror.InternalServerError("Unable to retrieve environments and environment groups for Edge group", err)
}
edgeGroup.EndpointIDs = roar.FromSlice(endpoints)
edgeGroup.Endpoints = endpoints
}
return edgeGroup, err


@ -1,176 +0,0 @@
package edgegroups
import (
"net/http"
"net/http/httptest"
"strconv"
"testing"
portainer "github.com/portainer/portainer/api"
"github.com/portainer/portainer/api/datastore"
"github.com/portainer/portainer/api/internal/testhelpers"
"github.com/portainer/portainer/api/roar"
"github.com/segmentio/encoding/json"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestEdgeGroupInspectHandler(t *testing.T) {
_, store := datastore.MustNewTestStore(t, true, true)
handler := NewHandler(testhelpers.NewTestRequestBouncer())
handler.DataStore = store
err := store.EndpointGroup().Create(&portainer.EndpointGroup{
ID: 1,
Name: "Test Group",
})
require.NoError(t, err)
for i := range 3 {
err = store.Endpoint().Create(&portainer.Endpoint{
ID: portainer.EndpointID(i + 1),
Name: "Test Endpoint " + strconv.Itoa(i+1),
Type: portainer.EdgeAgentOnDockerEnvironment,
GroupID: 1,
})
require.NoError(t, err)
err = store.EndpointRelation().Create(&portainer.EndpointRelation{
EndpointID: portainer.EndpointID(i + 1),
EdgeStacks: map[portainer.EdgeStackID]bool{},
})
require.NoError(t, err)
}
err = store.EdgeGroup().Create(&portainer.EdgeGroup{
ID: 1,
Name: "Test Edge Group",
EndpointIDs: roar.FromSlice([]portainer.EndpointID{1, 2, 3}),
})
require.NoError(t, err)
rr := httptest.NewRecorder()
req := httptest.NewRequest(
http.MethodGet,
"/edge_groups/1",
nil,
)
handler.ServeHTTP(rr, req)
require.Equal(t, http.StatusOK, rr.Result().StatusCode)
var responseGroup portainer.EdgeGroup
err = json.NewDecoder(rr.Body).Decode(&responseGroup)
require.NoError(t, err)
assert.ElementsMatch(t, []portainer.EndpointID{1, 2, 3}, responseGroup.Endpoints)
}
func TestEmptyEdgeGroupInspectHandler(t *testing.T) {
_, store := datastore.MustNewTestStore(t, true, true)
handler := NewHandler(testhelpers.NewTestRequestBouncer())
handler.DataStore = store
err := store.EndpointGroup().Create(&portainer.EndpointGroup{
ID: 1,
Name: "Test Group",
})
require.NoError(t, err)
err = store.EdgeGroup().Create(&portainer.EdgeGroup{
ID: 1,
Name: "Test Edge Group",
EndpointIDs: roar.Roar[portainer.EndpointID]{},
})
require.NoError(t, err)
rr := httptest.NewRecorder()
req := httptest.NewRequest(
http.MethodGet,
"/edge_groups/1",
nil,
)
handler.ServeHTTP(rr, req)
require.Equal(t, http.StatusOK, rr.Result().StatusCode)
var responseGroup portainer.EdgeGroup
err = json.NewDecoder(rr.Body).Decode(&responseGroup)
require.NoError(t, err)
// Make sure the frontend does not get a null value but a [] instead
require.NotNil(t, responseGroup.Endpoints)
require.Len(t, responseGroup.Endpoints, 0)
}
func TestDynamicEdgeGroupInspectHandler(t *testing.T) {
_, store := datastore.MustNewTestStore(t, true, true)
handler := NewHandler(testhelpers.NewTestRequestBouncer())
handler.DataStore = store
err := store.EndpointGroup().Create(&portainer.EndpointGroup{
ID: 1,
Name: "Test Group",
})
require.NoError(t, err)
err = store.Tag().Create(&portainer.Tag{
ID: 1,
Name: "Test Tag",
Endpoints: map[portainer.EndpointID]bool{
1: true,
2: true,
3: true,
},
})
require.NoError(t, err)
for i := range 3 {
err = store.Endpoint().Create(&portainer.Endpoint{
ID: portainer.EndpointID(i + 1),
Name: "Test Endpoint " + strconv.Itoa(i+1),
Type: portainer.EdgeAgentOnDockerEnvironment,
GroupID: 1,
TagIDs: []portainer.TagID{1},
UserTrusted: true,
})
require.NoError(t, err)
err = store.EndpointRelation().Create(&portainer.EndpointRelation{
EndpointID: portainer.EndpointID(i + 1),
EdgeStacks: map[portainer.EdgeStackID]bool{},
})
require.NoError(t, err)
}
err = store.EdgeGroup().Create(&portainer.EdgeGroup{
ID: 1,
Name: "Test Edge Group",
Dynamic: true,
TagIDs: []portainer.TagID{1},
})
require.NoError(t, err)
rr := httptest.NewRecorder()
req := httptest.NewRequest(
http.MethodGet,
"/edge_groups/1",
nil,
)
handler.ServeHTTP(rr, req)
require.Equal(t, http.StatusOK, rr.Result().StatusCode)
var responseGroup portainer.EdgeGroup
err = json.NewDecoder(rr.Body).Decode(&responseGroup)
require.NoError(t, err)
require.ElementsMatch(t, []portainer.EndpointID{1, 2, 3}, responseGroup.Endpoints)
}

Some files were not shown because too many files have changed in this diff.