diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index e1ab32632..75414fa04 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -2,18 +2,17 @@ name: Bug Report description: Create a report to help us improve. labels: kind/bug,bug/need-confirmation body: - - type: markdown attributes: value: | # Welcome! - + The issue tracker is for reporting bugs. If you have an [idea for a new feature](https://github.com/orgs/portainer/discussions/categories/ideas) or a [general question about Portainer](https://github.com/orgs/portainer/discussions/categories/help) please post in our [GitHub Discussions](https://github.com/orgs/portainer/discussions). - + You can also ask for help in our [community Slack channel](https://join.slack.com/t/portainer/shared_invite/zt-txh3ljab-52QHTyjCqbe5RibC2lcjKA). Please note that we only provide support for current versions of Portainer. You can find a list of supported versions in our [lifecycle policy](https://docs.portainer.io/start/lifecycle). - + **DO NOT FILE ISSUES FOR GENERAL SUPPORT QUESTIONS**. - type: checkboxes @@ -45,7 +44,7 @@ body: - type: textarea attributes: label: Problem Description - description: A clear and concise description of what the bug is. + description: A clear and concise description of what the bug is. validations: required: true @@ -71,7 +70,7 @@ body: 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' - 4. See error + 4. See error validations: required: true @@ -92,9 +91,31 @@ body: - type: dropdown attributes: label: Portainer version - description: We only provide support for current versions of Portainer as per the lifecycle policy linked above. If you are on an older version of Portainer we recommend [upgrading first](https://docs.portainer.io/start/upgrade) in case your bug has already been fixed. + description: We only provide support for current versions of Portainer as per the lifecycle policy linked above. If you are on an older version of Portainer we recommend [updating first](https://docs.portainer.io/start/upgrade) in case your bug has already been fixed. multiple: false options: + - '2.32.0' + - '2.31.3' + - '2.31.2' + - '2.31.1' + - '2.31.0' + - '2.30.1' + - '2.30.0' + - '2.29.2' + - '2.29.1' + - '2.29.0' + - '2.28.1' + - '2.28.0' + - '2.27.9' + - '2.27.8' + - '2.27.7' + - '2.27.6' + - '2.27.5' + - '2.27.4' + - '2.27.3' + - '2.27.2' + - '2.27.1' + - '2.27.0' - '2.26.1' - '2.26.0' - '2.25.1' @@ -109,20 +130,6 @@ body: - '2.21.2' - '2.21.1' - '2.21.0' - - '2.20.3' - - '2.20.2' - - '2.20.1' - - '2.20.0' - - '2.19.5' - - '2.19.4' - - '2.19.3' - - '2.19.2' - - '2.19.1' - - '2.19.0' - - '2.18.4' - - '2.18.3' - - '2.18.2' - - '2.18.1' validations: required: true @@ -160,7 +167,7 @@ body: - type: input attributes: label: Browser - description: | + description: | Enter your browser and version. 
Example: Google Chrome 114.0 validations: required: false diff --git a/.golangci.yaml b/.golangci.yaml index 648df24d1..c036175a2 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -12,8 +12,18 @@ linters: - copyloopvar - intrange - perfsprint + - ineffassign + - bodyclose + - forbidigo linters-settings: + forbidigo: + analyze-types: true + forbid: + - p: ^tls\.Config$ + msg: 'Use crypto.CreateTLSConfiguration() instead' + - p: ^tls\.Config\.(InsecureSkipVerify|MinVersion|MaxVersion|CipherSuites|CurvePreferences)$ + msg: 'Do not set this field directly, use crypto.CreateTLSConfiguration() instead' depguard: rules: main: diff --git a/README.md b/README.md index 9b6bb31b8..680382cb0 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,9 @@ Portainer consists of a single container that can run on any cluster. It can be **Portainer Business Edition** builds on the open-source base and includes a range of advanced features and functions (like RBAC and Support) that are specific to the needs of business users. -- [Compare Portainer CE and Compare Portainer BE](https://portainer.io/products) +- [Compare Portainer CE and Compare Portainer BE](https://www.portainer.io/features) - [Take3 – get 3 free nodes of Portainer Business for as long as you want them](https://www.portainer.io/take-3) -- [Portainer BE install guide](https://install.portainer.io) +- [Portainer BE install guide](https://academy.portainer.io/install/) ## Latest Version @@ -20,22 +20,19 @@ Portainer CE is updated regularly. We aim to do an update release every couple o ## Getting started -- [Deploy Portainer](https://docs.portainer.io/start/install) +- [Deploy Portainer](https://docs.portainer.io/start/install-ce) - [Documentation](https://docs.portainer.io) - [Contribute to the project](https://docs.portainer.io/contribute/contribute) ## Features & Functions -View [this](https://www.portainer.io/products) table to see all of the Portainer CE functionality and compare to Portainer Business. - -- [Portainer CE for Docker / Docker Swarm](https://www.portainer.io/solutions/docker) -- [Portainer CE for Kubernetes](https://www.portainer.io/solutions/kubernetes-ui) +View [this](https://www.portainer.io/features) table to see all of the Portainer CE functionality and compare to Portainer Business. ## Getting help Portainer CE is an open source project and is supported by the community. You can buy a supported version of Portainer at portainer.io -Learn more about Portainer's community support channels [here.](https://www.portainer.io/get-support-for-portainer) +Learn more about Portainer's community support channels [here.](https://www.portainer.io/resources/get-help/get-support) - Issues: https://github.com/portainer/portainer/issues - Slack (chat): [https://portainer.io/slack](https://portainer.io/slack) @@ -53,13 +50,13 @@ You can join the Portainer Community by visiting [https://www.portainer.io/join- ## Work for us -If you are a developer, and our code in this repo makes sense to you, we would love to hear from you. We are always on the hunt for awesome devs, either freelance or employed. Drop us a line to info@portainer.io with your details and/or visit our [careers page](https://portainer.io/careers). +If you are a developer, and our code in this repo makes sense to you, we would love to hear from you. We are always on the hunt for awesome devs, either freelance or employed. Drop us a line to success@portainer.io with your details and/or visit our [careers page](https://apply.workable.com/portainer/). 
## Privacy **To make sure we focus our development effort in the right places we need to know which features get used most often. To give us this information we use [Matomo Analytics](https://matomo.org/), which is hosted in Germany and is fully GDPR compliant.** -When Portainer first starts, you are given the option to DISABLE analytics. If you **don't** choose to disable it, we collect anonymous usage as per [our privacy policy](https://www.portainer.io/privacy-policy). **Please note**, there is no personally identifiable information sent or stored at any time and we only use the data to help us improve Portainer. +When Portainer first starts, you are given the option to DISABLE analytics. If you **don't** choose to disable it, we collect anonymous usage as per [our privacy policy](https://www.portainer.io/legal/privacy-policy). **Please note**, there is no personally identifiable information sent or stored at any time and we only use the data to help us improve Portainer. ## Limitations diff --git a/api/agent/version.go b/api/agent/version.go index a9480850a..7e6885351 100644 --- a/api/agent/version.go +++ b/api/agent/version.go @@ -16,7 +16,7 @@ import ( // GetAgentVersionAndPlatform returns the agent version and platform // // it sends a ping to the agent and parses the version and platform from the headers -func GetAgentVersionAndPlatform(endpointUrl string, tlsConfig *tls.Config) (portainer.AgentPlatform, string, error) { +func GetAgentVersionAndPlatform(endpointUrl string, tlsConfig *tls.Config) (portainer.AgentPlatform, string, error) { //nolint:forbidigo httpCli := &http.Client{ Timeout: 3 * time.Second, } diff --git a/api/archive/zip.go b/api/archive/zip.go index 50328ef09..b7dbb9302 100644 --- a/api/archive/zip.go +++ b/api/archive/zip.go @@ -2,7 +2,6 @@ package archive import ( "archive/zip" - "bytes" "fmt" "io" "os" @@ -12,50 +11,6 @@ import ( "github.com/pkg/errors" ) -// UnzipArchive will unzip an archive from bytes into the dest destination folder on disk -func UnzipArchive(archiveData []byte, dest string) error { - zipReader, err := zip.NewReader(bytes.NewReader(archiveData), int64(len(archiveData))) - if err != nil { - return err - } - - for _, zipFile := range zipReader.File { - err := extractFileFromArchive(zipFile, dest) - if err != nil { - return err - } - } - - return nil -} - -func extractFileFromArchive(file *zip.File, dest string) error { - f, err := file.Open() - if err != nil { - return err - } - defer f.Close() - - data, err := io.ReadAll(f) - if err != nil { - return err - } - - fpath := filepath.Join(dest, file.Name) - - outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode()) - if err != nil { - return err - } - - _, err = io.Copy(outFile, bytes.NewReader(data)) - if err != nil { - return err - } - - return outFile.Close() -} - // UnzipFile will decompress a zip archive, moving all files and folders // within the zip file (parameter 1) to an output directory (parameter 2). 
func UnzipFile(src string, dest string) error { @@ -76,11 +31,11 @@ func UnzipFile(src string, dest string) error { if f.FileInfo().IsDir() { // Make Folder os.MkdirAll(p, os.ModePerm) + continue } - err = unzipFile(f, p) - if err != nil { + if err := unzipFile(f, p); err != nil { return err } } @@ -93,20 +48,20 @@ func unzipFile(f *zip.File, p string) error { if err := os.MkdirAll(filepath.Dir(p), os.ModePerm); err != nil { return errors.Wrapf(err, "unzipFile: can't make a path %s", p) } + outFile, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) if err != nil { return errors.Wrapf(err, "unzipFile: can't create file %s", p) } defer outFile.Close() + rc, err := f.Open() if err != nil { return errors.Wrapf(err, "unzipFile: can't open zip file %s in the archive", f.Name) } defer rc.Close() - _, err = io.Copy(outFile, rc) - - if err != nil { + if _, err = io.Copy(outFile, rc); err != nil { return errors.Wrapf(err, "unzipFile: can't copy an archived file content") } diff --git a/api/chisel/service_test.go b/api/chisel/service_test.go index 918c7bf1e..e8fa71a5a 100644 --- a/api/chisel/service_test.go +++ b/api/chisel/service_test.go @@ -9,10 +9,15 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/datastore" + "github.com/portainer/portainer/pkg/fips" "github.com/stretchr/testify/require" ) +func init() { + fips.InitFIPS(false) +} + func TestPingAgentPanic(t *testing.T) { endpoint := &portainer.Endpoint{ ID: 1, diff --git a/api/cli/cli.go b/api/cli/cli.go index 67005fddb..0722c0b2e 100644 --- a/api/cli/cli.go +++ b/api/cli/cli.go @@ -60,6 +60,9 @@ func CLIFlags() *portainer.CLIFlags { LogLevel: kingpin.Flag("log-level", "Set the minimum logging level to show").Default("INFO").Enum("DEBUG", "INFO", "WARN", "ERROR"), LogMode: kingpin.Flag("log-mode", "Set the logging output mode").Default("PRETTY").Enum("NOCOLOR", "PRETTY", "JSON"), KubectlShellImage: kingpin.Flag("kubectl-shell-image", "Kubectl shell image").Envar(portainer.KubectlShellImageEnvVar).Default(portainer.DefaultKubectlShellImage).String(), + PullLimitCheckDisabled: kingpin.Flag("pull-limit-check-disabled", "Pull limit check").Envar(portainer.PullLimitCheckDisabledEnvVar).Default(defaultPullLimitCheckDisabled).Bool(), + TrustedOrigins: kingpin.Flag("trusted-origins", "List of trusted origins for CSRF protection. 
Separate multiple origins with a comma.").Envar(portainer.TrustedOriginsEnvVar).String(), + CSP: kingpin.Flag("csp", "Content Security Policy (CSP) header").Envar(portainer.CSPEnvVar).Default("true").Bool(), } } diff --git a/api/cli/defaults.go b/api/cli/defaults.go index ca426cdad..13aa2df93 100644 --- a/api/cli/defaults.go +++ b/api/cli/defaults.go @@ -4,20 +4,21 @@ package cli const ( - defaultBindAddress = ":9000" - defaultHTTPSBindAddress = ":9443" - defaultTunnelServerAddress = "0.0.0.0" - defaultTunnelServerPort = "8000" - defaultDataDirectory = "/data" - defaultAssetsDirectory = "./" - defaultTLS = "false" - defaultTLSSkipVerify = "false" - defaultTLSCACertPath = "/certs/ca.pem" - defaultTLSCertPath = "/certs/cert.pem" - defaultTLSKeyPath = "/certs/key.pem" - defaultHTTPDisabled = "false" - defaultHTTPEnabled = "false" - defaultSSL = "false" - defaultBaseURL = "/" - defaultSecretKeyName = "portainer" + defaultBindAddress = ":9000" + defaultHTTPSBindAddress = ":9443" + defaultTunnelServerAddress = "0.0.0.0" + defaultTunnelServerPort = "8000" + defaultDataDirectory = "/data" + defaultAssetsDirectory = "./" + defaultTLS = "false" + defaultTLSSkipVerify = "false" + defaultTLSCACertPath = "/certs/ca.pem" + defaultTLSCertPath = "/certs/cert.pem" + defaultTLSKeyPath = "/certs/key.pem" + defaultHTTPDisabled = "false" + defaultHTTPEnabled = "false" + defaultSSL = "false" + defaultBaseURL = "/" + defaultSecretKeyName = "portainer" + defaultPullLimitCheckDisabled = "false" ) diff --git a/api/cli/defaults_windows.go b/api/cli/defaults_windows.go index ed5996004..a9d02da65 100644 --- a/api/cli/defaults_windows.go +++ b/api/cli/defaults_windows.go @@ -1,21 +1,22 @@ package cli const ( - defaultBindAddress = ":9000" - defaultHTTPSBindAddress = ":9443" - defaultTunnelServerAddress = "0.0.0.0" - defaultTunnelServerPort = "8000" - defaultDataDirectory = "C:\\data" - defaultAssetsDirectory = "./" - defaultTLS = "false" - defaultTLSSkipVerify = "false" - defaultTLSCACertPath = "C:\\certs\\ca.pem" - defaultTLSCertPath = "C:\\certs\\cert.pem" - defaultTLSKeyPath = "C:\\certs\\key.pem" - defaultHTTPDisabled = "false" - defaultHTTPEnabled = "false" - defaultSSL = "false" - defaultSnapshotInterval = "5m" - defaultBaseURL = "/" - defaultSecretKeyName = "portainer" + defaultBindAddress = ":9000" + defaultHTTPSBindAddress = ":9443" + defaultTunnelServerAddress = "0.0.0.0" + defaultTunnelServerPort = "8000" + defaultDataDirectory = "C:\\data" + defaultAssetsDirectory = "./" + defaultTLS = "false" + defaultTLSSkipVerify = "false" + defaultTLSCACertPath = "C:\\certs\\ca.pem" + defaultTLSCertPath = "C:\\certs\\cert.pem" + defaultTLSKeyPath = "C:\\certs\\key.pem" + defaultHTTPDisabled = "false" + defaultHTTPEnabled = "false" + defaultSSL = "false" + defaultSnapshotInterval = "5m" + defaultBaseURL = "/" + defaultSecretKeyName = "portainer" + defaultPullLimitCheckDisabled = "false" ) diff --git a/api/cli/pairlistbool.go b/api/cli/pairlistbool.go deleted file mode 100644 index 69e89b792..000000000 --- a/api/cli/pairlistbool.go +++ /dev/null @@ -1,45 +0,0 @@ -package cli - -import ( - "strings" - - portainer "github.com/portainer/portainer/api" - - "gopkg.in/alecthomas/kingpin.v2" -) - -type pairListBool []portainer.Pair - -// Set implementation for a list of portainer.Pair -func (l *pairListBool) Set(value string) error { - p := new(portainer.Pair) - - // default to true. 
example setting=true is equivalent to setting - parts := strings.SplitN(value, "=", 2) - if len(parts) != 2 { - p.Name = parts[0] - p.Value = "true" - } else { - p.Name = parts[0] - p.Value = parts[1] - } - - *l = append(*l, *p) - return nil -} - -// String implementation for a list of pair -func (l *pairListBool) String() string { - return "" -} - -// IsCumulative implementation for a list of pair -func (l *pairListBool) IsCumulative() bool { - return true -} - -func BoolPairs(s kingpin.Settings) (target *[]portainer.Pair) { - target = new([]portainer.Pair) - s.SetValue((*pairListBool)(target)) - return -} diff --git a/api/cmd/portainer/main.go b/api/cmd/portainer/main.go index edc9cb897..08a99c603 100644 --- a/api/cmd/portainer/main.go +++ b/api/cmd/portainer/main.go @@ -39,6 +39,7 @@ import ( "github.com/portainer/portainer/api/kubernetes" kubecli "github.com/portainer/portainer/api/kubernetes/cli" "github.com/portainer/portainer/api/ldap" + "github.com/portainer/portainer/api/logs" "github.com/portainer/portainer/api/oauth" "github.com/portainer/portainer/api/pendingactions" "github.com/portainer/portainer/api/pendingactions/actions" @@ -48,8 +49,11 @@ import ( "github.com/portainer/portainer/api/stacks/deployments" "github.com/portainer/portainer/pkg/build" "github.com/portainer/portainer/pkg/featureflags" + "github.com/portainer/portainer/pkg/fips" "github.com/portainer/portainer/pkg/libhelm" + libhelmtypes "github.com/portainer/portainer/pkg/libhelm/types" "github.com/portainer/portainer/pkg/libstack/compose" + "github.com/portainer/portainer/pkg/validate" "github.com/gofrs/uuid" "github.com/rs/zerolog/log" @@ -165,12 +169,12 @@ func checkDBSchemaServerVersionMatch(dbStore dataservices.DataStore, serverVersi return v.SchemaVersion == serverVersion && v.Edition == serverEdition } -func initKubernetesDeployer(kubernetesTokenCacheManager *kubeproxy.TokenCacheManager, kubernetesClientFactory *kubecli.ClientFactory, dataStore dataservices.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, proxyManager *proxy.Manager, assetsPath string) portainer.KubernetesDeployer { - return exec.NewKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, signatureService, proxyManager, assetsPath) +func initKubernetesDeployer(kubernetesTokenCacheManager *kubeproxy.TokenCacheManager, kubernetesClientFactory *kubecli.ClientFactory, dataStore dataservices.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, proxyManager *proxy.Manager) portainer.KubernetesDeployer { + return exec.NewKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, signatureService, proxyManager) } -func initHelmPackageManager(assetsPath string) (libhelm.HelmPackageManager, error) { - return libhelm.NewHelmPackageManager(libhelm.HelmConfig{BinaryPath: assetsPath}) +func initHelmPackageManager() (libhelmtypes.HelmPackageManager, error) { + return libhelm.NewHelmPackageManager() } func initAPIKeyService(datastore dataservices.DataStore) apikey.APIKeyService { @@ -238,10 +242,10 @@ func updateSettingsFromFlags(dataStore dataservices.DataStore, flags *portainer. 
return err } - settings.SnapshotInterval = *cmp.Or(flags.SnapshotInterval, &settings.SnapshotInterval) - settings.LogoURL = *cmp.Or(flags.Logo, &settings.LogoURL) - settings.EnableEdgeComputeFeatures = *cmp.Or(flags.EnableEdgeComputeFeatures, &settings.EnableEdgeComputeFeatures) - settings.TemplatesURL = *cmp.Or(flags.Templates, &settings.TemplatesURL) + settings.SnapshotInterval = cmp.Or(*flags.SnapshotInterval, settings.SnapshotInterval) + settings.LogoURL = cmp.Or(*flags.Logo, settings.LogoURL) + settings.EnableEdgeComputeFeatures = cmp.Or(*flags.EnableEdgeComputeFeatures, settings.EnableEdgeComputeFeatures) + settings.TemplatesURL = cmp.Or(*flags.Templates, settings.TemplatesURL) if *flags.Labels != nil { settings.BlackListedLabels = *flags.Labels @@ -328,6 +332,21 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server { featureflags.Parse(*flags.FeatureFlags, portainer.SupportedFeatureFlags) } + trustedOrigins := []string{} + if *flags.TrustedOrigins != "" { + // validate if the trusted origins are valid urls + for _, origin := range strings.Split(*flags.TrustedOrigins, ",") { + if !validate.IsTrustedOrigin(origin) { + log.Fatal().Str("trusted_origin", origin).Msg("invalid url for trusted origin. Please check the trusted origins flag.") + } + + trustedOrigins = append(trustedOrigins, origin) + } + } + + // -ce can not ever be run in FIPS mode + fips.InitFIPS(false) + fileService := initFileService(*flags.Data) encryptionKey := loadEncryptionSecretKey(*flags.SecretKeyName) if encryptionKey == nil { @@ -368,7 +387,8 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server { gitService := git.NewService(shutdownCtx) - openAMTService := openamt.NewService() + // Setting insecureSkipVerify to true to preserve the old behaviour. + openAMTService := openamt.NewService(true) cryptoService := &crypto.Service{} @@ -421,7 +441,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server { log.Fatal().Err(err).Msg("failed initializing swarm stack manager") } - kubernetesDeployer := initKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, signatureService, proxyManager, *flags.Assets) + kubernetesDeployer := initKubernetesDeployer(kubernetesTokenCacheManager, kubernetesClientFactory, dataStore, reverseTunnelService, signatureService, proxyManager) pendingActionsService := pendingactions.NewService(dataStore, kubernetesClientFactory) pendingActionsService.RegisterHandler(actions.CleanNAPWithOverridePolicies, handlers.NewHandlerCleanNAPWithOverridePolicies(authorizationService, dataStore)) @@ -435,9 +455,9 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server { snapshotService.Start() - proxyManager.NewProxyFactory(dataStore, signatureService, reverseTunnelService, dockerClientFactory, kubernetesClientFactory, kubernetesTokenCacheManager, gitService, snapshotService) + proxyManager.NewProxyFactory(dataStore, signatureService, reverseTunnelService, dockerClientFactory, kubernetesClientFactory, kubernetesTokenCacheManager, gitService, snapshotService, jwtService) - helmPackageManager, err := initHelmPackageManager(*flags.Assets) + helmPackageManager, err := initHelmPackageManager() if err != nil { log.Fatal().Err(err).Msg("failed initializing helm package manager") } @@ -543,6 +563,7 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server { Status: applicationStatus, BindAddress: *flags.Addr, BindAddressHTTPS: *flags.AddrHTTPS, + CSP: *flags.CSP, HTTPEnabled: sslDBSettings.HTTPEnabled, AssetsPath: 
*flags.Assets, DataStore: dataStore, @@ -575,17 +596,19 @@ func buildServer(flags *portainer.CLIFlags) portainer.Server { AdminCreationDone: adminCreationDone, PendingActionsService: pendingActionsService, PlatformService: platformService, + PullLimitCheckDisabled: *flags.PullLimitCheckDisabled, + TrustedOrigins: trustedOrigins, } } func main() { - configureLogger() - setLoggingMode("PRETTY") + logs.ConfigureLogger() + logs.SetLoggingMode("PRETTY") flags := initCLI() - setLoggingLevel(*flags.LogLevel) - setLoggingMode(*flags.LogMode) + logs.SetLoggingLevel(*flags.LogLevel) + logs.SetLoggingMode(*flags.LogMode) for { server := buildServer(flags) diff --git a/api/connection.go b/api/connection.go index 710b978da..14e2dc1ca 100644 --- a/api/connection.go +++ b/api/connection.go @@ -6,8 +6,10 @@ import ( type ReadTransaction interface { GetObject(bucketName string, key []byte, object any) error + GetRawBytes(bucketName string, key []byte) ([]byte, error) GetAll(bucketName string, obj any, append func(o any) (any, error)) error GetAllWithKeyPrefix(bucketName string, keyPrefix []byte, obj any, append func(o any) (any, error)) error + KeyExists(bucketName string, key []byte) (bool, error) } type Transaction interface { diff --git a/api/crypto/aes.go b/api/crypto/aes.go index 922cdfd75..b63e8ffa0 100644 --- a/api/crypto/aes.go +++ b/api/crypto/aes.go @@ -6,11 +6,15 @@ import ( "crypto/aes" "crypto/cipher" "crypto/rand" + "crypto/sha256" "errors" "fmt" "io" + "strings" + "github.com/portainer/portainer/pkg/fips" "golang.org/x/crypto/argon2" + "golang.org/x/crypto/pbkdf2" "golang.org/x/crypto/scrypt" ) @@ -19,20 +23,32 @@ const ( aesGcmHeader = "AES256-GCM" // The encrypted file header aesGcmBlockSize = 1024 * 1024 // 1MB block for aes gcm + aesGcmFIPSHeader = "FIPS-AES256-GCM" + aesGcmFIPSBlockSize = 16 * 1024 * 1024 // 16MB block for aes gcm + // Argon2 settings - // Recommded settings lower memory hardware according to current OWASP recommendations + // Recommended settings lower memory hardware according to current OWASP recommendations // Considering some people run portainer on a NAS I think it's prudent not to assume we're on server grade hardware // https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#argon2id argon2MemoryCost = 12 * 1024 argon2TimeCost = 3 argon2Threads = 1 argon2KeyLength = 32 + + pbkdf2Iterations = 600_000 // use recommended iterations from https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2 a little overkill for this use + pbkdf2SaltLength = 32 ) // AesEncrypt reads from input, encrypts with AES-256 and writes to output. 
passphrase is used to generate an encryption key func AesEncrypt(input io.Reader, output io.Writer, passphrase []byte) error { - if err := aesEncryptGCM(input, output, passphrase); err != nil { - return fmt.Errorf("error encrypting file: %w", err) + if fips.FIPSMode() { + if err := aesEncryptGCMFIPS(input, output, passphrase); err != nil { + return fmt.Errorf("error encrypting file: %w", err) + } + } else { + if err := aesEncryptGCM(input, output, passphrase); err != nil { + return fmt.Errorf("error encrypting file: %w", err) + } } return nil @@ -40,14 +56,36 @@ func AesEncrypt(input io.Reader, output io.Writer, passphrase []byte) error { // AesDecrypt reads from input, decrypts with AES-256 and returns the reader to read the decrypted content from func AesDecrypt(input io.Reader, passphrase []byte) (io.Reader, error) { + fipsMode := fips.FIPSMode() + return aesDecrypt(input, passphrase, fipsMode) +} + +func aesDecrypt(input io.Reader, passphrase []byte, fipsMode bool) (io.Reader, error) { // Read file header to determine how it was encrypted inputReader := bufio.NewReader(input) - header, err := inputReader.Peek(len(aesGcmHeader)) + header, err := inputReader.Peek(len(aesGcmFIPSHeader)) if err != nil { return nil, fmt.Errorf("error reading encrypted backup file header: %w", err) } - if string(header) == aesGcmHeader { + if strings.HasPrefix(string(header), aesGcmFIPSHeader) { + if !fipsMode { + return nil, errors.New("fips encrypted file detected but fips mode is not enabled") + } + + reader, err := aesDecryptGCMFIPS(inputReader, passphrase) + if err != nil { + return nil, fmt.Errorf("error decrypting file: %w", err) + } + + return reader, nil + } + + if strings.HasPrefix(string(header), aesGcmHeader) { + if fipsMode { + return nil, errors.New("fips mode is enabled but non-fips encrypted file detected") + } + reader, err := aesDecryptGCM(inputReader, passphrase) if err != nil { return nil, fmt.Errorf("error decrypting file: %w", err) @@ -203,6 +241,126 @@ func aesDecryptGCM(input io.Reader, passphrase []byte) (io.Reader, error) { return &buf, nil } +// aesEncryptGCMFIPS reads from input, encrypts with AES-256 in a fips compliant +// way and writes to output. passphrase is used to generate an encryption key. 
+func aesEncryptGCMFIPS(input io.Reader, output io.Writer, passphrase []byte) error { + salt := make([]byte, pbkdf2SaltLength) + if _, err := io.ReadFull(rand.Reader, salt); err != nil { + return err + } + + key := pbkdf2.Key(passphrase, salt, pbkdf2Iterations, 32, sha256.New) + + block, err := aes.NewCipher(key) + if err != nil { + return err + } + + // write the header + if _, err := output.Write([]byte(aesGcmFIPSHeader)); err != nil { + return err + } + + // Write nonce and salt to the output file + if _, err := output.Write(salt); err != nil { + return err + } + + // Buffer for reading plaintext blocks + buf := make([]byte, aesGcmFIPSBlockSize) + + // Encrypt plaintext in blocks + for { + // new random nonce for each block + aesgcm, err := cipher.NewGCMWithRandomNonce(block) + if err != nil { + return fmt.Errorf("error creating gcm: %w", err) + } + + n, err := io.ReadFull(input, buf) + if n == 0 { + break // end of plaintext input + } + + if err != nil && !(errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) { + return err + } + + // Seal encrypts the plaintext + ciphertext := aesgcm.Seal(nil, nil, buf[:n], nil) + + _, err = output.Write(ciphertext) + if err != nil { + return err + } + } + + return nil +} + +// aesDecryptGCMFIPS reads from input, decrypts with AES-256 in a fips compliant +// way and returns the reader to read the decrypted content from. +func aesDecryptGCMFIPS(input io.Reader, passphrase []byte) (io.Reader, error) { + // Reader & verify header + header := make([]byte, len(aesGcmFIPSHeader)) + if _, err := io.ReadFull(input, header); err != nil { + return nil, err + } + + if string(header) != aesGcmFIPSHeader { + return nil, errors.New("invalid header") + } + + // Read salt + salt := make([]byte, pbkdf2SaltLength) + if _, err := io.ReadFull(input, salt); err != nil { + return nil, err + } + + key := pbkdf2.Key(passphrase, salt, pbkdf2Iterations, 32, sha256.New) + + // Initialize AES cipher block + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + // Initialize a buffer to store decrypted data + buf := bytes.Buffer{} + + // Decrypt the ciphertext in blocks + for { + // Create GCM mode with the cipher block + aesgcm, err := cipher.NewGCMWithRandomNonce(block) + if err != nil { + return nil, err + } + + // Read a block of ciphertext from the input reader + ciphertextBlock := make([]byte, aesGcmFIPSBlockSize+aesgcm.Overhead()) + n, err := io.ReadFull(input, ciphertextBlock) + if n == 0 { + break // end of ciphertext + } + + if err != nil && !(errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) { + return nil, err + } + + // Decrypt the block of ciphertext + plaintext, err := aesgcm.Open(nil, nil, ciphertextBlock[:n], nil) + if err != nil { + return nil, err + } + + if _, err := buf.Write(plaintext); err != nil { + return nil, err + } + } + + return &buf, nil +} + // aesDecryptOFB reads from input, decrypts with AES-256 and returns the reader to a read decrypted content from. // passphrase is used to generate an encryption key. // note: This function used to decrypt files that were encrypted without a header i.e. 
old archives diff --git a/api/crypto/aes_test.go b/api/crypto/aes_test.go index e03a9917e..de39a69fe 100644 --- a/api/crypto/aes_test.go +++ b/api/crypto/aes_test.go @@ -7,9 +7,15 @@ import ( "path/filepath" "testing" + "github.com/portainer/portainer/pkg/fips" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) +func init() { + fips.InitFIPS(false) +} + const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" func randBytes(n int) []byte { @@ -20,198 +26,296 @@ func randBytes(n int) []byte { return b } +type encryptFunc func(input io.Reader, output io.Writer, passphrase []byte) error +type decryptFunc func(input io.Reader, passphrase []byte) (io.Reader, error) + func Test_encryptAndDecrypt_withTheSamePassword(t *testing.T) { const passphrase = "passphrase" - tmpdir := t.TempDir() + testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc, decryptShouldSucceed bool) { + tmpdir := t.TempDir() - var ( - originFilePath = filepath.Join(tmpdir, "origin") - encryptedFilePath = filepath.Join(tmpdir, "encrypted") - decryptedFilePath = filepath.Join(tmpdir, "decrypted") - ) + var ( + originFilePath = filepath.Join(tmpdir, "origin") + encryptedFilePath = filepath.Join(tmpdir, "encrypted") + decryptedFilePath = filepath.Join(tmpdir, "decrypted") + ) - content := randBytes(1024*1024*100 + 523) - os.WriteFile(originFilePath, content, 0600) + content := randBytes(1024*1024*100 + 523) + os.WriteFile(originFilePath, content, 0600) - originFile, _ := os.Open(originFilePath) - defer originFile.Close() + originFile, _ := os.Open(originFilePath) + defer originFile.Close() - encryptedFileWriter, _ := os.Create(encryptedFilePath) + encryptedFileWriter, _ := os.Create(encryptedFilePath) - err := AesEncrypt(originFile, encryptedFileWriter, []byte(passphrase)) - assert.Nil(t, err, "Failed to encrypt a file") - encryptedFileWriter.Close() + err := encrypt(originFile, encryptedFileWriter, []byte(passphrase)) + require.Nil(t, err, "Failed to encrypt a file") + encryptedFileWriter.Close() - encryptedContent, err := os.ReadFile(encryptedFilePath) - assert.Nil(t, err, "Couldn't read encrypted file") - assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") + encryptedContent, err := os.ReadFile(encryptedFilePath) + require.Nil(t, err, "Couldn't read encrypted file") + assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") - encryptedFileReader, _ := os.Open(encryptedFilePath) - defer encryptedFileReader.Close() + encryptedFileReader, _ := os.Open(encryptedFilePath) + defer encryptedFileReader.Close() - decryptedFileWriter, _ := os.Create(decryptedFilePath) - defer decryptedFileWriter.Close() + decryptedFileWriter, _ := os.Create(decryptedFilePath) + defer decryptedFileWriter.Close() - decryptedReader, err := AesDecrypt(encryptedFileReader, []byte(passphrase)) - assert.Nil(t, err, "Failed to decrypt file") + decryptedReader, err := decrypt(encryptedFileReader, []byte(passphrase)) + if !decryptShouldSucceed { + require.Error(t, err, "Failed to decrypt file as indicated by decryptShouldSucceed") + } else { + require.NoError(t, err, "Failed to decrypt file indicated by decryptShouldSucceed") - io.Copy(decryptedFileWriter, decryptedReader) + io.Copy(decryptedFileWriter, decryptedReader) - decryptedContent, _ := os.ReadFile(decryptedFilePath) - assert.Equal(t, content, decryptedContent, "Original and decrypted content should match") + decryptedContent, _ := os.ReadFile(decryptedFilePath) + assert.Equal(t, content, 
decryptedContent, "Original and decrypted content should match") + } + } + + t.Run("fips", func(t *testing.T) { + testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS, true) + }) + + t.Run("non_fips", func(t *testing.T) { + testFunc(t, aesEncryptGCM, aesDecryptGCM, true) + }) + + t.Run("system_fips_mode_public_entry_points", func(t *testing.T) { + // use the init mode, public entry points + testFunc(t, AesEncrypt, AesDecrypt, true) + }) + + t.Run("fips_encrypted_file_header_fails_in_non_fips_mode", func(t *testing.T) { + // use aesDecrypt which checks the header, confirm that it fails + decrypt := func(input io.Reader, passphrase []byte) (io.Reader, error) { + return aesDecrypt(input, passphrase, false) + } + + testFunc(t, aesEncryptGCMFIPS, decrypt, false) + }) + + t.Run("non_fips_encrypted_file_header_fails_in_fips_mode", func(t *testing.T) { + // use aesDecrypt which checks the header, confirm that it fails + decrypt := func(input io.Reader, passphrase []byte) (io.Reader, error) { + return aesDecrypt(input, passphrase, true) + } + + testFunc(t, aesEncryptGCM, decrypt, false) + }) + + t.Run("fips_encrypted_file_fails_in_non_fips_mode", func(t *testing.T) { + testFunc(t, aesEncryptGCMFIPS, aesDecryptGCM, false) + }) + + t.Run("non_fips_encrypted_file_with_fips_mode_should_fail", func(t *testing.T) { + testFunc(t, aesEncryptGCM, aesDecryptGCMFIPS, false) + }) + + t.Run("fips_with_base_aesDecrypt", func(t *testing.T) { + // maximize coverage, use the base aesDecrypt function with valid fips mode + decrypt := func(input io.Reader, passphrase []byte) (io.Reader, error) { + return aesDecrypt(input, passphrase, true) + } + + testFunc(t, aesEncryptGCMFIPS, decrypt, true) + }) } func Test_encryptAndDecrypt_withStrongPassphrase(t *testing.T) { const passphrase = "A strong passphrase with special characters: !@#$%^&*()_+" - tmpdir := t.TempDir() - var ( - originFilePath = filepath.Join(tmpdir, "origin2") - encryptedFilePath = filepath.Join(tmpdir, "encrypted2") - decryptedFilePath = filepath.Join(tmpdir, "decrypted2") - ) + testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) { + tmpdir := t.TempDir() - content := randBytes(500) - os.WriteFile(originFilePath, content, 0600) + var ( + originFilePath = filepath.Join(tmpdir, "origin2") + encryptedFilePath = filepath.Join(tmpdir, "encrypted2") + decryptedFilePath = filepath.Join(tmpdir, "decrypted2") + ) - originFile, _ := os.Open(originFilePath) - defer originFile.Close() + content := randBytes(500) + os.WriteFile(originFilePath, content, 0600) - encryptedFileWriter, _ := os.Create(encryptedFilePath) + originFile, _ := os.Open(originFilePath) + defer originFile.Close() - err := AesEncrypt(originFile, encryptedFileWriter, []byte(passphrase)) - assert.Nil(t, err, "Failed to encrypt a file") - encryptedFileWriter.Close() + encryptedFileWriter, _ := os.Create(encryptedFilePath) - encryptedContent, err := os.ReadFile(encryptedFilePath) - assert.Nil(t, err, "Couldn't read encrypted file") - assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") + err := encrypt(originFile, encryptedFileWriter, []byte(passphrase)) + assert.Nil(t, err, "Failed to encrypt a file") + encryptedFileWriter.Close() - encryptedFileReader, _ := os.Open(encryptedFilePath) - defer encryptedFileReader.Close() + encryptedContent, err := os.ReadFile(encryptedFilePath) + assert.Nil(t, err, "Couldn't read encrypted file") + assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") - decryptedFileWriter, _ := os.Create(decryptedFilePath) - 
defer decryptedFileWriter.Close() + encryptedFileReader, _ := os.Open(encryptedFilePath) + defer encryptedFileReader.Close() - decryptedReader, err := AesDecrypt(encryptedFileReader, []byte(passphrase)) - assert.Nil(t, err, "Failed to decrypt file") + decryptedFileWriter, _ := os.Create(decryptedFilePath) + defer decryptedFileWriter.Close() - io.Copy(decryptedFileWriter, decryptedReader) + decryptedReader, err := decrypt(encryptedFileReader, []byte(passphrase)) + assert.Nil(t, err, "Failed to decrypt file") - decryptedContent, _ := os.ReadFile(decryptedFilePath) - assert.Equal(t, content, decryptedContent, "Original and decrypted content should match") + io.Copy(decryptedFileWriter, decryptedReader) + + decryptedContent, _ := os.ReadFile(decryptedFilePath) + assert.Equal(t, content, decryptedContent, "Original and decrypted content should match") + } + + t.Run("fips", func(t *testing.T) { + testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS) + }) + + t.Run("non_fips", func(t *testing.T) { + testFunc(t, aesEncryptGCM, aesDecryptGCM) + }) } func Test_encryptAndDecrypt_withTheSamePasswordSmallFile(t *testing.T) { - tmpdir := t.TempDir() + testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) { + tmpdir := t.TempDir() - var ( - originFilePath = filepath.Join(tmpdir, "origin2") - encryptedFilePath = filepath.Join(tmpdir, "encrypted2") - decryptedFilePath = filepath.Join(tmpdir, "decrypted2") - ) + var ( + originFilePath = filepath.Join(tmpdir, "origin2") + encryptedFilePath = filepath.Join(tmpdir, "encrypted2") + decryptedFilePath = filepath.Join(tmpdir, "decrypted2") + ) - content := randBytes(500) - os.WriteFile(originFilePath, content, 0600) + content := randBytes(500) + os.WriteFile(originFilePath, content, 0600) - originFile, _ := os.Open(originFilePath) - defer originFile.Close() + originFile, _ := os.Open(originFilePath) + defer originFile.Close() - encryptedFileWriter, _ := os.Create(encryptedFilePath) + encryptedFileWriter, _ := os.Create(encryptedFilePath) - err := AesEncrypt(originFile, encryptedFileWriter, []byte("passphrase")) - assert.Nil(t, err, "Failed to encrypt a file") - encryptedFileWriter.Close() + err := encrypt(originFile, encryptedFileWriter, []byte("passphrase")) + assert.Nil(t, err, "Failed to encrypt a file") + encryptedFileWriter.Close() - encryptedContent, err := os.ReadFile(encryptedFilePath) - assert.Nil(t, err, "Couldn't read encrypted file") - assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") + encryptedContent, err := os.ReadFile(encryptedFilePath) + assert.Nil(t, err, "Couldn't read encrypted file") + assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") - encryptedFileReader, _ := os.Open(encryptedFilePath) - defer encryptedFileReader.Close() + encryptedFileReader, _ := os.Open(encryptedFilePath) + defer encryptedFileReader.Close() - decryptedFileWriter, _ := os.Create(decryptedFilePath) - defer decryptedFileWriter.Close() + decryptedFileWriter, _ := os.Create(decryptedFilePath) + defer decryptedFileWriter.Close() - decryptedReader, err := AesDecrypt(encryptedFileReader, []byte("passphrase")) - assert.Nil(t, err, "Failed to decrypt file") + decryptedReader, err := decrypt(encryptedFileReader, []byte("passphrase")) + assert.Nil(t, err, "Failed to decrypt file") - io.Copy(decryptedFileWriter, decryptedReader) + io.Copy(decryptedFileWriter, decryptedReader) - decryptedContent, _ := os.ReadFile(decryptedFilePath) - assert.Equal(t, content, decryptedContent, "Original and decrypted content should match") 
+ decryptedContent, _ := os.ReadFile(decryptedFilePath) + assert.Equal(t, content, decryptedContent, "Original and decrypted content should match") + } + + t.Run("fips", func(t *testing.T) { + testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS) + }) + + t.Run("non_fips", func(t *testing.T) { + testFunc(t, aesEncryptGCM, aesDecryptGCM) + }) } func Test_encryptAndDecrypt_withEmptyPassword(t *testing.T) { - tmpdir := t.TempDir() + testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) { + tmpdir := t.TempDir() - var ( - originFilePath = filepath.Join(tmpdir, "origin") - encryptedFilePath = filepath.Join(tmpdir, "encrypted") - decryptedFilePath = filepath.Join(tmpdir, "decrypted") - ) + var ( + originFilePath = filepath.Join(tmpdir, "origin") + encryptedFilePath = filepath.Join(tmpdir, "encrypted") + decryptedFilePath = filepath.Join(tmpdir, "decrypted") + ) - content := randBytes(1024 * 50) - os.WriteFile(originFilePath, content, 0600) + content := randBytes(1024 * 50) + os.WriteFile(originFilePath, content, 0600) - originFile, _ := os.Open(originFilePath) - defer originFile.Close() + originFile, _ := os.Open(originFilePath) + defer originFile.Close() - encryptedFileWriter, _ := os.Create(encryptedFilePath) - defer encryptedFileWriter.Close() + encryptedFileWriter, _ := os.Create(encryptedFilePath) + defer encryptedFileWriter.Close() - err := AesEncrypt(originFile, encryptedFileWriter, []byte("")) - assert.Nil(t, err, "Failed to encrypt a file") - encryptedContent, err := os.ReadFile(encryptedFilePath) - assert.Nil(t, err, "Couldn't read encrypted file") - assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") + err := encrypt(originFile, encryptedFileWriter, []byte("")) + assert.Nil(t, err, "Failed to encrypt a file") + encryptedContent, err := os.ReadFile(encryptedFilePath) + assert.Nil(t, err, "Couldn't read encrypted file") + assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") - encryptedFileReader, _ := os.Open(encryptedFilePath) - defer encryptedFileReader.Close() + encryptedFileReader, _ := os.Open(encryptedFilePath) + defer encryptedFileReader.Close() - decryptedFileWriter, _ := os.Create(decryptedFilePath) - defer decryptedFileWriter.Close() + decryptedFileWriter, _ := os.Create(decryptedFilePath) + defer decryptedFileWriter.Close() - decryptedReader, err := AesDecrypt(encryptedFileReader, []byte("")) - assert.Nil(t, err, "Failed to decrypt file") + decryptedReader, err := decrypt(encryptedFileReader, []byte("")) + assert.Nil(t, err, "Failed to decrypt file") - io.Copy(decryptedFileWriter, decryptedReader) + io.Copy(decryptedFileWriter, decryptedReader) - decryptedContent, _ := os.ReadFile(decryptedFilePath) - assert.Equal(t, content, decryptedContent, "Original and decrypted content should match") + decryptedContent, _ := os.ReadFile(decryptedFilePath) + assert.Equal(t, content, decryptedContent, "Original and decrypted content should match") + } + + t.Run("fips", func(t *testing.T) { + testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS) + }) + + t.Run("non_fips", func(t *testing.T) { + testFunc(t, aesEncryptGCM, aesDecryptGCM) + }) } func Test_decryptWithDifferentPassphrase_shouldProduceWrongResult(t *testing.T) { - tmpdir := t.TempDir() + testFunc := func(t *testing.T, encrypt encryptFunc, decrypt decryptFunc) { + tmpdir := t.TempDir() - var ( - originFilePath = filepath.Join(tmpdir, "origin") - encryptedFilePath = filepath.Join(tmpdir, "encrypted") - decryptedFilePath = filepath.Join(tmpdir, "decrypted") - ) + var ( + 
originFilePath = filepath.Join(tmpdir, "origin") + encryptedFilePath = filepath.Join(tmpdir, "encrypted") + decryptedFilePath = filepath.Join(tmpdir, "decrypted") + ) - content := randBytes(1034) - os.WriteFile(originFilePath, content, 0600) + content := randBytes(1034) + os.WriteFile(originFilePath, content, 0600) - originFile, _ := os.Open(originFilePath) - defer originFile.Close() + originFile, _ := os.Open(originFilePath) + defer originFile.Close() - encryptedFileWriter, _ := os.Create(encryptedFilePath) - defer encryptedFileWriter.Close() + encryptedFileWriter, _ := os.Create(encryptedFilePath) + defer encryptedFileWriter.Close() - err := AesEncrypt(originFile, encryptedFileWriter, []byte("passphrase")) - assert.Nil(t, err, "Failed to encrypt a file") - encryptedContent, err := os.ReadFile(encryptedFilePath) - assert.Nil(t, err, "Couldn't read encrypted file") - assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") + err := encrypt(originFile, encryptedFileWriter, []byte("passphrase")) + assert.Nil(t, err, "Failed to encrypt a file") + encryptedContent, err := os.ReadFile(encryptedFilePath) + assert.Nil(t, err, "Couldn't read encrypted file") + assert.NotEqual(t, encryptedContent, content, "Content wasn't encrypted") - encryptedFileReader, _ := os.Open(encryptedFilePath) - defer encryptedFileReader.Close() + encryptedFileReader, _ := os.Open(encryptedFilePath) + defer encryptedFileReader.Close() - decryptedFileWriter, _ := os.Create(decryptedFilePath) - defer decryptedFileWriter.Close() + decryptedFileWriter, _ := os.Create(decryptedFilePath) + defer decryptedFileWriter.Close() - _, err = AesDecrypt(encryptedFileReader, []byte("garbage")) - assert.NotNil(t, err, "Should not allow decrypt with wrong passphrase") + _, err = decrypt(encryptedFileReader, []byte("garbage")) + assert.NotNil(t, err, "Should not allow decrypt with wrong passphrase") + } + + t.Run("fips", func(t *testing.T) { + testFunc(t, aesEncryptGCMFIPS, aesDecryptGCMFIPS) + }) + + t.Run("non_fips", func(t *testing.T) { + testFunc(t, aesEncryptGCM, aesDecryptGCM) + }) } diff --git a/api/crypto/ecdsa.go b/api/crypto/ecdsa.go index 1279c06e1..e7eabdd1d 100644 --- a/api/crypto/ecdsa.go +++ b/api/crypto/ecdsa.go @@ -112,7 +112,7 @@ func (service *ECDSAService) CreateSignature(message string) (string, error) { message = service.secret } - hash := libcrypto.HashFromBytes([]byte(message)) + hash := libcrypto.InsecureHashFromBytes([]byte(message)) r, s, err := ecdsa.Sign(rand.Reader, service.privateKey, hash) if err != nil { diff --git a/api/crypto/ecdsa_test.go b/api/crypto/ecdsa_test.go new file mode 100644 index 000000000..62141268a --- /dev/null +++ b/api/crypto/ecdsa_test.go @@ -0,0 +1,22 @@ +package crypto + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCreateSignature(t *testing.T) { + var s = NewECDSAService("secret") + + privKey, pubKey, err := s.GenerateKeyPair() + require.NoError(t, err) + require.Greater(t, len(privKey), 0) + require.Greater(t, len(pubKey), 0) + + m := "test message" + r, err := s.CreateSignature(m) + require.NoError(t, err) + require.NotEqual(t, r, m) + require.Greater(t, len(r), 0) +} diff --git a/api/crypto/nonce.go b/api/crypto/nonce.go index 571a9ba71..af1cc898b 100644 --- a/api/crypto/nonce.go +++ b/api/crypto/nonce.go @@ -15,7 +15,7 @@ func NewNonce(size int) *Nonce { } // NewRandomNonce generates a new initial nonce with the lower byte set to a random value -// This ensures there are plenty of nonce values availble before rolling over +// 
This ensures there are plenty of nonce values available before rolling over // Based on ideas from the Secure Programming Cookbook for C and C++ by John Viega, Matt Messier // https://www.oreilly.com/library/view/secure-programming-cookbook/0596003943/ch04s09.html func NewRandomNonce(size int) (*Nonce, error) { diff --git a/api/crypto/tls.go b/api/crypto/tls.go index 54e4cd43a..d6b7c3b09 100644 --- a/api/crypto/tls.go +++ b/api/crypto/tls.go @@ -1,14 +1,36 @@ package crypto import ( + "crypto/fips140" "crypto/tls" "crypto/x509" "os" + + portainer "github.com/portainer/portainer/api" ) // CreateTLSConfiguration creates a basic tls.Config with recommended TLS settings -func CreateTLSConfiguration() *tls.Config { - return &tls.Config{ +func CreateTLSConfiguration(insecureSkipVerify bool) *tls.Config { //nolint:forbidigo + // TODO: use fips.FIPSMode() instead + return createTLSConfiguration(fips140.Enabled(), insecureSkipVerify) +} + +func createTLSConfiguration(fipsEnabled bool, insecureSkipVerify bool) *tls.Config { //nolint:forbidigo + if fipsEnabled { + return &tls.Config{ //nolint:forbidigo + MinVersion: tls.VersionTLS12, + MaxVersion: tls.VersionTLS13, + CipherSuites: []uint16{ + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + }, + CurvePreferences: []tls.CurveID{tls.CurveP256, tls.CurveP384, tls.CurveP521}, + } + } + + return &tls.Config{ //nolint:forbidigo MinVersion: tls.VersionTLS12, CipherSuites: []uint16{ tls.TLS_AES_128_GCM_SHA256, @@ -29,24 +51,34 @@ func CreateTLSConfiguration() *tls.Config { tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, }, + InsecureSkipVerify: insecureSkipVerify, //nolint:forbidigo } } // CreateTLSConfigurationFromBytes initializes a tls.Config using a CA certificate, a certificate and a key // loaded from memory. -func CreateTLSConfigurationFromBytes(caCert, cert, key []byte, skipClientVerification, skipServerVerification bool) (*tls.Config, error) { - config := CreateTLSConfiguration() - config.InsecureSkipVerify = skipServerVerification +func CreateTLSConfigurationFromBytes(useTLS bool, caCert, cert, key []byte, skipClientVerification, skipServerVerification bool) (*tls.Config, error) { //nolint:forbidigo + // TODO: use fips.FIPSMode() instead + return createTLSConfigurationFromBytes(fips140.Enabled(), useTLS, caCert, cert, key, skipClientVerification, skipServerVerification) +} - if !skipClientVerification { +func createTLSConfigurationFromBytes(fipsEnabled, useTLS bool, caCert, cert, key []byte, skipClientVerification, skipServerVerification bool) (*tls.Config, error) { //nolint:forbidigo + if !useTLS { + return nil, nil + } + + config := createTLSConfiguration(fipsEnabled, skipServerVerification) + + if !skipClientVerification || fipsEnabled { certificate, err := tls.X509KeyPair(cert, key) if err != nil { return nil, err } + config.Certificates = []tls.Certificate{certificate} } - if !skipServerVerification { + if !skipServerVerification || fipsEnabled { caCertPool := x509.NewCertPool() caCertPool.AppendCertsFromPEM(caCert) config.RootCAs = caCertPool @@ -57,29 +89,37 @@ func CreateTLSConfigurationFromBytes(caCert, cert, key []byte, skipClientVerific // CreateTLSConfigurationFromDisk initializes a tls.Config using a CA certificate, a certificate and a key // loaded from disk. 
-func CreateTLSConfigurationFromDisk(caCertPath, certPath, keyPath string, skipServerVerification bool) (*tls.Config, error) { - config := CreateTLSConfiguration() - config.InsecureSkipVerify = skipServerVerification +func CreateTLSConfigurationFromDisk(config portainer.TLSConfiguration) (*tls.Config, error) { //nolint:forbidigo + // TODO: use fips.FIPSMode() instead + return createTLSConfigurationFromDisk(fips140.Enabled(), config) +} - if certPath != "" && keyPath != "" { - cert, err := tls.LoadX509KeyPair(certPath, keyPath) +func createTLSConfigurationFromDisk(fipsEnabled bool, config portainer.TLSConfiguration) (*tls.Config, error) { //nolint:forbidigo + if !config.TLS { + return nil, nil + } + + tlsConfig := createTLSConfiguration(fipsEnabled, config.TLSSkipVerify) + + if config.TLSCertPath != "" && config.TLSKeyPath != "" { + cert, err := tls.LoadX509KeyPair(config.TLSCertPath, config.TLSKeyPath) if err != nil { return nil, err } - config.Certificates = []tls.Certificate{cert} + tlsConfig.Certificates = []tls.Certificate{cert} } - if !skipServerVerification && caCertPath != "" { - caCert, err := os.ReadFile(caCertPath) + if !tlsConfig.InsecureSkipVerify && config.TLSCACertPath != "" { //nolint:forbidigo + caCert, err := os.ReadFile(config.TLSCACertPath) if err != nil { return nil, err } caCertPool := x509.NewCertPool() caCertPool.AppendCertsFromPEM(caCert) - config.RootCAs = caCertPool + tlsConfig.RootCAs = caCertPool } - return config, nil + return tlsConfig, nil } diff --git a/api/crypto/tls_test.go b/api/crypto/tls_test.go new file mode 100644 index 000000000..77abdff73 --- /dev/null +++ b/api/crypto/tls_test.go @@ -0,0 +1,87 @@ +package crypto + +import ( + "crypto/tls" + "testing" + + portainer "github.com/portainer/portainer/api" + + "github.com/stretchr/testify/require" +) + +func TestCreateTLSConfiguration(t *testing.T) { + // InsecureSkipVerify = false + config := CreateTLSConfiguration(false) + require.Equal(t, config.MinVersion, uint16(tls.VersionTLS12)) //nolint:forbidigo + require.False(t, config.InsecureSkipVerify) //nolint:forbidigo + + // InsecureSkipVerify = true + config = CreateTLSConfiguration(true) + require.Equal(t, config.MinVersion, uint16(tls.VersionTLS12)) //nolint:forbidigo + require.True(t, config.InsecureSkipVerify) //nolint:forbidigo +} + +func TestCreateTLSConfigurationFIPS(t *testing.T) { + fips := true + + fipsCipherSuites := []uint16{ + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + } + + fipsCurvePreferences := []tls.CurveID{tls.CurveP256, tls.CurveP384, tls.CurveP521} + + config := createTLSConfiguration(fips, false) + require.Equal(t, config.MinVersion, uint16(tls.VersionTLS12)) //nolint:forbidigo + require.Equal(t, config.MaxVersion, uint16(tls.VersionTLS13)) //nolint:forbidigo + require.Equal(t, config.CipherSuites, fipsCipherSuites) //nolint:forbidigo + require.Equal(t, config.CurvePreferences, fipsCurvePreferences) //nolint:forbidigo + require.False(t, config.InsecureSkipVerify) //nolint:forbidigo +} + +func TestCreateTLSConfigurationFromBytes(t *testing.T) { + // No TLS + config, err := CreateTLSConfigurationFromBytes(false, nil, nil, nil, false, false) + require.Nil(t, err) + require.Nil(t, config) + + // Skip TLS client/server verifications + config, err = CreateTLSConfigurationFromBytes(true, nil, nil, nil, true, true) + require.NoError(t, err) + require.NotNil(t, config) + + // Empty TLS + config, err = 
CreateTLSConfigurationFromBytes(true, nil, nil, nil, false, false) + require.Error(t, err) + require.Nil(t, config) +} + +func TestCreateTLSConfigurationFromDisk(t *testing.T) { + // No TLS + config, err := CreateTLSConfigurationFromDisk(portainer.TLSConfiguration{}) + require.Nil(t, err) + require.Nil(t, config) + + // Skip TLS verifications + config, err = CreateTLSConfigurationFromDisk(portainer.TLSConfiguration{ + TLS: true, + TLSSkipVerify: true, + }) + require.NoError(t, err) + require.NotNil(t, config) +} + +func TestCreateTLSConfigurationFromDiskFIPS(t *testing.T) { + fips := true + + // Skipping TLS verifications cannot be done in FIPS mode + config, err := createTLSConfigurationFromDisk(fips, portainer.TLSConfiguration{ + TLS: true, + TLSSkipVerify: true, + }) + require.NoError(t, err) + require.NotNil(t, config) + require.False(t, config.InsecureSkipVerify) //nolint:forbidigo +} diff --git a/api/database/boltdb/db.go b/api/database/boltdb/db.go index cef93b345..32a1b55c3 100644 --- a/api/database/boltdb/db.go +++ b/api/database/boltdb/db.go @@ -138,6 +138,8 @@ func (connection *DbConnection) Open() error { db, err := bolt.Open(databasePath, 0600, &bolt.Options{ Timeout: 1 * time.Second, InitialMmapSize: connection.InitialMmapSize, + FreelistType: bolt.FreelistMapType, + NoFreelistSync: true, }) if err != nil { return err @@ -244,6 +246,32 @@ func (connection *DbConnection) GetObject(bucketName string, key []byte, object }) } +func (connection *DbConnection) GetRawBytes(bucketName string, key []byte) ([]byte, error) { + var value []byte + + err := connection.ViewTx(func(tx portainer.Transaction) error { + var err error + value, err = tx.GetRawBytes(bucketName, key) + + return err + }) + + return value, err +} + +func (connection *DbConnection) KeyExists(bucketName string, key []byte) (bool, error) { + var exists bool + + err := connection.ViewTx(func(tx portainer.Transaction) error { + var err error + exists, err = tx.KeyExists(bucketName, key) + + return err + }) + + return exists, err +} + func (connection *DbConnection) getEncryptionKey() []byte { if !connection.isEncrypted { return nil diff --git a/api/database/boltdb/json.go b/api/database/boltdb/json.go index b9ce97213..363e0ad7d 100644 --- a/api/database/boltdb/json.go +++ b/api/database/boltdb/json.go @@ -4,8 +4,6 @@ import ( "bytes" "crypto/aes" "crypto/cipher" - "crypto/rand" - "io" "github.com/pkg/errors" "github.com/segmentio/encoding/json" @@ -65,18 +63,18 @@ func (connection *DbConnection) UnmarshalObject(data []byte, object any) error { // https://gist.github.com/atoponce/07d8d4c833873be2f68c34f9afc5a78a#symmetric-encryption func encrypt(plaintext []byte, passphrase []byte) (encrypted []byte, err error) { - block, _ := aes.NewCipher(passphrase) - gcm, err := cipher.NewGCM(block) + block, err := aes.NewCipher(passphrase) if err != nil { return encrypted, err } - nonce := make([]byte, gcm.NonceSize()) - if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + // NewGCMWithRandomNonce in go 1.24 handles setting up the nonce and adding it to the encrypted output + gcm, err := cipher.NewGCMWithRandomNonce(block) + if err != nil { return encrypted, err } - return gcm.Seal(nonce, nonce, plaintext, nil), nil + return gcm.Seal(nil, nil, plaintext, nil), nil } func decrypt(encrypted []byte, passphrase []byte) (plaintextByte []byte, err error) { @@ -89,19 +87,17 @@ func decrypt(encrypted []byte, passphrase []byte) (plaintextByte []byte, err err return encrypted, errors.Wrap(err, "Error creating cypher block") } - gcm, err 
:= cipher.NewGCM(block) + // NewGCMWithRandomNonce in go 1.24 handles reading the nonce from the encrypted input for us + gcm, err := cipher.NewGCMWithRandomNonce(block) if err != nil { return encrypted, errors.Wrap(err, "Error creating GCM") } - nonceSize := gcm.NonceSize() - if len(encrypted) < nonceSize { + if len(encrypted) < gcm.NonceSize() { return encrypted, errEncryptedStringTooShort } - nonce, ciphertextByteClean := encrypted[:nonceSize], encrypted[nonceSize:] - - plaintextByte, err = gcm.Open(nil, nonce, ciphertextByteClean, nil) + plaintextByte, err = gcm.Open(nil, nil, encrypted, nil) if err != nil { return encrypted, errors.Wrap(err, "Error decrypting text") } diff --git a/api/database/boltdb/json_test.go b/api/database/boltdb/json_test.go index 577aa2cfd..32813f907 100644 --- a/api/database/boltdb/json_test.go +++ b/api/database/boltdb/json_test.go @@ -1,12 +1,19 @@ package boltdb import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" "crypto/sha256" + "encoding/base64" "fmt" + "io" "testing" "github.com/gofrs/uuid" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const ( @@ -160,7 +167,7 @@ func Test_ObjectMarshallingEncrypted(t *testing.T) { } key := secretToEncryptionKey(passphrase) - conn := DbConnection{EncryptionKey: key} + conn := DbConnection{EncryptionKey: key, isEncrypted: true} for _, test := range tests { t.Run(fmt.Sprintf("%s -> %s", test.object, test.expected), func(t *testing.T) { @@ -175,3 +182,94 @@ func Test_ObjectMarshallingEncrypted(t *testing.T) { }) } } + +func Test_NonceSources(t *testing.T) { + // ensure that the new go 1.24 NewGCMWithRandomNonce works correctly with + // the old way of creating and including the nonce + + encryptOldFn := func(plaintext []byte, passphrase []byte) (encrypted []byte, err error) { + block, _ := aes.NewCipher(passphrase) + gcm, err := cipher.NewGCM(block) + if err != nil { + return encrypted, err + } + + nonce := make([]byte, gcm.NonceSize()) + if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + return encrypted, err + } + + return gcm.Seal(nonce, nonce, plaintext, nil), nil + } + + decryptOldFn := func(encrypted []byte, passphrase []byte) (plaintext []byte, err error) { + block, err := aes.NewCipher(passphrase) + if err != nil { + return encrypted, errors.Wrap(err, "Error creating cypher block") + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return encrypted, errors.Wrap(err, "Error creating GCM") + } + + nonceSize := gcm.NonceSize() + if len(encrypted) < nonceSize { + return encrypted, errEncryptedStringTooShort + } + + nonce, ciphertextByteClean := encrypted[:nonceSize], encrypted[nonceSize:] + + plaintext, err = gcm.Open(nil, nonce, ciphertextByteClean, nil) + if err != nil { + return encrypted, errors.Wrap(err, "Error decrypting text") + } + + return plaintext, err + } + + encryptNewFn := encrypt + decryptNewFn := decrypt + + passphrase := make([]byte, 32) + _, err := io.ReadFull(rand.Reader, passphrase) + require.NoError(t, err) + + junk := make([]byte, 1024) + _, err = io.ReadFull(rand.Reader, junk) + require.NoError(t, err) + + junkEnc := make([]byte, base64.StdEncoding.EncodedLen(len(junk))) + base64.StdEncoding.Encode(junkEnc, junk) + + cases := [][]byte{ + []byte("test"), + []byte("35"), + []byte("9ca4a1dd-a439-4593-b386-a7dfdc2e9fc6"), + []byte(jsonobject), + passphrase, + junk, + junkEnc, + } + + for _, plain := range cases { + var enc, dec []byte + var err error + + enc, err = encryptOldFn(plain, passphrase) + require.NoError(t, 
err) + + dec, err = decryptNewFn(enc, passphrase) + require.NoError(t, err) + + require.Equal(t, plain, dec) + + enc, err = encryptNewFn(plain, passphrase) + require.NoError(t, err) + + dec, err = decryptOldFn(enc, passphrase) + require.NoError(t, err) + + require.Equal(t, plain, dec) + } +} diff --git a/api/database/boltdb/tx.go b/api/database/boltdb/tx.go index 5de5d5333..2e45ac7b9 100644 --- a/api/database/boltdb/tx.go +++ b/api/database/boltdb/tx.go @@ -6,6 +6,7 @@ import ( dserrors "github.com/portainer/portainer/api/dataservices/errors" + "github.com/pkg/errors" "github.com/rs/zerolog/log" bolt "go.etcd.io/bbolt" ) @@ -31,6 +32,33 @@ func (tx *DbTransaction) GetObject(bucketName string, key []byte, object any) er return tx.conn.UnmarshalObject(value, object) } +func (tx *DbTransaction) GetRawBytes(bucketName string, key []byte) ([]byte, error) { + bucket := tx.tx.Bucket([]byte(bucketName)) + + value := bucket.Get(key) + if value == nil { + return nil, fmt.Errorf("%w (bucket=%s, key=%s)", dserrors.ErrObjectNotFound, bucketName, keyToString(key)) + } + + if tx.conn.getEncryptionKey() != nil { + var err error + + if value, err = decrypt(value, tx.conn.getEncryptionKey()); err != nil { + return value, errors.Wrap(err, "Failed decrypting object") + } + } + + return value, nil +} + +func (tx *DbTransaction) KeyExists(bucketName string, key []byte) (bool, error) { + bucket := tx.tx.Bucket([]byte(bucketName)) + + value := bucket.Get(key) + + return value != nil, nil +} + func (tx *DbTransaction) UpdateObject(bucketName string, key []byte, object any) error { data, err := tx.conn.MarshalObject(object) if err != nil { diff --git a/api/dataservices/base.go b/api/dataservices/base.go index 9b1a42a53..18839b60f 100644 --- a/api/dataservices/base.go +++ b/api/dataservices/base.go @@ -9,7 +9,8 @@ import ( type BaseCRUD[T any, I constraints.Integer] interface { Create(element *T) error Read(ID I) (*T, error) - ReadAll() ([]T, error) + Exists(ID I) (bool, error) + ReadAll(predicates ...func(T) bool) ([]T, error) Update(ID I, element *T) error Delete(ID I) error } @@ -42,12 +43,26 @@ func (service BaseDataService[T, I]) Read(ID I) (*T, error) { }) } -func (service BaseDataService[T, I]) ReadAll() ([]T, error) { +func (service BaseDataService[T, I]) Exists(ID I) (bool, error) { + var exists bool + + err := service.Connection.ViewTx(func(tx portainer.Transaction) error { + var err error + exists, err = service.Tx(tx).Exists(ID) + + return err + }) + + return exists, err +} + +// ReadAll retrieves all the elements that satisfy all the provided predicates. +func (service BaseDataService[T, I]) ReadAll(predicates ...func(T) bool) ([]T, error) { var collection = make([]T, 0) return collection, service.Connection.ViewTx(func(tx portainer.Transaction) error { var err error - collection, err = service.Tx(tx).ReadAll() + collection, err = service.Tx(tx).ReadAll(predicates...) 
return err }) diff --git a/api/dataservices/base_test.go b/api/dataservices/base_test.go new file mode 100644 index 000000000..e97a09963 --- /dev/null +++ b/api/dataservices/base_test.go @@ -0,0 +1,92 @@ +package dataservices + +import ( + "strconv" + "testing" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/slicesx" + + "github.com/stretchr/testify/require" +) + +type testObject struct { + ID int + Value int +} + +type mockConnection struct { + store map[int]testObject + + portainer.Connection +} + +func (m mockConnection) UpdateObject(bucket string, key []byte, value interface{}) error { + obj := value.(*testObject) + + m.store[obj.ID] = *obj + + return nil +} + +func (m mockConnection) GetAll(bucketName string, obj any, appendFn func(o any) (any, error)) error { + for _, v := range m.store { + if _, err := appendFn(&v); err != nil { + return err + } + } + + return nil +} + +func (m mockConnection) UpdateTx(fn func(portainer.Transaction) error) error { + return fn(m) +} + +func (m mockConnection) ViewTx(fn func(portainer.Transaction) error) error { + return fn(m) +} + +func (m mockConnection) ConvertToKey(v int) []byte { + return []byte(strconv.Itoa(v)) +} + +func TestReadAll(t *testing.T) { + service := BaseDataService[testObject, int]{ + Bucket: "testBucket", + Connection: mockConnection{store: make(map[int]testObject)}, + } + + data := []testObject{ + {ID: 1, Value: 1}, + {ID: 2, Value: 2}, + {ID: 3, Value: 3}, + {ID: 4, Value: 4}, + {ID: 5, Value: 5}, + } + + for _, item := range data { + err := service.Update(item.ID, &item) + require.NoError(t, err) + } + + // ReadAll without predicates + result, err := service.ReadAll() + require.NoError(t, err) + + expected := append([]testObject{}, data...) + + require.ElementsMatch(t, expected, result) + + // ReadAll with predicates + hasLowID := func(obj testObject) bool { return obj.ID < 3 } + isEven := func(obj testObject) bool { return obj.Value%2 == 0 } + + result, err = service.ReadAll(hasLowID, isEven) + require.NoError(t, err) + + expected = slicesx.Filter(expected, hasLowID) + expected = slicesx.Filter(expected, isEven) + + require.ElementsMatch(t, expected, result) +} diff --git a/api/dataservices/base_tx.go b/api/dataservices/base_tx.go index db1e702cb..5d7e7eee0 100644 --- a/api/dataservices/base_tx.go +++ b/api/dataservices/base_tx.go @@ -28,13 +28,38 @@ func (service BaseDataServiceTx[T, I]) Read(ID I) (*T, error) { return &element, nil } -func (service BaseDataServiceTx[T, I]) ReadAll() ([]T, error) { +func (service BaseDataServiceTx[T, I]) Exists(ID I) (bool, error) { + identifier := service.Connection.ConvertToKey(int(ID)) + + return service.Tx.KeyExists(service.Bucket, identifier) +} + +// ReadAll retrieves all the elements that satisfy all the provided predicates. 
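+// Predicates are combined with a logical AND: an element is included in the
+// result only when every provided predicate returns true for it; calling
+// ReadAll with no predicates returns every element in the bucket.
+// Illustrative usage (mirroring the predicates exercised in base_test.go):
+//
+//	hasLowID := func(obj testObject) bool { return obj.ID < 3 }
+//	isEven := func(obj testObject) bool { return obj.Value%2 == 0 }
+//	result, err := service.ReadAll(hasLowID, isEven)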
+func (service BaseDataServiceTx[T, I]) ReadAll(predicates ...func(T) bool) ([]T, error) { var collection = make([]T, 0) + if len(predicates) == 0 { + return collection, service.Tx.GetAll( + service.Bucket, + new(T), + AppendFn(&collection), + ) + } + + filterFn := func(element T) bool { + for _, p := range predicates { + if !p(element) { + return false + } + } + + return true + } + return collection, service.Tx.GetAll( service.Bucket, new(T), - AppendFn(&collection), + FilterFn(&collection, filterFn), ) } diff --git a/api/dataservices/edgegroup/tx.go b/api/dataservices/edgegroup/tx.go index 19f37e011..2fba688a6 100644 --- a/api/dataservices/edgegroup/tx.go +++ b/api/dataservices/edgegroup/tx.go @@ -17,11 +17,29 @@ func (service ServiceTx) UpdateEdgeGroupFunc(ID portainer.EdgeGroupID, updateFun } func (service ServiceTx) Create(group *portainer.EdgeGroup) error { - return service.Tx.CreateObject( + es := group.Endpoints + group.Endpoints = nil // Clear deprecated field + + err := service.Tx.CreateObject( BucketName, func(id uint64) (int, any) { group.ID = portainer.EdgeGroupID(id) return int(group.ID), group }, ) + + group.Endpoints = es // Restore endpoints after create + + return err +} + +func (service ServiceTx) Update(ID portainer.EdgeGroupID, group *portainer.EdgeGroup) error { + es := group.Endpoints + group.Endpoints = nil // Clear deprecated field + + err := service.BaseDataServiceTx.Update(ID, group) + + group.Endpoints = es // Restore endpoints after update + + return err } diff --git a/api/dataservices/edgestack/edgestack_test.go b/api/dataservices/edgestack/edgestack_test.go new file mode 100644 index 000000000..debb4652e --- /dev/null +++ b/api/dataservices/edgestack/edgestack_test.go @@ -0,0 +1,50 @@ +package edgestack + +import ( + "testing" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/database/boltdb" + + "github.com/stretchr/testify/require" +) + +func TestUpdate(t *testing.T) { + var conn portainer.Connection = &boltdb.DbConnection{Path: t.TempDir()} + err := conn.Open() + require.NoError(t, err) + + defer conn.Close() + + service, err := NewService(conn, func(portainer.Transaction, portainer.EdgeStackID) {}) + require.NoError(t, err) + + const edgeStackID = 1 + edgeStack := &portainer.EdgeStack{ + ID: edgeStackID, + Name: "Test Stack", + } + + err = service.Create(edgeStackID, edgeStack) + require.NoError(t, err) + + err = service.UpdateEdgeStackFunc(edgeStackID, func(edgeStack *portainer.EdgeStack) { + edgeStack.Name = "Updated Stack" + }) + require.NoError(t, err) + + updatedStack, err := service.EdgeStack(edgeStackID) + require.NoError(t, err) + require.Equal(t, "Updated Stack", updatedStack.Name) + + err = conn.UpdateTx(func(tx portainer.Transaction) error { + return service.UpdateEdgeStackFuncTx(tx, edgeStackID, func(edgeStack *portainer.EdgeStack) { + edgeStack.Name = "Updated Stack Again" + }) + }) + require.NoError(t, err) + + updatedStack, err = service.EdgeStack(edgeStackID) + require.NoError(t, err) + require.Equal(t, "Updated Stack Again", updatedStack.Name) +} diff --git a/api/dataservices/edgestackstatus/edgestackstatus.go b/api/dataservices/edgestackstatus/edgestackstatus.go new file mode 100644 index 000000000..7d063ba49 --- /dev/null +++ b/api/dataservices/edgestackstatus/edgestackstatus.go @@ -0,0 +1,89 @@ +package edgestackstatus + +import ( + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/dataservices" +) + +var _ dataservices.EdgeStackStatusService = &Service{} + 
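+// BucketName is the BoltDB bucket in which this service stores one status
+// object per (edge stack, environment) pair; entries are keyed by the
+// concatenation of the edge stack ID and the endpoint ID (see key() below).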
+const BucketName = "edge_stack_status" + +type Service struct { + conn portainer.Connection +} + +func (service *Service) BucketName() string { + return BucketName +} + +func NewService(connection portainer.Connection) (*Service, error) { + if err := connection.SetServiceName(BucketName); err != nil { + return nil, err + } + + return &Service{conn: connection}, nil +} + +func (s *Service) Tx(tx portainer.Transaction) ServiceTx { + return ServiceTx{ + service: s, + tx: tx, + } +} + +func (s *Service) Create(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error { + return s.conn.UpdateTx(func(tx portainer.Transaction) error { + return s.Tx(tx).Create(edgeStackID, endpointID, status) + }) +} + +func (s *Service) Read(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) (*portainer.EdgeStackStatusForEnv, error) { + var element *portainer.EdgeStackStatusForEnv + + return element, s.conn.ViewTx(func(tx portainer.Transaction) error { + var err error + element, err = s.Tx(tx).Read(edgeStackID, endpointID) + + return err + }) +} + +func (s *Service) ReadAll(edgeStackID portainer.EdgeStackID) ([]portainer.EdgeStackStatusForEnv, error) { + var collection = make([]portainer.EdgeStackStatusForEnv, 0) + + return collection, s.conn.ViewTx(func(tx portainer.Transaction) error { + var err error + collection, err = s.Tx(tx).ReadAll(edgeStackID) + + return err + }) +} + +func (s *Service) Update(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error { + return s.conn.UpdateTx(func(tx portainer.Transaction) error { + return s.Tx(tx).Update(edgeStackID, endpointID, status) + }) +} + +func (s *Service) Delete(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) error { + return s.conn.UpdateTx(func(tx portainer.Transaction) error { + return s.Tx(tx).Delete(edgeStackID, endpointID) + }) +} + +func (s *Service) DeleteAll(edgeStackID portainer.EdgeStackID) error { + return s.conn.UpdateTx(func(tx portainer.Transaction) error { + return s.Tx(tx).DeleteAll(edgeStackID) + }) +} + +func (s *Service) Clear(edgeStackID portainer.EdgeStackID, relatedEnvironmentsIDs []portainer.EndpointID) error { + return s.conn.UpdateTx(func(tx portainer.Transaction) error { + return s.Tx(tx).Clear(edgeStackID, relatedEnvironmentsIDs) + }) +} + +func (s *Service) key(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) []byte { + return append(s.conn.ConvertToKey(int(edgeStackID)), s.conn.ConvertToKey(int(endpointID))...) 
+} diff --git a/api/dataservices/edgestackstatus/tx.go b/api/dataservices/edgestackstatus/tx.go new file mode 100644 index 000000000..b0dc14856 --- /dev/null +++ b/api/dataservices/edgestackstatus/tx.go @@ -0,0 +1,95 @@ +package edgestackstatus + +import ( + "fmt" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/dataservices" +) + +var _ dataservices.EdgeStackStatusService = &Service{} + +type ServiceTx struct { + service *Service + tx portainer.Transaction +} + +func (service ServiceTx) Create(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error { + identifier := service.service.key(edgeStackID, endpointID) + return service.tx.CreateObjectWithStringId(BucketName, identifier, status) +} + +func (s ServiceTx) Read(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) (*portainer.EdgeStackStatusForEnv, error) { + var status portainer.EdgeStackStatusForEnv + identifier := s.service.key(edgeStackID, endpointID) + + if err := s.tx.GetObject(BucketName, identifier, &status); err != nil { + return nil, err + } + + return &status, nil +} + +func (s ServiceTx) ReadAll(edgeStackID portainer.EdgeStackID) ([]portainer.EdgeStackStatusForEnv, error) { + keyPrefix := s.service.conn.ConvertToKey(int(edgeStackID)) + + statuses := make([]portainer.EdgeStackStatusForEnv, 0) + + if err := s.tx.GetAllWithKeyPrefix(BucketName, keyPrefix, &portainer.EdgeStackStatusForEnv{}, dataservices.AppendFn(&statuses)); err != nil { + return nil, fmt.Errorf("unable to retrieve EdgeStackStatus for EdgeStack %d: %w", edgeStackID, err) + } + + return statuses, nil +} + +func (s ServiceTx) Update(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error { + identifier := s.service.key(edgeStackID, endpointID) + return s.tx.UpdateObject(BucketName, identifier, status) +} + +func (s ServiceTx) Delete(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) error { + identifier := s.service.key(edgeStackID, endpointID) + return s.tx.DeleteObject(BucketName, identifier) +} + +func (s ServiceTx) DeleteAll(edgeStackID portainer.EdgeStackID) error { + keyPrefix := s.service.conn.ConvertToKey(int(edgeStackID)) + + statuses := make([]portainer.EdgeStackStatusForEnv, 0) + + if err := s.tx.GetAllWithKeyPrefix(BucketName, keyPrefix, &portainer.EdgeStackStatusForEnv{}, dataservices.AppendFn(&statuses)); err != nil { + return fmt.Errorf("unable to retrieve EdgeStackStatus for EdgeStack %d: %w", edgeStackID, err) + } + + for _, status := range statuses { + if err := s.tx.DeleteObject(BucketName, s.service.key(edgeStackID, status.EndpointID)); err != nil { + return fmt.Errorf("unable to delete EdgeStackStatus for EdgeStack %d and Endpoint %d: %w", edgeStackID, status.EndpointID, err) + } + } + + return nil +} + +func (s ServiceTx) Clear(edgeStackID portainer.EdgeStackID, relatedEnvironmentsIDs []portainer.EndpointID) error { + for _, envID := range relatedEnvironmentsIDs { + existingStatus, err := s.Read(edgeStackID, envID) + if err != nil && !dataservices.IsErrObjectNotFound(err) { + return fmt.Errorf("unable to retrieve status for environment %d: %w", envID, err) + } + + var deploymentInfo portainer.StackDeploymentInfo + if existingStatus != nil { + deploymentInfo = existingStatus.DeploymentInfo + } + + if err := s.Update(edgeStackID, envID, &portainer.EdgeStackStatusForEnv{ + EndpointID: envID, + Status: []portainer.EdgeStackDeploymentStatus{}, + 
DeploymentInfo: deploymentInfo, + }); err != nil { + return err + } + } + + return nil +} diff --git a/api/dataservices/endpointrelation/endpointrelation.go b/api/dataservices/endpointrelation/endpointrelation.go index 4b7ff6b82..91c00f05a 100644 --- a/api/dataservices/endpointrelation/endpointrelation.go +++ b/api/dataservices/endpointrelation/endpointrelation.go @@ -6,8 +6,6 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/internal/edge/cache" - - "github.com/rs/zerolog/log" ) // BucketName represents the name of the bucket where this service stores data. @@ -16,21 +14,20 @@ const BucketName = "endpoint_relations" // Service represents a service for managing environment(endpoint) relation data. type Service struct { connection portainer.Connection - updateStackFn func(ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error updateStackFnTx func(tx portainer.Transaction, ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error endpointRelationsCache []portainer.EndpointRelation mu sync.Mutex } +var _ dataservices.EndpointRelationService = &Service{} + func (service *Service) BucketName() string { return BucketName } func (service *Service) RegisterUpdateStackFunction( - updateFunc func(portainer.EdgeStackID, func(*portainer.EdgeStack)) error, updateFuncTx func(portainer.Transaction, portainer.EdgeStackID, func(*portainer.EdgeStack)) error, ) { - service.updateStackFn = updateFunc service.updateStackFnTx = updateFuncTx } @@ -89,94 +86,26 @@ func (service *Service) Create(endpointRelation *portainer.EndpointRelation) err // UpdateEndpointRelation updates an Environment(Endpoint) relation object func (service *Service) UpdateEndpointRelation(endpointID portainer.EndpointID, endpointRelation *portainer.EndpointRelation) error { - previousRelationState, _ := service.EndpointRelation(endpointID) + return service.connection.UpdateTx(func(tx portainer.Transaction) error { + return service.Tx(tx).UpdateEndpointRelation(endpointID, endpointRelation) + }) +} - identifier := service.connection.ConvertToKey(int(endpointID)) - err := service.connection.UpdateObject(BucketName, identifier, endpointRelation) - cache.Del(endpointID) - if err != nil { - return err - } +func (service *Service) AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error { + return service.connection.UpdateTx(func(tx portainer.Transaction) error { + return service.Tx(tx).AddEndpointRelationsForEdgeStack(endpointIDs, edgeStackID) + }) +} - updatedRelationState, _ := service.EndpointRelation(endpointID) - - service.mu.Lock() - service.endpointRelationsCache = nil - service.mu.Unlock() - - service.updateEdgeStacksAfterRelationChange(previousRelationState, updatedRelationState) - - return nil +func (service *Service) RemoveEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error { + return service.connection.UpdateTx(func(tx portainer.Transaction) error { + return service.Tx(tx).RemoveEndpointRelationsForEdgeStack(endpointIDs, edgeStackID) + }) } // DeleteEndpointRelation deletes an Environment(Endpoint) relation object func (service *Service) DeleteEndpointRelation(endpointID portainer.EndpointID) error { - deletedRelation, _ := service.EndpointRelation(endpointID) - - identifier := service.connection.ConvertToKey(int(endpointID)) - err := service.connection.DeleteObject(BucketName, 
identifier) - cache.Del(endpointID) - if err != nil { - return err - } - - service.mu.Lock() - service.endpointRelationsCache = nil - service.mu.Unlock() - - service.updateEdgeStacksAfterRelationChange(deletedRelation, nil) - - return nil -} - -func (service *Service) updateEdgeStacksAfterRelationChange(previousRelationState *portainer.EndpointRelation, updatedRelationState *portainer.EndpointRelation) { - relations, _ := service.EndpointRelations() - - stacksToUpdate := map[portainer.EdgeStackID]bool{} - - if previousRelationState != nil { - for stackId, enabled := range previousRelationState.EdgeStacks { - // flag stack for update if stack is not in the updated relation state - // = stack has been removed for this relation - // or this relation has been deleted - if enabled && (updatedRelationState == nil || !updatedRelationState.EdgeStacks[stackId]) { - stacksToUpdate[stackId] = true - } - } - } - - if updatedRelationState != nil { - for stackId, enabled := range updatedRelationState.EdgeStacks { - // flag stack for update if stack is not in the previous relation state - // = stack has been added for this relation - if enabled && (previousRelationState == nil || !previousRelationState.EdgeStacks[stackId]) { - stacksToUpdate[stackId] = true - } - } - } - - // for each stack referenced by the updated relation - // list how many time this stack is referenced in all relations - // in order to update the stack deployments count - for refStackId, refStackEnabled := range stacksToUpdate { - if !refStackEnabled { - continue - } - - numDeployments := 0 - - for _, r := range relations { - for sId, enabled := range r.EdgeStacks { - if enabled && sId == refStackId { - numDeployments += 1 - } - } - } - - if err := service.updateStackFn(refStackId, func(edgeStack *portainer.EdgeStack) { - edgeStack.NumDeployments = numDeployments - }); err != nil { - log.Error().Err(err).Msg("could not update the number of deployments") - } - } + return service.connection.UpdateTx(func(tx portainer.Transaction) error { + return service.Tx(tx).DeleteEndpointRelation(endpointID) + }) } diff --git a/api/dataservices/endpointrelation/endpointrelation_test.go b/api/dataservices/endpointrelation/endpointrelation_test.go new file mode 100644 index 000000000..f1ead0919 --- /dev/null +++ b/api/dataservices/endpointrelation/endpointrelation_test.go @@ -0,0 +1,104 @@ +package endpointrelation + +import ( + "testing" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/database/boltdb" + "github.com/portainer/portainer/api/internal/edge/cache" + + "github.com/stretchr/testify/require" +) + +func TestUpdateRelation(t *testing.T) { + const endpointID = 1 + const edgeStackID1 = 1 + const edgeStackID2 = 2 + + var conn portainer.Connection = &boltdb.DbConnection{Path: t.TempDir()} + err := conn.Open() + require.NoError(t, err) + + defer conn.Close() + + service, err := NewService(conn) + require.NoError(t, err) + + updateStackFnTxCalled := false + + edgeStacks := make(map[portainer.EdgeStackID]portainer.EdgeStack) + edgeStacks[edgeStackID1] = portainer.EdgeStack{ID: edgeStackID1} + edgeStacks[edgeStackID2] = portainer.EdgeStack{ID: edgeStackID2} + + service.RegisterUpdateStackFunction(func(tx portainer.Transaction, ID portainer.EdgeStackID, updateFunc func(edgeStack *portainer.EdgeStack)) error { + updateStackFnTxCalled = true + + s, ok := edgeStacks[ID] + require.True(t, ok) + + updateFunc(&s) + edgeStacks[ID] = s + + return nil + }) + + // Nil relation + + cache.Set(endpointID, []byte("value")) + 
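+	// Updating with a nil relation should leave every edge stack deployment
+	// counter untouched and still invalidate the cached entry for this environment.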
+ err = service.UpdateEndpointRelation(endpointID, nil) + _, cacheKeyExists := cache.Get(endpointID) + require.NoError(t, err) + require.False(t, updateStackFnTxCalled) + require.False(t, cacheKeyExists) + + // Add a relation to two edge stacks + + cache.Set(endpointID, []byte("value")) + + err = service.UpdateEndpointRelation(endpointID, &portainer.EndpointRelation{ + EndpointID: endpointID, + EdgeStacks: map[portainer.EdgeStackID]bool{ + edgeStackID1: true, + edgeStackID2: true, + }, + }) + _, cacheKeyExists = cache.Get(endpointID) + require.NoError(t, err) + require.True(t, updateStackFnTxCalled) + require.False(t, cacheKeyExists) + require.Equal(t, 1, edgeStacks[edgeStackID1].NumDeployments) + require.Equal(t, 1, edgeStacks[edgeStackID2].NumDeployments) + + // Remove a relation to one edge stack + + updateStackFnTxCalled = false + cache.Set(endpointID, []byte("value")) + + err = service.UpdateEndpointRelation(endpointID, &portainer.EndpointRelation{ + EndpointID: endpointID, + EdgeStacks: map[portainer.EdgeStackID]bool{ + 2: true, + }, + }) + _, cacheKeyExists = cache.Get(endpointID) + require.NoError(t, err) + require.True(t, updateStackFnTxCalled) + require.False(t, cacheKeyExists) + require.Equal(t, 0, edgeStacks[edgeStackID1].NumDeployments) + require.Equal(t, 1, edgeStacks[edgeStackID2].NumDeployments) + + // Delete the relation + + updateStackFnTxCalled = false + cache.Set(endpointID, []byte("value")) + + err = service.DeleteEndpointRelation(endpointID) + + _, cacheKeyExists = cache.Get(endpointID) + require.NoError(t, err) + require.True(t, updateStackFnTxCalled) + require.False(t, cacheKeyExists) + require.Equal(t, 0, edgeStacks[edgeStackID1].NumDeployments) + require.Equal(t, 0, edgeStacks[edgeStackID2].NumDeployments) +} diff --git a/api/dataservices/endpointrelation/tx.go b/api/dataservices/endpointrelation/tx.go index 097748767..54e66a31b 100644 --- a/api/dataservices/endpointrelation/tx.go +++ b/api/dataservices/endpointrelation/tx.go @@ -13,6 +13,8 @@ type ServiceTx struct { tx portainer.Transaction } +var _ dataservices.EndpointRelationService = &ServiceTx{} + func (service ServiceTx) BucketName() string { return BucketName } @@ -74,6 +76,66 @@ func (service ServiceTx) UpdateEndpointRelation(endpointID portainer.EndpointID, return nil } +func (service ServiceTx) AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error { + for _, endpointID := range endpointIDs { + rel, err := service.EndpointRelation(endpointID) + if err != nil { + return err + } + + rel.EdgeStacks[edgeStackID] = true + + identifier := service.service.connection.ConvertToKey(int(endpointID)) + err = service.tx.UpdateObject(BucketName, identifier, rel) + cache.Del(endpointID) + if err != nil { + return err + } + } + + service.service.mu.Lock() + service.service.endpointRelationsCache = nil + service.service.mu.Unlock() + + if err := service.service.updateStackFnTx(service.tx, edgeStackID, func(edgeStack *portainer.EdgeStack) { + edgeStack.NumDeployments += len(endpointIDs) + }); err != nil { + log.Error().Err(err).Msg("could not update the number of deployments") + } + + return nil +} + +func (service ServiceTx) RemoveEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error { + for _, endpointID := range endpointIDs { + rel, err := service.EndpointRelation(endpointID) + if err != nil { + return err + } + + delete(rel.EdgeStacks, edgeStackID) + + identifier := 
service.service.connection.ConvertToKey(int(endpointID)) + err = service.tx.UpdateObject(BucketName, identifier, rel) + cache.Del(endpointID) + if err != nil { + return err + } + } + + service.service.mu.Lock() + service.service.endpointRelationsCache = nil + service.service.mu.Unlock() + + if err := service.service.updateStackFnTx(service.tx, edgeStackID, func(edgeStack *portainer.EdgeStack) { + edgeStack.NumDeployments -= len(endpointIDs) + }); err != nil { + log.Error().Err(err).Msg("could not update the number of deployments") + } + + return nil +} + // DeleteEndpointRelation deletes an Environment(Endpoint) relation object func (service ServiceTx) DeleteEndpointRelation(endpointID portainer.EndpointID) error { deletedRelation, _ := service.EndpointRelation(endpointID) @@ -124,53 +186,49 @@ func (service ServiceTx) cachedEndpointRelations() ([]portainer.EndpointRelation } func (service ServiceTx) updateEdgeStacksAfterRelationChange(previousRelationState *portainer.EndpointRelation, updatedRelationState *portainer.EndpointRelation) { - relations, _ := service.EndpointRelations() - - stacksToUpdate := map[portainer.EdgeStackID]bool{} - if previousRelationState != nil { for stackId, enabled := range previousRelationState.EdgeStacks { // flag stack for update if stack is not in the updated relation state // = stack has been removed for this relation // or this relation has been deleted if enabled && (updatedRelationState == nil || !updatedRelationState.EdgeStacks[stackId]) { - stacksToUpdate[stackId] = true - } - } - } + if err := service.service.updateStackFnTx(service.tx, stackId, func(edgeStack *portainer.EdgeStack) { + // Sanity check + if edgeStack.NumDeployments <= 0 { + log.Error(). + Int("edgestack_id", int(edgeStack.ID)). + Int("endpoint_id", int(previousRelationState.EndpointID)). + Int("num_deployments", edgeStack.NumDeployments). 
+ Msg("cannot decrement the number of deployments for an edge stack with zero deployments") - if updatedRelationState != nil { - for stackId, enabled := range updatedRelationState.EdgeStacks { - // flag stack for update if stack is not in the previous relation state - // = stack has been added for this relation - if enabled && (previousRelationState == nil || !previousRelationState.EdgeStacks[stackId]) { - stacksToUpdate[stackId] = true - } - } - } + return + } - // for each stack referenced by the updated relation - // list how many time this stack is referenced in all relations - // in order to update the stack deployments count - for refStackId, refStackEnabled := range stacksToUpdate { - if !refStackEnabled { - continue - } - - numDeployments := 0 - - for _, r := range relations { - for sId, enabled := range r.EdgeStacks { - if enabled && sId == refStackId { - numDeployments += 1 + edgeStack.NumDeployments-- + }); err != nil { + log.Error().Err(err).Msg("could not update the number of deployments") } + + cache.Del(previousRelationState.EndpointID) } } + } - if err := service.service.updateStackFnTx(service.tx, refStackId, func(edgeStack *portainer.EdgeStack) { - edgeStack.NumDeployments = numDeployments - }); err != nil { - log.Error().Err(err).Msg("could not update the number of deployments") + if updatedRelationState == nil { + return + } + + for stackId, enabled := range updatedRelationState.EdgeStacks { + // flag stack for update if stack is not in the previous relation state + // = stack has been added for this relation + if enabled && (previousRelationState == nil || !previousRelationState.EdgeStacks[stackId]) { + if err := service.service.updateStackFnTx(service.tx, stackId, func(edgeStack *portainer.EdgeStack) { + edgeStack.NumDeployments++ + }); err != nil { + log.Error().Err(err).Msg("could not update the number of deployments") + } + + cache.Del(updatedRelationState.EndpointID) } } } diff --git a/api/dataservices/interface.go b/api/dataservices/interface.go index 1efef4f70..d330d4959 100644 --- a/api/dataservices/interface.go +++ b/api/dataservices/interface.go @@ -12,6 +12,7 @@ type ( EdgeGroup() EdgeGroupService EdgeJob() EdgeJobService EdgeStack() EdgeStackService + EdgeStackStatus() EdgeStackStatusService Endpoint() EndpointService EndpointGroup() EndpointGroupService EndpointRelation() EndpointRelationService @@ -39,8 +40,8 @@ type ( Open() (newStore bool, err error) Init() error Close() error - UpdateTx(func(DataStoreTx) error) error - ViewTx(func(DataStoreTx) error) error + UpdateTx(func(tx DataStoreTx) error) error + ViewTx(func(tx DataStoreTx) error) error MigrateData() error Rollback(force bool) error CheckCurrentEdition() error @@ -89,6 +90,16 @@ type ( BucketName() string } + EdgeStackStatusService interface { + Create(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error + Read(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) (*portainer.EdgeStackStatusForEnv, error) + ReadAll(edgeStackID portainer.EdgeStackID) ([]portainer.EdgeStackStatusForEnv, error) + Update(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID, status *portainer.EdgeStackStatusForEnv) error + Delete(edgeStackID portainer.EdgeStackID, endpointID portainer.EndpointID) error + DeleteAll(edgeStackID portainer.EdgeStackID) error + Clear(edgeStackID portainer.EdgeStackID, relatedEnvironmentsIDs []portainer.EndpointID) error + } + // EndpointService represents a service for managing environment(endpoint) 
data EndpointService interface { Endpoint(ID portainer.EndpointID) (*portainer.Endpoint, error) @@ -115,6 +126,8 @@ type ( EndpointRelation(EndpointID portainer.EndpointID) (*portainer.EndpointRelation, error) Create(endpointRelation *portainer.EndpointRelation) error UpdateEndpointRelation(EndpointID portainer.EndpointID, endpointRelation *portainer.EndpointRelation) error + AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error + RemoveEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error DeleteEndpointRelation(EndpointID portainer.EndpointID) error BucketName() string } @@ -157,6 +170,7 @@ type ( SnapshotService interface { BaseCRUD[portainer.Snapshot, portainer.EndpointID] + ReadWithoutSnapshotRaw(ID portainer.EndpointID) (*portainer.Snapshot, error) } // SSLSettingsService represents a service for managing application settings diff --git a/api/dataservices/snapshot/snapshot.go b/api/dataservices/snapshot/snapshot.go index 1f9cd5f9f..c0066317d 100644 --- a/api/dataservices/snapshot/snapshot.go +++ b/api/dataservices/snapshot/snapshot.go @@ -38,3 +38,33 @@ func (service *Service) Tx(tx portainer.Transaction) ServiceTx { func (service *Service) Create(snapshot *portainer.Snapshot) error { return service.Connection.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot) } + +func (service *Service) ReadWithoutSnapshotRaw(ID portainer.EndpointID) (*portainer.Snapshot, error) { + var snapshot *portainer.Snapshot + + err := service.Connection.ViewTx(func(tx portainer.Transaction) error { + var err error + snapshot, err = service.Tx(tx).ReadWithoutSnapshotRaw(ID) + + return err + }) + + return snapshot, err +} + +func (service *Service) ReadRawMessage(ID portainer.EndpointID) (*portainer.SnapshotRawMessage, error) { + var snapshot *portainer.SnapshotRawMessage + + err := service.Connection.ViewTx(func(tx portainer.Transaction) error { + var err error + snapshot, err = service.Tx(tx).ReadRawMessage(ID) + + return err + }) + + return snapshot, err +} + +func (service *Service) CreateRawMessage(snapshot *portainer.SnapshotRawMessage) error { + return service.Connection.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot) +} diff --git a/api/dataservices/snapshot/tx.go b/api/dataservices/snapshot/tx.go index c93a747d3..45d1df9fc 100644 --- a/api/dataservices/snapshot/tx.go +++ b/api/dataservices/snapshot/tx.go @@ -12,3 +12,42 @@ type ServiceTx struct { func (service ServiceTx) Create(snapshot *portainer.Snapshot) error { return service.Tx.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot) } + +func (service ServiceTx) ReadWithoutSnapshotRaw(ID portainer.EndpointID) (*portainer.Snapshot, error) { + var snapshot struct { + Docker *struct { + X struct{} `json:"DockerSnapshotRaw"` + *portainer.DockerSnapshot + } `json:"Docker"` + + portainer.Snapshot + } + + identifier := service.Connection.ConvertToKey(int(ID)) + + if err := service.Tx.GetObject(service.Bucket, identifier, &snapshot); err != nil { + return nil, err + } + + if snapshot.Docker != nil { + snapshot.Snapshot.Docker = snapshot.Docker.DockerSnapshot + } + + return &snapshot.Snapshot, nil +} + +func (service ServiceTx) ReadRawMessage(ID portainer.EndpointID) (*portainer.SnapshotRawMessage, error) { + var snapshot = portainer.SnapshotRawMessage{} + + identifier := service.Connection.ConvertToKey(int(ID)) + + if err := service.Tx.GetObject(service.Bucket, identifier, &snapshot); err != nil { + 
return nil, err + } + + return &snapshot, nil +} + +func (service ServiceTx) CreateRawMessage(snapshot *portainer.SnapshotRawMessage) error { + return service.Tx.CreateObjectWithId(BucketName, int(snapshot.EndpointID), snapshot) +} diff --git a/api/datastore/migrate_data.go b/api/datastore/migrate_data.go index 8047274d1..2b53bbb9c 100644 --- a/api/datastore/migrate_data.go +++ b/api/datastore/migrate_data.go @@ -40,13 +40,11 @@ func (store *Store) MigrateData() error { } // before we alter anything in the DB, create a backup - _, err = store.Backup("") - if err != nil { + if _, err := store.Backup(""); err != nil { return errors.Wrap(err, "while backing up database") } - err = store.FailSafeMigrate(migrator, version) - if err != nil { + if err := store.FailSafeMigrate(migrator, version); err != nil { err = errors.Wrap(err, "failed to migrate database") log.Warn().Err(err).Msg("migration failed, restoring database to previous version") @@ -85,7 +83,9 @@ func (store *Store) newMigratorParameters(version *models.Version, flags *portai DockerhubService: store.DockerHubService, AuthorizationService: authorization.NewService(store), EdgeStackService: store.EdgeStackService, + EdgeStackStatusService: store.EdgeStackStatusService, EdgeJobService: store.EdgeJobService, + EdgeGroupService: store.EdgeGroupService, TunnelServerService: store.TunnelServerService, PendingActionsService: store.PendingActionsService, } @@ -140,8 +140,7 @@ func (store *Store) connectionRollback(force bool) error { } } - err := store.Restore() - if err != nil { + if err := store.Restore(); err != nil { return err } diff --git a/api/datastore/migrator/migrate_2_31_0.go b/api/datastore/migrator/migrate_2_31_0.go new file mode 100644 index 000000000..7afea9802 --- /dev/null +++ b/api/datastore/migrator/migrate_2_31_0.go @@ -0,0 +1,31 @@ +package migrator + +import portainer "github.com/portainer/portainer/api" + +func (m *Migrator) migrateEdgeStacksStatuses_2_31_0() error { + edgeStacks, err := m.edgeStackService.EdgeStacks() + if err != nil { + return err + } + + for _, edgeStack := range edgeStacks { + for envID, status := range edgeStack.Status { + if err := m.edgeStackStatusService.Create(edgeStack.ID, envID, &portainer.EdgeStackStatusForEnv{ + EndpointID: envID, + Status: status.Status, + DeploymentInfo: status.DeploymentInfo, + ReadyRePullImage: status.ReadyRePullImage, + }); err != nil { + return err + } + } + + edgeStack.Status = nil + + if err := m.edgeStackService.UpdateEdgeStack(edgeStack.ID, &edgeStack); err != nil { + return err + } + } + + return nil +} diff --git a/api/datastore/migrator/migrate_2_32_0.go b/api/datastore/migrator/migrate_2_32_0.go new file mode 100644 index 000000000..c32a63cad --- /dev/null +++ b/api/datastore/migrator/migrate_2_32_0.go @@ -0,0 +1,33 @@ +package migrator + +import ( + "github.com/pkg/errors" + portainer "github.com/portainer/portainer/api" + perrors "github.com/portainer/portainer/api/dataservices/errors" + "github.com/portainer/portainer/api/internal/endpointutils" +) + +func (m *Migrator) addEndpointRelationForEdgeAgents_2_32_0() error { + endpoints, err := m.endpointService.Endpoints() + if err != nil { + return err + } + + for _, endpoint := range endpoints { + if endpointutils.IsEdgeEndpoint(&endpoint) { + _, err := m.endpointRelationService.EndpointRelation(endpoint.ID) + if err != nil && errors.Is(err, perrors.ErrObjectNotFound) { + relation := &portainer.EndpointRelation{ + EndpointID: endpoint.ID, + EdgeStacks: make(map[portainer.EdgeStackID]bool), + } + + if err := 
m.endpointRelationService.Create(relation); err != nil { + return err + } + } + } + } + + return nil +} diff --git a/api/datastore/migrator/migrate_2_33_0.go b/api/datastore/migrator/migrate_2_33_0.go new file mode 100644 index 000000000..f000a780a --- /dev/null +++ b/api/datastore/migrator/migrate_2_33_0.go @@ -0,0 +1,23 @@ +package migrator + +import ( + "github.com/portainer/portainer/api/roar" +) + +func (m *Migrator) migrateEdgeGroupEndpointsToRoars_2_33_0() error { + egs, err := m.edgeGroupService.ReadAll() + if err != nil { + return err + } + + for _, eg := range egs { + eg.EndpointIDs = roar.FromSlice(eg.Endpoints) + eg.Endpoints = nil + + if err := m.edgeGroupService.Update(eg.ID, &eg); err != nil { + return err + } + } + + return nil +} diff --git a/api/datastore/migrator/migrate_dbversion100.go b/api/datastore/migrator/migrate_dbversion100.go index 458c10c95..59b15b08d 100644 --- a/api/datastore/migrator/migrate_dbversion100.go +++ b/api/datastore/migrator/migrate_dbversion100.go @@ -94,6 +94,10 @@ func (m *Migrator) updateEdgeStackStatusForDB100() error { continue } + if environmentStatus.Details == nil { + continue + } + statusArray := []portainer.EdgeStackDeploymentStatus{} if environmentStatus.Details.Pending { statusArray = append(statusArray, portainer.EdgeStackDeploymentStatus{ diff --git a/api/datastore/migrator/migrate_dbversion20.go b/api/datastore/migrator/migrate_dbversion20.go index 1f02b8885..bad999c26 100644 --- a/api/datastore/migrator/migrate_dbversion20.go +++ b/api/datastore/migrator/migrate_dbversion20.go @@ -18,8 +18,7 @@ func (m *Migrator) updateResourceControlsToDBVersion22() error { for _, resourceControl := range legacyResourceControls { resourceControl.AdministratorsOnly = false - err := m.resourceControlService.Update(resourceControl.ID, &resourceControl) - if err != nil { + if err := m.resourceControlService.Update(resourceControl.ID, &resourceControl); err != nil { return err } } @@ -42,8 +41,8 @@ func (m *Migrator) updateUsersAndRolesToDBVersion22() error { for _, user := range legacyUsers { user.PortainerAuthorizations = authorization.DefaultPortainerAuthorizations() - err = m.userService.Update(user.ID, &user) - if err != nil { + + if err := m.userService.Update(user.ID, &user); err != nil { return err } } @@ -52,38 +51,47 @@ func (m *Migrator) updateUsersAndRolesToDBVersion22() error { if err != nil { return err } + endpointAdministratorRole.Priority = 1 endpointAdministratorRole.Authorizations = authorization.DefaultEndpointAuthorizationsForEndpointAdministratorRole() - err = m.roleService.Update(endpointAdministratorRole.ID, endpointAdministratorRole) + if err := m.roleService.Update(endpointAdministratorRole.ID, endpointAdministratorRole); err != nil { + return err + } helpDeskRole, err := m.roleService.Read(portainer.RoleID(2)) if err != nil { return err } + helpDeskRole.Priority = 2 helpDeskRole.Authorizations = authorization.DefaultEndpointAuthorizationsForHelpDeskRole(settings.AllowVolumeBrowserForRegularUsers) - err = m.roleService.Update(helpDeskRole.ID, helpDeskRole) + if err := m.roleService.Update(helpDeskRole.ID, helpDeskRole); err != nil { + return err + } standardUserRole, err := m.roleService.Read(portainer.RoleID(3)) if err != nil { return err } + standardUserRole.Priority = 3 standardUserRole.Authorizations = authorization.DefaultEndpointAuthorizationsForStandardUserRole(settings.AllowVolumeBrowserForRegularUsers) - err = m.roleService.Update(standardUserRole.ID, standardUserRole) + if err := 
m.roleService.Update(standardUserRole.ID, standardUserRole); err != nil { + return err + } readOnlyUserRole, err := m.roleService.Read(portainer.RoleID(4)) if err != nil { return err } + readOnlyUserRole.Priority = 4 readOnlyUserRole.Authorizations = authorization.DefaultEndpointAuthorizationsForReadOnlyUserRole(settings.AllowVolumeBrowserForRegularUsers) - err = m.roleService.Update(readOnlyUserRole.ID, readOnlyUserRole) - if err != nil { + if err := m.roleService.Update(readOnlyUserRole.ID, readOnlyUserRole); err != nil { return err } diff --git a/api/datastore/migrator/migrate_dbversion80.go b/api/datastore/migrator/migrate_dbversion80.go index 77671745e..a738e5177 100644 --- a/api/datastore/migrator/migrate_dbversion80.go +++ b/api/datastore/migrator/migrate_dbversion80.go @@ -75,6 +75,10 @@ func (m *Migrator) updateEdgeStackStatusForDB80() error { for _, edgeStack := range edgeStacks { for endpointId, status := range edgeStack.Status { + if status.Details == nil { + status.Details = &portainer.EdgeStackStatusDetails{} + } + switch status.Type { case portainer.EdgeStackStatusPending: status.Details.Pending = true @@ -93,10 +97,10 @@ func (m *Migrator) updateEdgeStackStatusForDB80() error { edgeStack.Status[endpointId] = status } - err = m.edgeStackService.UpdateEdgeStack(edgeStack.ID, &edgeStack) - if err != nil { + if err := m.edgeStackService.UpdateEdgeStack(edgeStack.ID, &edgeStack); err != nil { return err } } + return nil } diff --git a/api/datastore/migrator/migrator.go b/api/datastore/migrator/migrator.go index dc92006ad..df27cc0cd 100644 --- a/api/datastore/migrator/migrator.go +++ b/api/datastore/migrator/migrator.go @@ -3,12 +3,13 @@ package migrator import ( "errors" - "github.com/Masterminds/semver" portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/database/models" "github.com/portainer/portainer/api/dataservices/dockerhub" + "github.com/portainer/portainer/api/dataservices/edgegroup" "github.com/portainer/portainer/api/dataservices/edgejob" "github.com/portainer/portainer/api/dataservices/edgestack" + "github.com/portainer/portainer/api/dataservices/edgestackstatus" "github.com/portainer/portainer/api/dataservices/endpoint" "github.com/portainer/portainer/api/dataservices/endpointgroup" "github.com/portainer/portainer/api/dataservices/endpointrelation" @@ -27,6 +28,8 @@ import ( "github.com/portainer/portainer/api/dataservices/user" "github.com/portainer/portainer/api/dataservices/version" "github.com/portainer/portainer/api/internal/authorization" + + "github.com/Masterminds/semver" "github.com/rs/zerolog/log" ) @@ -56,7 +59,9 @@ type ( authorizationService *authorization.Service dockerhubService *dockerhub.Service edgeStackService *edgestack.Service + edgeStackStatusService *edgestackstatus.Service edgeJobService *edgejob.Service + edgeGroupService *edgegroup.Service TunnelServerService *tunnelserver.Service pendingActionsService *pendingactions.Service } @@ -84,7 +89,9 @@ type ( AuthorizationService *authorization.Service DockerhubService *dockerhub.Service EdgeStackService *edgestack.Service + EdgeStackStatusService *edgestackstatus.Service EdgeJobService *edgejob.Service + EdgeGroupService *edgegroup.Service TunnelServerService *tunnelserver.Service PendingActionsService *pendingactions.Service } @@ -114,12 +121,15 @@ func NewMigrator(parameters *MigratorParameters) *Migrator { authorizationService: parameters.AuthorizationService, dockerhubService: parameters.DockerhubService, edgeStackService: parameters.EdgeStackService, + 
edgeStackStatusService: parameters.EdgeStackStatusService, edgeJobService: parameters.EdgeJobService, + edgeGroupService: parameters.EdgeGroupService, TunnelServerService: parameters.TunnelServerService, pendingActionsService: parameters.PendingActionsService, } migrator.initMigrations() + return migrator } @@ -242,6 +252,12 @@ func (m *Migrator) initMigrations() { m.migratePendingActionsDataForDB130, ) + m.addMigrations("2.31.0", m.migrateEdgeStacksStatuses_2_31_0) + + m.addMigrations("2.32.0", m.addEndpointRelationForEdgeAgents_2_32_0) + + m.addMigrations("2.33.0", m.migrateEdgeGroupEndpointsToRoars_2_33_0) + // Add new migrations above... // One function per migration, each versions migration funcs in the same file. } diff --git a/api/datastore/services.go b/api/datastore/services.go index b5363afe9..7413d6c03 100644 --- a/api/datastore/services.go +++ b/api/datastore/services.go @@ -13,6 +13,7 @@ import ( "github.com/portainer/portainer/api/dataservices/edgegroup" "github.com/portainer/portainer/api/dataservices/edgejob" "github.com/portainer/portainer/api/dataservices/edgestack" + "github.com/portainer/portainer/api/dataservices/edgestackstatus" "github.com/portainer/portainer/api/dataservices/endpoint" "github.com/portainer/portainer/api/dataservices/endpointgroup" "github.com/portainer/portainer/api/dataservices/endpointrelation" @@ -39,6 +40,8 @@ import ( "github.com/segmentio/encoding/json" ) +var _ dataservices.DataStore = &Store{} + // Store defines the implementation of portainer.DataStore using // BoltDB as the storage system. type Store struct { @@ -51,6 +54,7 @@ type Store struct { EdgeGroupService *edgegroup.Service EdgeJobService *edgejob.Service EdgeStackService *edgestack.Service + EdgeStackStatusService *edgestackstatus.Service EndpointGroupService *endpointgroup.Service EndpointService *endpoint.Service EndpointRelationService *endpointrelation.Service @@ -107,7 +111,13 @@ func (store *Store) initServices() error { return err } store.EdgeStackService = edgeStackService - endpointRelationService.RegisterUpdateStackFunction(edgeStackService.UpdateEdgeStackFunc, edgeStackService.UpdateEdgeStackFuncTx) + endpointRelationService.RegisterUpdateStackFunction(edgeStackService.UpdateEdgeStackFuncTx) + + edgeStackStatusService, err := edgestackstatus.NewService(store.connection) + if err != nil { + return err + } + store.EdgeStackStatusService = edgeStackStatusService edgeGroupService, err := edgegroup.NewService(store.connection) if err != nil { @@ -269,6 +279,10 @@ func (store *Store) EdgeStack() dataservices.EdgeStackService { return store.EdgeStackService } +func (store *Store) EdgeStackStatus() dataservices.EdgeStackStatusService { + return store.EdgeStackStatusService +} + // Environment(Endpoint) gives access to the Environment(Endpoint) data management layer func (store *Store) Endpoint() dataservices.EndpointService { return store.EndpointService diff --git a/api/datastore/services_tx.go b/api/datastore/services_tx.go index ddedf20cc..cf9f868f4 100644 --- a/api/datastore/services_tx.go +++ b/api/datastore/services_tx.go @@ -32,6 +32,10 @@ func (tx *StoreTx) EdgeStack() dataservices.EdgeStackService { return tx.store.EdgeStackService.Tx(tx.tx) } +func (tx *StoreTx) EdgeStackStatus() dataservices.EdgeStackStatusService { + return tx.store.EdgeStackStatusService.Tx(tx.tx) +} + func (tx *StoreTx) Endpoint() dataservices.EndpointService { return tx.store.EndpointService.Tx(tx.tx) } diff --git a/api/datastore/test_data/output_24_to_latest.json 
b/api/datastore/test_data/output_24_to_latest.json index 9fa3e5f09..5e8b0eefa 100644 --- a/api/datastore/test_data/output_24_to_latest.json +++ b/api/datastore/test_data/output_24_to_latest.json @@ -8,6 +8,7 @@ } ], "edge_stack": null, + "edge_stack_status": null, "edgegroups": null, "edgejobs": null, "endpoint_groups": [ @@ -120,6 +121,10 @@ "Ecr": { "Region": "" }, + "Github": { + "OrganisationName": "", + "UseOrganisation": false + }, "Gitlab": { "InstanceURL": "", "ProjectId": 0, @@ -610,7 +615,7 @@ "RequiredPasswordLength": 12 }, "KubeconfigExpiry": "0", - "KubectlShellImage": "portainer/kubectl-shell:2.27.0-rc1", + "KubectlShellImage": "portainer/kubectl-shell:2.32.0", "LDAPSettings": { "AnonymousMode": true, "AutoCreateUsers": true, @@ -678,14 +683,11 @@ "Images": null, "Info": { "Architecture": "", - "BridgeNfIp6tables": false, - "BridgeNfIptables": false, "CDISpecDirs": null, "CPUSet": false, "CPUShares": false, "CgroupDriver": "", "ContainerdCommit": { - "Expected": "", "ID": "" }, "Containers": 0, @@ -709,7 +711,6 @@ "IndexServerAddress": "", "InitBinary": "", "InitCommit": { - "Expected": "", "ID": "" }, "Isolation": "", @@ -738,7 +739,6 @@ }, "RegistryConfig": null, "RuncCommit": { - "Expected": "", "ID": "" }, "Runtimes": null, @@ -780,6 +780,7 @@ "ImageCount": 9, "IsPodman": false, "NodeCount": 0, + "PerformanceMetrics": null, "RunningContainerCount": 5, "ServiceCount": 0, "StackCount": 2, @@ -943,7 +944,7 @@ } ], "version": { - "VERSION": "{\"SchemaVersion\":\"2.27.0-rc1\",\"MigratorCount\":0,\"Edition\":1,\"InstanceID\":\"463d5c47-0ea5-4aca-85b1-405ceefee254\"}" + "VERSION": "{\"SchemaVersion\":\"2.32.0\",\"MigratorCount\":1,\"Edition\":1,\"InstanceID\":\"463d5c47-0ea5-4aca-85b1-405ceefee254\"}" }, "webhooks": null } \ No newline at end of file diff --git a/api/datastore/validate/validate.go b/api/datastore/validate/validate.go deleted file mode 100644 index 2b37311fe..000000000 --- a/api/datastore/validate/validate.go +++ /dev/null @@ -1,15 +0,0 @@ -package validate - -import ( - "github.com/go-playground/validator/v10" - portainer "github.com/portainer/portainer/api" -) - -var validate *validator.Validate - -func ValidateLDAPSettings(ldp *portainer.LDAPSettings) error { - validate = validator.New() - registerValidationMethods(validate) - - return validate.Struct(ldp) -} diff --git a/api/datastore/validate/validate_test.go b/api/datastore/validate/validate_test.go deleted file mode 100644 index 3fa7bd425..000000000 --- a/api/datastore/validate/validate_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package validate - -import ( - "testing" - - portainer "github.com/portainer/portainer/api" -) - -func TestValidateLDAPSettings(t *testing.T) { - - tests := []struct { - name string - ldap portainer.LDAPSettings - wantErr bool - }{ - { - name: "Empty LDAP Settings", - ldap: portainer.LDAPSettings{}, - wantErr: true, - }, - { - name: "With URL", - ldap: portainer.LDAPSettings{ - AnonymousMode: true, - URL: "192.168.0.1:323", - }, - wantErr: false, - }, - { - name: "Validate URL and URLs", - ldap: portainer.LDAPSettings{ - AnonymousMode: true, - URL: "192.168.0.1:323", - }, - wantErr: false, - }, - { - name: "validate client ldap", - ldap: portainer.LDAPSettings{ - AnonymousMode: false, - ReaderDN: "CN=LDAP API Service Account", - Password: "Qu**dfUUU**", - URL: "aukdc15.pgc.co:389", - TLSConfig: portainer.TLSConfiguration{ - TLS: false, - TLSSkipVerify: false, - }, - }, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := 
ValidateLDAPSettings(&tt.ldap) - if (err == nil) == tt.wantErr { - t.Errorf("No error expected but got %s", err) - } - }) - } -} diff --git a/api/datastore/validate/validationMethods.go b/api/datastore/validate/validationMethods.go deleted file mode 100644 index 36abe3a54..000000000 --- a/api/datastore/validate/validationMethods.go +++ /dev/null @@ -1,17 +0,0 @@ -package validate - -import ( - "github.com/go-playground/validator/v10" -) - -func registerValidationMethods(v *validator.Validate) { - v.RegisterValidation("validate_bool", ValidateBool) -} - -/** - * Validation methods below are being used for custom validation - */ -func ValidateBool(fl validator.FieldLevel) bool { - _, ok := fl.Field().Interface().(bool) - return ok -} diff --git a/api/docker/client/client.go b/api/docker/client/client.go index 065a40382..9648656a0 100644 --- a/api/docker/client/client.go +++ b/api/docker/client/client.go @@ -3,8 +3,8 @@ package client import ( "bytes" "errors" + "fmt" "io" - "maps" "net/http" "strings" "time" @@ -73,19 +73,6 @@ func createLocalClient(endpoint *portainer.Endpoint) (*client.Client, error) { ) } -func CreateClientFromEnv() (*client.Client, error) { - return client.NewClientWithOpts( - client.FromEnv, - client.WithAPIVersionNegotiation(), - ) -} - -func CreateSimpleClient() (*client.Client, error) { - return client.NewClientWithOpts( - client.WithAPIVersionNegotiation(), - ) -} - func createTCPClient(endpoint *portainer.Endpoint, timeout *time.Duration) (*client.Client, error) { httpCli, err := httpClient(endpoint, timeout) if err != nil { @@ -141,7 +128,6 @@ func createAgentClient(endpoint *portainer.Endpoint, endpointURL string, signatu type NodeNameTransport struct { *http.Transport - nodeNames map[string]string } func (t *NodeNameTransport) RoundTrip(req *http.Request) (*http.Response, error) { @@ -176,28 +162,30 @@ func (t *NodeNameTransport) RoundTrip(req *http.Request) (*http.Response, error) return resp, nil } - t.nodeNames = make(map[string]string) - for _, r := range rs { - t.nodeNames[r.ID] = r.Portainer.Agent.NodeName + nodeNames, ok := req.Context().Value("nodeNames").(map[string]string) + if ok { + for idx, r := range rs { + // as there is no way to differentiate the same image available in multiple nodes only by their ID + // we append the index of the image in the payload response to match the node name later + // from the image.Summary[] list returned by docker's client.ImageList() + nodeNames[fmt.Sprintf("%s-%d", r.ID, idx)] = r.Portainer.Agent.NodeName + } } return resp, err } -func (t *NodeNameTransport) NodeNames() map[string]string { - return maps.Clone(t.nodeNames) -} - func httpClient(endpoint *portainer.Endpoint, timeout *time.Duration) (*http.Client, error) { transport := &NodeNameTransport{ Transport: &http.Transport{}, } if endpoint.TLSConfig.TLS { - tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig.TLSCACertPath, endpoint.TLSConfig.TLSCertPath, endpoint.TLSConfig.TLSKeyPath, endpoint.TLSConfig.TLSSkipVerify) + tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig) if err != nil { return nil, err } + transport.TLSClientConfig = tlsConfig } diff --git a/api/docker/client/client_test.go b/api/docker/client/client_test.go new file mode 100644 index 000000000..4d7f767e0 --- /dev/null +++ b/api/docker/client/client_test.go @@ -0,0 +1,26 @@ +package client + +import ( + "testing" + + portainer "github.com/portainer/portainer/api" + "github.com/stretchr/testify/require" +) + +func TestHttpClient(t *testing.T) { + 
// Valid TLS configuration + endpoint := &portainer.Endpoint{} + endpoint.TLSConfig = portainer.TLSConfiguration{TLS: true} + + cli, err := httpClient(endpoint, nil) + require.NoError(t, err) + require.NotNil(t, cli) + + // Invalid TLS configuration + endpoint.TLSConfig.TLSCertPath = "/invalid/path/client.crt" + endpoint.TLSConfig.TLSKeyPath = "/invalid/path/client.key" + + cli, err = httpClient(endpoint, nil) + require.Error(t, err) + require.Nil(t, cli) +} diff --git a/api/docker/images/digest.go b/api/docker/images/digest.go index 591f94d1e..38638de56 100644 --- a/api/docker/images/digest.go +++ b/api/docker/images/digest.go @@ -38,10 +38,10 @@ func NewClientWithRegistry(registryClient *RegistryClient, clientFactory *docker func (c *DigestClient) RemoteDigest(image Image) (digest.Digest, error) { ctx, cancel := c.timeoutContext() defer cancel() + // Docker references with both a tag and digest are currently not supported if image.Tag != "" && image.Digest != "" { - err := image.trimDigest() - if err != nil { + if err := image.TrimDigest(); err != nil { return "", err } } @@ -69,7 +69,7 @@ func (c *DigestClient) RemoteDigest(image Image) (digest.Digest, error) { // Retrieve remote digest through HEAD request rmDigest, err := docker.GetDigest(ctx, sysCtx, rmRef) if err != nil { - // fallback to public registry for hub + // Fallback to public registry for hub if image.HubLink != "" { rmDigest, err = docker.GetDigest(ctx, c.sysCtx, rmRef) if err == nil { @@ -131,8 +131,7 @@ func ParseRepoDigests(repoDigests []string) []digest.Digest { func ParseRepoTags(repoTags []string) []*Image { images := make([]*Image, 0) for _, repoTag := range repoTags { - image := ParseRepoTag(repoTag) - if image != nil { + if image := ParseRepoTag(repoTag); image != nil { images = append(images, image) } } @@ -147,7 +146,7 @@ func ParseRepoDigest(repoDigest string) digest.Digest { d, err := digest.Parse(strings.Split(repoDigest, "@")[1]) if err != nil { - log.Warn().Msgf("Skip invalid repo digest item: %s [error: %v]", repoDigest, err) + log.Warn().Err(err).Str("digest", repoDigest).Msg("skip invalid repo item") return "" } diff --git a/api/docker/images/image.go b/api/docker/images/image.go index 55e72aff0..6b0c6eb81 100644 --- a/api/docker/images/image.go +++ b/api/docker/images/image.go @@ -26,7 +26,7 @@ type Image struct { Digest digest.Digest HubLink string named reference.Named - opts ParseImageOptions + Opts ParseImageOptions `json:"-"` } // ParseImageOptions holds image options for parsing. @@ -43,9 +43,10 @@ func (i *Image) Name() string { // FullName return the real full name may include Tag or Digest of the image, Tag first. func (i *Image) FullName() string { if i.Tag == "" { - return fmt.Sprintf("%s@%s", i.Name(), i.Digest) + return i.Name() + "@" + i.Digest.String() } - return fmt.Sprintf("%s:%s", i.Name(), i.Tag) + + return i.Name() + ":" + i.Tag } // String returns the string representation of an image, including Tag and Digest if existed. 
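As a quick illustration of the `FullName()` change above (plain string concatenation instead of `fmt.Sprintf`), here is a minimal sketch of how a parsed reference renders with a tag versus a digest. It assumes the `github.com/portainer/portainer/api/docker/images` import path implied by the file locations in this diff; the expected outputs mirror the assertions in `image_test.go` further down.

```go
package main

import (
	"fmt"

	"github.com/portainer/portainer/api/docker/images"
)

func main() {
	// Tag branch: reference.TagNameOnly adds ":latest" to a bare name,
	// so FullName() renders "<name>:<tag>".
	img, err := images.ParseImage(images.ParseImageOptions{Name: "portainer/portainer-ee"})
	if err != nil {
		panic(err)
	}
	fmt.Println(img.FullName()) // docker.io/portainer/portainer-ee:latest

	// Digest branch: no tag is present, so FullName() falls back to
	// "<name>@<digest>" built from Digest.String().
	img, err = images.ParseImage(images.ParseImageOptions{
		Name: "gcr.io/k8s-minikube/kicbase@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(img.FullName()) // gcr.io/k8s-minikube/kicbase@sha256:02c921df...
}
```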
@@ -66,22 +67,25 @@ func (i *Image) Reference() string { func (i *Image) WithDigest(digest digest.Digest) (err error) { i.Digest = digest i.named, err = reference.WithDigest(i.named, digest) + return err } func (i *Image) WithTag(tag string) (err error) { i.Tag = tag i.named, err = reference.WithTag(i.named, tag) + return err } -func (i *Image) trimDigest() error { +func (i *Image) TrimDigest() error { i.Digest = "" named, err := ParseImage(ParseImageOptions{Name: i.FullName()}) if err != nil { return err } i.named = &named + return nil } @@ -92,11 +96,12 @@ func ParseImage(parseOpts ParseImageOptions) (Image, error) { if err != nil { return Image{}, errors.Wrapf(err, "parsing image %s failed", parseOpts.Name) } + // Add the latest lag if they did not provide one. named = reference.TagNameOnly(named) i := Image{ - opts: parseOpts, + Opts: parseOpts, named: named, Domain: reference.Domain(named), Path: reference.Path(named), @@ -122,15 +127,16 @@ func ParseImage(parseOpts ParseImageOptions) (Image, error) { } func (i *Image) hubLink() (string, error) { - if i.opts.HubTpl != "" { + if i.Opts.HubTpl != "" { var out bytes.Buffer tmpl, err := template.New("tmpl"). Option("missingkey=error"). - Parse(i.opts.HubTpl) + Parse(i.Opts.HubTpl) if err != nil { return "", err } err = tmpl.Execute(&out, i) + return out.String(), err } @@ -142,6 +148,7 @@ func (i *Image) hubLink() (string, error) { prefix = "_" path = strings.Replace(i.Path, "library/", "", 1) } + return "https://hub.docker.com/" + prefix + "/" + path, nil case "docker.bintray.io", "jfrog-docker-reg2.bintray.io": return "https://bintray.com/jfrog/reg2/" + strings.ReplaceAll(i.Path, "/", "%3A"), nil diff --git a/api/docker/images/image_test.go b/api/docker/images/image_test.go index 2649e387c..713a67732 100644 --- a/api/docker/images/image_test.go +++ b/api/docker/images/image_test.go @@ -16,7 +16,7 @@ func TestImageParser(t *testing.T) { }) is.NoError(err, "") is.Equal("docker.io/portainer/portainer-ee:latest", image.FullName()) - is.Equal("portainer/portainer-ee", image.opts.Name) + is.Equal("portainer/portainer-ee", image.Opts.Name) is.Equal("latest", image.Tag) is.Equal("portainer/portainer-ee", image.Path) is.Equal("docker.io", image.Domain) @@ -32,7 +32,7 @@ func TestImageParser(t *testing.T) { }) is.NoError(err, "") is.Equal("gcr.io/k8s-minikube/kicbase@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.FullName()) - is.Equal("gcr.io/k8s-minikube/kicbase@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name) + is.Equal("gcr.io/k8s-minikube/kicbase@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name) is.Equal("", image.Tag) is.Equal("k8s-minikube/kicbase", image.Path) is.Equal("gcr.io", image.Domain) @@ -49,7 +49,7 @@ func TestImageParser(t *testing.T) { }) is.NoError(err, "") is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName()) - is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name) + is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name) is.Equal("v0.0.30", image.Tag) is.Equal("k8s-minikube/kicbase", image.Path) is.Equal("gcr.io", image.Domain) @@ -71,7 +71,7 @@ func TestUpdateParsedImage(t *testing.T) { is.NoError(err, "") _ = image.WithTag("v0.0.31") is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.31", image.FullName()) - 
is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name) + is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name) is.Equal("v0.0.31", image.Tag) is.Equal("k8s-minikube/kicbase", image.Path) is.Equal("gcr.io", image.Domain) @@ -89,7 +89,7 @@ func TestUpdateParsedImage(t *testing.T) { is.NoError(err, "") _ = image.WithDigest("sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b3") is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName()) - is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name) + is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name) is.Equal("v0.0.30", image.Tag) is.Equal("k8s-minikube/kicbase", image.Path) is.Equal("gcr.io", image.Domain) @@ -105,9 +105,9 @@ func TestUpdateParsedImage(t *testing.T) { Name: "gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", }) is.NoError(err, "") - _ = image.trimDigest() + _ = image.TrimDigest() is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30", image.FullName()) - is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.opts.Name) + is.Equal("gcr.io/k8s-minikube/kicbase:v0.0.30@sha256:02c921df998f95e849058af14de7045efc3954d90320967418a0d1f182bbc0b2", image.Opts.Name) is.Equal("v0.0.30", image.Tag) is.Equal("k8s-minikube/kicbase", image.Path) is.Equal("gcr.io", image.Domain) diff --git a/api/docker/images/registry.go b/api/docker/images/registry.go index 10b4a6388..015385c2f 100644 --- a/api/docker/images/registry.go +++ b/api/docker/images/registry.go @@ -29,7 +29,7 @@ func (c *RegistryClient) RegistryAuth(image Image) (string, string, error) { return "", "", err } - registry, err := findBestMatchRegistry(image.opts.Name, registries) + registry, err := findBestMatchRegistry(image.Opts.Name, registries) if err != nil { return "", "", err } @@ -59,7 +59,7 @@ func (c *RegistryClient) EncodedRegistryAuth(image Image) (string, error) { return "", err } - registry, err := findBestMatchRegistry(image.opts.Name, registries) + registry, err := findBestMatchRegistry(image.Opts.Name, registries) if err != nil { return "", err } diff --git a/api/exec/exectest/kubernetes_mocks.go b/api/exec/exectest/kubernetes_mocks.go index 22638216e..7d2afac73 100644 --- a/api/exec/exectest/kubernetes_mocks.go +++ b/api/exec/exectest/kubernetes_mocks.go @@ -4,10 +4,12 @@ import ( portainer "github.com/portainer/portainer/api" ) -type kubernetesMockDeployer struct{} +type kubernetesMockDeployer struct { + portainer.KubernetesDeployer +} // NewKubernetesDeployer creates a mock kubernetes deployer -func NewKubernetesDeployer() portainer.KubernetesDeployer { +func NewKubernetesDeployer() *kubernetesMockDeployer { return &kubernetesMockDeployer{} } @@ -18,3 +20,7 @@ func (deployer *kubernetesMockDeployer) Deploy(userID portainer.UserID, endpoint func (deployer *kubernetesMockDeployer) Remove(userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) { return "", nil } + +func (deployer *kubernetesMockDeployer) Restart(userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) { + return "", nil +} 
diff --git a/api/exec/kubernetes_deploy.go b/api/exec/kubernetes_deploy.go index 1941a6513..940276929 100644 --- a/api/exec/kubernetes_deploy.go +++ b/api/exec/kubernetes_deploy.go @@ -1,13 +1,8 @@ package exec import ( - "bytes" + "context" "fmt" - "os" - "os/exec" - "path" - "runtime" - "strings" portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" @@ -15,13 +10,17 @@ import ( "github.com/portainer/portainer/api/http/proxy/factory" "github.com/portainer/portainer/api/http/proxy/factory/kubernetes" "github.com/portainer/portainer/api/kubernetes/cli" + "github.com/portainer/portainer/pkg/libkubectl" "github.com/pkg/errors" ) +const ( + defaultServerURL = "https://kubernetes.default.svc" +) + // KubernetesDeployer represents a service to deploy resources inside a Kubernetes environment(endpoint). type KubernetesDeployer struct { - binaryPath string dataStore dataservices.DataStore reverseTunnelService portainer.ReverseTunnelService signatureService portainer.DigitalSignatureService @@ -31,9 +30,8 @@ type KubernetesDeployer struct { } // NewKubernetesDeployer initializes a new KubernetesDeployer service. -func NewKubernetesDeployer(kubernetesTokenCacheManager *kubernetes.TokenCacheManager, kubernetesClientFactory *cli.ClientFactory, datastore dataservices.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, proxyManager *proxy.Manager, binaryPath string) *KubernetesDeployer { +func NewKubernetesDeployer(kubernetesTokenCacheManager *kubernetes.TokenCacheManager, kubernetesClientFactory *cli.ClientFactory, datastore dataservices.DataStore, reverseTunnelService portainer.ReverseTunnelService, signatureService portainer.DigitalSignatureService, proxyManager *proxy.Manager) *KubernetesDeployer { return &KubernetesDeployer{ - binaryPath: binaryPath, dataStore: datastore, reverseTunnelService: reverseTunnelService, signatureService: signatureService, @@ -78,63 +76,56 @@ func (deployer *KubernetesDeployer) getToken(userID portainer.UserID, endpoint * } // Deploy upserts Kubernetes resources defined in manifest(s) -func (deployer *KubernetesDeployer) Deploy(userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) { - return deployer.command("apply", userID, endpoint, manifestFiles, namespace) +func (deployer *KubernetesDeployer) Deploy(userID portainer.UserID, endpoint *portainer.Endpoint, resources []string, namespace string) (string, error) { + return deployer.command("apply", userID, endpoint, resources, namespace) } // Remove deletes Kubernetes resources defined in manifest(s) -func (deployer *KubernetesDeployer) Remove(userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) { - return deployer.command("delete", userID, endpoint, manifestFiles, namespace) +func (deployer *KubernetesDeployer) Remove(userID portainer.UserID, endpoint *portainer.Endpoint, resources []string, namespace string) (string, error) { + return deployer.command("delete", userID, endpoint, resources, namespace) } -func (deployer *KubernetesDeployer) command(operation string, userID portainer.UserID, endpoint *portainer.Endpoint, manifestFiles []string, namespace string) (string, error) { +func (deployer *KubernetesDeployer) command(operation string, userID portainer.UserID, endpoint *portainer.Endpoint, resources []string, namespace string) (string, error) { token, err := deployer.getToken(userID, endpoint, 
endpoint.Type == portainer.KubernetesLocalEnvironment) if err != nil { return "", errors.Wrap(err, "failed generating a user token") } - command := path.Join(deployer.binaryPath, "kubectl") - if runtime.GOOS == "windows" { - command = path.Join(deployer.binaryPath, "kubectl.exe") - } - - args := []string{"--token", token} - if namespace != "" { - args = append(args, "--namespace", namespace) - } - + serverURL := defaultServerURL if endpoint.Type == portainer.AgentOnKubernetesEnvironment || endpoint.Type == portainer.EdgeAgentOnKubernetesEnvironment { url, proxy, err := deployer.getAgentURL(endpoint) if err != nil { return "", errors.WithMessage(err, "failed generating endpoint URL") } - defer proxy.Close() - args = append(args, "--server", url) - args = append(args, "--insecure-skip-tls-verify") + + serverURL = url } - if operation == "delete" { - args = append(args, "--ignore-not-found=true") - } - - args = append(args, operation) - for _, path := range manifestFiles { - args = append(args, "-f", strings.TrimSpace(path)) - } - - var stderr bytes.Buffer - cmd := exec.Command(command, args...) - cmd.Env = os.Environ() - cmd.Env = append(cmd.Env, "POD_NAMESPACE=default") - cmd.Stderr = &stderr - - output, err := cmd.Output() + client, err := libkubectl.NewClient(&libkubectl.ClientAccess{ + Token: token, + ServerUrl: serverURL, + }, namespace, "", true) if err != nil { - return "", errors.Wrapf(err, "failed to execute kubectl command: %q", stderr.String()) + return "", errors.Wrap(err, "failed to create kubectl client") } - return string(output), nil + operations := map[string]func(context.Context, []string) (string, error){ + "apply": client.Apply, + "delete": client.Delete, + } + + operationFunc, ok := operations[operation] + if !ok { + return "", errors.Errorf("unsupported operation: %s", operation) + } + + output, err := operationFunc(context.Background(), resources) + if err != nil { + return "", errors.Wrapf(err, "failed to execute kubectl %s command", operation) + } + + return output, nil } func (deployer *KubernetesDeployer) getAgentURL(endpoint *portainer.Endpoint) (string, *factory.ProxyServer, error) { diff --git a/api/exec/kubernetes_deploy_test.go b/api/exec/kubernetes_deploy_test.go new file mode 100644 index 000000000..cd49a2b92 --- /dev/null +++ b/api/exec/kubernetes_deploy_test.go @@ -0,0 +1,173 @@ +package exec + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +type mockKubectlClient struct { + applyFunc func(ctx context.Context, files []string) error + deleteFunc func(ctx context.Context, files []string) error + rolloutRestartFunc func(ctx context.Context, resources []string) error +} + +func (m *mockKubectlClient) Apply(ctx context.Context, files []string) error { + if m.applyFunc != nil { + return m.applyFunc(ctx, files) + } + return nil +} + +func (m *mockKubectlClient) Delete(ctx context.Context, files []string) error { + if m.deleteFunc != nil { + return m.deleteFunc(ctx, files) + } + return nil +} + +func (m *mockKubectlClient) RolloutRestart(ctx context.Context, resources []string) error { + if m.rolloutRestartFunc != nil { + return m.rolloutRestartFunc(ctx, resources) + } + return nil +} + +func testExecuteKubectlOperation(client *mockKubectlClient, operation string, manifestFiles []string) error { + operations := map[string]func(context.Context, []string) error{ + "apply": client.Apply, + "delete": client.Delete, + "rollout-restart": client.RolloutRestart, + } + + operationFunc, ok := operations[operation] + if !ok 
{ + return fmt.Errorf("unsupported operation: %s", operation) + } + + if err := operationFunc(context.Background(), manifestFiles); err != nil { + return fmt.Errorf("failed to execute kubectl %s command: %w", operation, err) + } + + return nil +} + +func TestExecuteKubectlOperation_Apply_Success(t *testing.T) { + called := false + mockClient := &mockKubectlClient{ + applyFunc: func(ctx context.Context, files []string) error { + called = true + assert.Equal(t, []string{"manifest1.yaml", "manifest2.yaml"}, files) + return nil + }, + } + + manifests := []string{"manifest1.yaml", "manifest2.yaml"} + err := testExecuteKubectlOperation(mockClient, "apply", manifests) + + assert.NoError(t, err) + assert.True(t, called) +} + +func TestExecuteKubectlOperation_Apply_Error(t *testing.T) { + expectedErr := errors.New("kubectl apply failed") + called := false + mockClient := &mockKubectlClient{ + applyFunc: func(ctx context.Context, files []string) error { + called = true + assert.Equal(t, []string{"error.yaml"}, files) + return expectedErr + }, + } + + manifests := []string{"error.yaml"} + err := testExecuteKubectlOperation(mockClient, "apply", manifests) + + assert.Error(t, err) + assert.Contains(t, err.Error(), expectedErr.Error()) + assert.True(t, called) +} + +func TestExecuteKubectlOperation_Delete_Success(t *testing.T) { + called := false + mockClient := &mockKubectlClient{ + deleteFunc: func(ctx context.Context, files []string) error { + called = true + assert.Equal(t, []string{"manifest1.yaml"}, files) + return nil + }, + } + + manifests := []string{"manifest1.yaml"} + err := testExecuteKubectlOperation(mockClient, "delete", manifests) + + assert.NoError(t, err) + assert.True(t, called) +} + +func TestExecuteKubectlOperation_Delete_Error(t *testing.T) { + expectedErr := errors.New("kubectl delete failed") + called := false + mockClient := &mockKubectlClient{ + deleteFunc: func(ctx context.Context, files []string) error { + called = true + assert.Equal(t, []string{"error.yaml"}, files) + return expectedErr + }, + } + + manifests := []string{"error.yaml"} + err := testExecuteKubectlOperation(mockClient, "delete", manifests) + + assert.Error(t, err) + assert.Contains(t, err.Error(), expectedErr.Error()) + assert.True(t, called) +} + +func TestExecuteKubectlOperation_RolloutRestart_Success(t *testing.T) { + called := false + mockClient := &mockKubectlClient{ + rolloutRestartFunc: func(ctx context.Context, resources []string) error { + called = true + assert.Equal(t, []string{"deployment/nginx"}, resources) + return nil + }, + } + + resources := []string{"deployment/nginx"} + err := testExecuteKubectlOperation(mockClient, "rollout-restart", resources) + + assert.NoError(t, err) + assert.True(t, called) +} + +func TestExecuteKubectlOperation_RolloutRestart_Error(t *testing.T) { + expectedErr := errors.New("kubectl rollout restart failed") + called := false + mockClient := &mockKubectlClient{ + rolloutRestartFunc: func(ctx context.Context, resources []string) error { + called = true + assert.Equal(t, []string{"deployment/error"}, resources) + return expectedErr + }, + } + + resources := []string{"deployment/error"} + err := testExecuteKubectlOperation(mockClient, "rollout-restart", resources) + + assert.Error(t, err) + assert.Contains(t, err.Error(), expectedErr.Error()) + assert.True(t, called) +} + +func TestExecuteKubectlOperation_UnsupportedOperation(t *testing.T) { + mockClient := &mockKubectlClient{} + + err := testExecuteKubectlOperation(mockClient, "unsupported", []string{}) + + 
assert.Error(t, err) + assert.Contains(t, err.Error(), "unsupported operation") +} diff --git a/api/exec/swarm_stack.go b/api/exec/swarm_stack.go index 634194d67..b5e13dcc6 100644 --- a/api/exec/swarm_stack.go +++ b/api/exec/swarm_stack.go @@ -127,7 +127,7 @@ func (manager *SwarmStackManager) Remove(stack *portainer.Stack, endpoint *porta return err } - args = append(args, "stack", "rm", stack.Name) + args = append(args, "stack", "rm", "--detach=false", stack.Name) return runCommandAndCaptureStdErr(command, args, nil, "") } diff --git a/api/filesystem/copy.go b/api/filesystem/copy.go index abf4d33aa..bc0abb766 100644 --- a/api/filesystem/copy.go +++ b/api/filesystem/copy.go @@ -68,7 +68,7 @@ func copyFile(src, dst string) error { defer from.Close() // has to include 'execute' bit, otherwise fails. MkdirAll follows `mkdir -m` restrictions - if err := os.MkdirAll(filepath.Dir(dst), 0744); err != nil { + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { return err } to, err := os.Create(dst) diff --git a/api/filesystem/filesystem.go b/api/filesystem/filesystem.go index df49a2706..43e38b4e7 100644 --- a/api/filesystem/filesystem.go +++ b/api/filesystem/filesystem.go @@ -841,11 +841,11 @@ func (service *Service) GetDefaultSSLCertsPath() (string, string) { } func defaultMTLSCertPathUnderFileStore() (string, string, string) { - certPath := JoinPaths(SSLCertPath, MTLSCertFilename) caCertPath := JoinPaths(SSLCertPath, MTLSCACertFilename) + certPath := JoinPaths(SSLCertPath, MTLSCertFilename) keyPath := JoinPaths(SSLCertPath, MTLSKeyFilename) - return certPath, caCertPath, keyPath + return caCertPath, certPath, keyPath } // GetDefaultChiselPrivateKeyPath returns the chisle private key path @@ -1014,26 +1014,45 @@ func CreateFile(path string, r io.Reader) error { return err } -func (service *Service) StoreMTLSCertificates(cert, caCert, key []byte) (string, string, string, error) { - certPath, caCertPath, keyPath := defaultMTLSCertPathUnderFileStore() +func (service *Service) StoreMTLSCertificates(caCert, cert, key []byte) (string, string, string, error) { + caCertPath, certPath, keyPath := defaultMTLSCertPathUnderFileStore() - r := bytes.NewReader(cert) - err := service.createFileInStore(certPath, r) - if err != nil { + r := bytes.NewReader(caCert) + if err := service.createFileInStore(caCertPath, r); err != nil { return "", "", "", err } - r = bytes.NewReader(caCert) - err = service.createFileInStore(caCertPath, r) - if err != nil { + r = bytes.NewReader(cert) + if err := service.createFileInStore(certPath, r); err != nil { return "", "", "", err } r = bytes.NewReader(key) - err = service.createFileInStore(keyPath, r) - if err != nil { + if err := service.createFileInStore(keyPath, r); err != nil { return "", "", "", err } - return service.wrapFileStore(certPath), service.wrapFileStore(caCertPath), service.wrapFileStore(keyPath), nil + return service.wrapFileStore(caCertPath), service.wrapFileStore(certPath), service.wrapFileStore(keyPath), nil +} + +func (service *Service) GetMTLSCertificates() (string, string, string, error) { + caCertPath, certPath, keyPath := defaultMTLSCertPathUnderFileStore() + + caCertPath = service.wrapFileStore(caCertPath) + certPath = service.wrapFileStore(certPath) + keyPath = service.wrapFileStore(keyPath) + + paths := [...]string{caCertPath, certPath, keyPath} + for _, path := range paths { + exists, err := service.FileExists(path) + if err != nil { + return "", "", "", err + } + + if !exists { + return "", "", "", fmt.Errorf("file %s does not exist", path) + 
} + } + + return caCertPath, certPath, keyPath, nil } diff --git a/api/filesystem/serialize_per_dev_configs.go b/api/filesystem/serialize_per_dev_configs.go index c9d02ad0d..7ae653934 100644 --- a/api/filesystem/serialize_per_dev_configs.go +++ b/api/filesystem/serialize_per_dev_configs.go @@ -15,15 +15,19 @@ type MultiFilterArgs []struct { } // MultiFilterDirForPerDevConfigs filers the given dirEntries with multiple filter args, returns the merged entries for the given device -func MultiFilterDirForPerDevConfigs(dirEntries []DirEntry, configPath string, multiFilterArgs MultiFilterArgs) []DirEntry { +func MultiFilterDirForPerDevConfigs(dirEntries []DirEntry, configPath string, multiFilterArgs MultiFilterArgs) ([]DirEntry, []string) { var filteredDirEntries []DirEntry + var envFiles []string + for _, multiFilterArg := range multiFilterArgs { - tmp := FilterDirForPerDevConfigs(dirEntries, multiFilterArg.FilterKey, configPath, multiFilterArg.FilterType) + tmp, efs := FilterDirForPerDevConfigs(dirEntries, multiFilterArg.FilterKey, configPath, multiFilterArg.FilterType) filteredDirEntries = append(filteredDirEntries, tmp...) + + envFiles = append(envFiles, efs...) } - return deduplicate(filteredDirEntries) + return deduplicate(filteredDirEntries), envFiles } func deduplicate(dirEntries []DirEntry) []DirEntry { @@ -32,8 +36,7 @@ func deduplicate(dirEntries []DirEntry) []DirEntry { marks := make(map[string]struct{}) for _, dirEntry := range dirEntries { - _, ok := marks[dirEntry.Name] - if !ok { + if _, ok := marks[dirEntry.Name]; !ok { marks[dirEntry.Name] = struct{}{} deduplicatedDirEntries = append(deduplicatedDirEntries, dirEntry) } @@ -44,34 +47,33 @@ func deduplicate(dirEntries []DirEntry) []DirEntry { // FilterDirForPerDevConfigs filers the given dirEntries, returns entries for the given device // For given configPath A/B/C, return entries: -// 1. all entries outside of dir A -// 2. dir entries A, A/B, A/B/C -// 3. For filterType file: +// 1. all entries outside of dir A/B/C +// 2. For filterType file: // file entries: A/B/C/ and A/B/C/.* -// 4. For filterType dir: +// 3. 
For filterType dir: // dir entry: A/B/C/ // all entries: A/B/C//* -func FilterDirForPerDevConfigs(dirEntries []DirEntry, deviceName, configPath string, filterType portainer.PerDevConfigsFilterType) []DirEntry { +func FilterDirForPerDevConfigs(dirEntries []DirEntry, deviceName, configPath string, filterType portainer.PerDevConfigsFilterType) ([]DirEntry, []string) { var filteredDirEntries []DirEntry + var envFiles []string + for _, dirEntry := range dirEntries { if shouldIncludeEntry(dirEntry, deviceName, configPath, filterType) { filteredDirEntries = append(filteredDirEntries, dirEntry) + + if shouldParseEnvVars(dirEntry, deviceName, configPath, filterType) { + envFiles = append(envFiles, dirEntry.Name) + } } } - return filteredDirEntries + return filteredDirEntries, envFiles } func shouldIncludeEntry(dirEntry DirEntry, deviceName, configPath string, filterType portainer.PerDevConfigsFilterType) bool { - // Include all entries outside of dir A - if !isInConfigRootDir(dirEntry, configPath) { - return true - } - - // Include dir entries A, A/B, A/B/C - if isParentDir(dirEntry, configPath) { + if !isInConfigDir(dirEntry, configPath) { return true } @@ -90,21 +92,9 @@ func shouldIncludeEntry(dirEntry DirEntry, deviceName, configPath string, filter return false } -func isInConfigRootDir(dirEntry DirEntry, configPath string) bool { - // get the first element of the configPath - rootDir := strings.Split(configPath, string(os.PathSeparator))[0] - - // return true if entry name starts with "A/" - return strings.HasPrefix(dirEntry.Name, appendTailSeparator(rootDir)) -} - -func isParentDir(dirEntry DirEntry, configPath string) bool { - if dirEntry.IsFile { - return false - } - - // return true for dir entries A, A/B, A/B/C - return strings.HasPrefix(appendTailSeparator(configPath), appendTailSeparator(dirEntry.Name)) +func isInConfigDir(dirEntry DirEntry, configPath string) bool { + // return true if entry name starts with "A/B" + return strings.HasPrefix(dirEntry.Name, appendTailSeparator(configPath)) } func shouldIncludeFile(dirEntry DirEntry, deviceName, configPath string) bool { @@ -138,6 +128,15 @@ func shouldIncludeDir(dirEntry DirEntry, deviceName, configPath string) bool { return strings.HasPrefix(dirEntry.Name, filterPrefix) } +func shouldParseEnvVars(dirEntry DirEntry, deviceName, configPath string, filterType portainer.PerDevConfigsFilterType) bool { + if !dirEntry.IsFile { + return false + } + + return isInConfigDir(dirEntry, configPath) && + filepath.Base(dirEntry.Name) == deviceName+".env" +} + func appendTailSeparator(path string) string { return fmt.Sprintf("%s%c", path, os.PathSeparator) } diff --git a/api/filesystem/serialize_per_dev_configs_test.go b/api/filesystem/serialize_per_dev_configs_test.go index 6a2a5f33b..2330db68d 100644 --- a/api/filesystem/serialize_per_dev_configs_test.go +++ b/api/filesystem/serialize_per_dev_configs_test.go @@ -4,14 +4,17 @@ import ( "testing" portainer "github.com/portainer/portainer/api" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestMultiFilterDirForPerDevConfigs(t *testing.T) { - type args struct { - dirEntries []DirEntry - configPath string - multiFilterArgs MultiFilterArgs + f := func(dirEntries []DirEntry, configPath string, multiFilterArgs MultiFilterArgs, wantDirEntries []DirEntry) { + t.Helper() + + dirEntries, _ = MultiFilterDirForPerDevConfigs(dirEntries, configPath, multiFilterArgs) + require.Equal(t, wantDirEntries, dirEntries) } baseDirEntries := []DirEntry{ @@ -26,67 +29,94 @@ func 
TestMultiFilterDirForPerDevConfigs(t *testing.T) { {"configs/folder2/config2", "", true, 420}, } - tests := []struct { - name string - args args - want []DirEntry - }{ - { - name: "filter file1", - args: args{ - baseDirEntries, - "configs", - MultiFilterArgs{{"file1", portainer.PerDevConfigsTypeFile}}, - }, - want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[3]}, + // Filter file1 + f( + baseDirEntries, + "configs", + MultiFilterArgs{{"file1", portainer.PerDevConfigsTypeFile}}, + []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[3]}, + ) + + // Filter folder1 + f( + baseDirEntries, + "configs", + MultiFilterArgs{{"folder1", portainer.PerDevConfigsTypeDir}}, + []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6]}, + ) + + // Filter file1 and folder1 + f( + baseDirEntries, + "configs", + MultiFilterArgs{{"folder1", portainer.PerDevConfigsTypeDir}}, + []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6]}, + ) + + // Filter file1 and file2 + f( + baseDirEntries, + "configs", + MultiFilterArgs{ + {"file1", portainer.PerDevConfigsTypeFile}, + {"file2", portainer.PerDevConfigsTypeFile}, }, - { - name: "filter folder1", - args: args{ - baseDirEntries, - "configs", - MultiFilterArgs{{"folder1", portainer.PerDevConfigsTypeDir}}, - }, - want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6]}, - }, - { - name: "filter file1 and folder1", - args: args{ - baseDirEntries, - "configs", - MultiFilterArgs{{"folder1", portainer.PerDevConfigsTypeDir}}, - }, - want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6]}, - }, - { - name: "filter file1 and file2", - args: args{ - baseDirEntries, - "configs", - MultiFilterArgs{ - {"file1", portainer.PerDevConfigsTypeFile}, - {"file2", portainer.PerDevConfigsTypeFile}, - }, - }, - want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[3], baseDirEntries[4]}, - }, - { - name: "filter folder1 and folder2", - args: args{ - baseDirEntries, - "configs", - MultiFilterArgs{ - {"folder1", portainer.PerDevConfigsTypeDir}, - {"folder2", portainer.PerDevConfigsTypeDir}, - }, - }, - want: []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6], baseDirEntries[7], baseDirEntries[8]}, + []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[3], baseDirEntries[4]}, + ) + + // Filter folder1 and folder2 + f( + baseDirEntries, + "configs", + MultiFilterArgs{ + {"folder1", portainer.PerDevConfigsTypeDir}, + {"folder2", portainer.PerDevConfigsTypeDir}, }, + []DirEntry{baseDirEntries[0], baseDirEntries[1], baseDirEntries[2], baseDirEntries[5], baseDirEntries[6], baseDirEntries[7], baseDirEntries[8]}, + ) +} + +func TestMultiFilterDirForPerDevConfigsEnvFiles(t *testing.T) { + f := func(dirEntries []DirEntry, configPath string, multiFilterArgs MultiFilterArgs, wantEnvFiles []string) { + t.Helper() + + _, envFiles := MultiFilterDirForPerDevConfigs(dirEntries, configPath, multiFilterArgs) + require.Equal(t, wantEnvFiles, envFiles) } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equalf(t, tt.want, MultiFilterDirForPerDevConfigs(tt.args.dirEntries, tt.args.configPath, tt.args.multiFilterArgs), "MultiFilterDirForPerDevConfigs(%v, %v, %v)", tt.args.dirEntries, 
tt.args.configPath, tt.args.multiFilterArgs) - }) + baseDirEntries := []DirEntry{ + {".env", "", true, 420}, + {"docker-compose.yaml", "", true, 420}, + {"configs", "", false, 420}, + {"configs/edge-id/edge-id.env", "", true, 420}, } + + f( + baseDirEntries, + "configs", + MultiFilterArgs{{"edge-id", portainer.PerDevConfigsTypeDir}}, + []string{"configs/edge-id/edge-id.env"}, + ) + +} + +func TestIsInConfigDir(t *testing.T) { + f := func(dirEntry DirEntry, configPath string, expect bool) { + t.Helper() + + actual := isInConfigDir(dirEntry, configPath) + assert.Equal(t, expect, actual) + } + + f(DirEntry{Name: "edge-configs"}, "edge-configs", false) + f(DirEntry{Name: "edge-configs_backup"}, "edge-configs", false) + f(DirEntry{Name: "edge-configs/standalone-edge-agent-standard"}, "edge-configs", true) + f(DirEntry{Name: "parent/edge-configs/"}, "edge-configs", false) + f(DirEntry{Name: "edgestacktest"}, "edgestacktest/edge-configs", false) + f(DirEntry{Name: "edgestacktest/edgeconfigs-test.yaml"}, "edgestacktest/edge-configs", false) + f(DirEntry{Name: "edgestacktest/file1.conf"}, "edgestacktest/edge-configs", false) + f(DirEntry{Name: "edgeconfigs-test.yaml"}, "edgestacktest/edge-configs", false) + f(DirEntry{Name: "edgestacktest/edge-configs"}, "edgestacktest/edge-configs", false) + f(DirEntry{Name: "edgestacktest/edge-configs/standalone-edge-agent-async"}, "edgestacktest/edge-configs", true) + f(DirEntry{Name: "edgestacktest/edge-configs/abc.txt"}, "edgestacktest/edge-configs", true) } diff --git a/api/git/azure.go b/api/git/azure.go index 9fabeb066..b712cfd79 100644 --- a/api/git/azure.go +++ b/api/git/azure.go @@ -60,15 +60,9 @@ func NewAzureClient() *azureClient { } func newHttpClientForAzure(insecureSkipVerify bool) *http.Client { - tlsConfig := crypto.CreateTLSConfiguration() - - if insecureSkipVerify { - tlsConfig.InsecureSkipVerify = true - } - httpsCli := &http.Client{ Transport: &http.Transport{ - TLSClientConfig: tlsConfig, + TLSClientConfig: crypto.CreateTLSConfiguration(insecureSkipVerify), Proxy: http.ProxyFromEnvironment, }, Timeout: 300 * time.Second, diff --git a/api/git/azure_integration_test.go b/api/git/azure_integration_test.go index 3e297a129..5de18b303 100644 --- a/api/git/azure_integration_test.go +++ b/api/git/azure_integration_test.go @@ -58,7 +58,15 @@ func TestService_ClonePublicRepository_Azure(t *testing.T) { t.Run(tt.name, func(t *testing.T) { dst := t.TempDir() repositoryUrl := fmt.Sprintf(tt.args.repositoryURLFormat, tt.args.password) - err := service.CloneRepository(dst, repositoryUrl, tt.args.referenceName, "", "", false) + err := service.CloneRepository( + dst, + repositoryUrl, + tt.args.referenceName, + "", + "", + gittypes.GitCredentialAuthType_Basic, + false, + ) assert.NoError(t, err) assert.FileExists(t, filepath.Join(dst, "README.md")) }) @@ -73,7 +81,15 @@ func TestService_ClonePrivateRepository_Azure(t *testing.T) { dst := t.TempDir() - err := service.CloneRepository(dst, privateAzureRepoURL, "refs/heads/main", "", pat, false) + err := service.CloneRepository( + dst, + privateAzureRepoURL, + "refs/heads/main", + "", + pat, + gittypes.GitCredentialAuthType_Basic, + false, + ) assert.NoError(t, err) assert.FileExists(t, filepath.Join(dst, "README.md")) } @@ -84,7 +100,14 @@ func TestService_LatestCommitID_Azure(t *testing.T) { pat := getRequiredValue(t, "AZURE_DEVOPS_PAT") service := NewService(context.TODO()) - id, err := service.LatestCommitID(privateAzureRepoURL, "refs/heads/main", "", pat, false) + id, err := service.LatestCommitID( + 
privateAzureRepoURL, + "refs/heads/main", + "", + pat, + gittypes.GitCredentialAuthType_Basic, + false, + ) assert.NoError(t, err) assert.NotEmpty(t, id, "cannot guarantee commit id, but it should be not empty") } @@ -96,7 +119,14 @@ func TestService_ListRefs_Azure(t *testing.T) { username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME") service := NewService(context.TODO()) - refs, err := service.ListRefs(privateAzureRepoURL, username, accessToken, false, false) + refs, err := service.ListRefs( + privateAzureRepoURL, + username, + accessToken, + gittypes.GitCredentialAuthType_Basic, + false, + false, + ) assert.NoError(t, err) assert.GreaterOrEqual(t, len(refs), 1) } @@ -108,8 +138,8 @@ func TestService_ListRefs_Azure_Concurrently(t *testing.T) { username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME") service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond) - go service.ListRefs(privateAzureRepoURL, username, accessToken, false, false) - service.ListRefs(privateAzureRepoURL, username, accessToken, false, false) + go service.ListRefs(privateAzureRepoURL, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false) + service.ListRefs(privateAzureRepoURL, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false) time.Sleep(2 * time.Second) } @@ -247,7 +277,17 @@ func TestService_ListFiles_Azure(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - paths, err := service.ListFiles(tt.args.repositoryUrl, tt.args.referenceName, tt.args.username, tt.args.password, false, false, tt.extensions, false) + paths, err := service.ListFiles( + tt.args.repositoryUrl, + tt.args.referenceName, + tt.args.username, + tt.args.password, + gittypes.GitCredentialAuthType_Basic, + false, + false, + tt.extensions, + false, + ) if tt.expect.shouldFail { assert.Error(t, err) if tt.expect.err != nil { @@ -270,8 +310,28 @@ func TestService_ListFiles_Azure_Concurrently(t *testing.T) { username := getRequiredValue(t, "AZURE_DEVOPS_USERNAME") service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond) - go service.ListFiles(privateAzureRepoURL, "refs/heads/main", username, accessToken, false, false, []string{}, false) - service.ListFiles(privateAzureRepoURL, "refs/heads/main", username, accessToken, false, false, []string{}, false) + go service.ListFiles( + privateAzureRepoURL, + "refs/heads/main", + username, + accessToken, + gittypes.GitCredentialAuthType_Basic, + false, + false, + []string{}, + false, + ) + service.ListFiles( + privateAzureRepoURL, + "refs/heads/main", + username, + accessToken, + gittypes.GitCredentialAuthType_Basic, + false, + false, + []string{}, + false, + ) time.Sleep(2 * time.Second) } diff --git a/api/git/backup.go b/api/git/backup.go index 286b51876..6928f521a 100644 --- a/api/git/backup.go +++ b/api/git/backup.go @@ -19,6 +19,7 @@ type CloneOptions struct { ReferenceName string Username string Password string + AuthType gittypes.GitCredentialAuthType // TLSSkipVerify skips SSL verification when cloning the Git repository TLSSkipVerify bool `example:"false"` } @@ -42,7 +43,15 @@ func CloneWithBackup(gitService portainer.GitService, fileService portainer.File cleanUp = true - if err := gitService.CloneRepository(options.ProjectPath, options.URL, options.ReferenceName, options.Username, options.Password, options.TLSSkipVerify); err != nil { + if err := gitService.CloneRepository( + options.ProjectPath, + options.URL, + options.ReferenceName, + options.Username, + options.Password, + 
options.AuthType, + options.TLSSkipVerify, + ); err != nil { cleanUp = false if err := filesystem.MoveDirectory(backupProjectPath, options.ProjectPath, false); err != nil { log.Warn().Err(err).Msg("failed restoring backup folder") diff --git a/api/git/git.go b/api/git/git.go index 6c2835815..cf0c9f478 100644 --- a/api/git/git.go +++ b/api/git/git.go @@ -7,12 +7,14 @@ import ( "strings" gittypes "github.com/portainer/portainer/api/git/types" + "github.com/rs/zerolog/log" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/filemode" "github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-git/go-git/v5/plumbing/transport" githttp "github.com/go-git/go-git/v5/plumbing/transport/http" "github.com/go-git/go-git/v5/storage/memory" "github.com/pkg/errors" @@ -33,7 +35,7 @@ func (c *gitClient) download(ctx context.Context, dst string, opt cloneOption) e URL: opt.repositoryUrl, Depth: opt.depth, InsecureSkipTLS: opt.tlsSkipVerify, - Auth: getAuth(opt.username, opt.password), + Auth: getAuth(opt.authType, opt.username, opt.password), Tags: git.NoTags, } @@ -51,7 +53,10 @@ func (c *gitClient) download(ctx context.Context, dst string, opt cloneOption) e } if !c.preserveGitDirectory { - os.RemoveAll(filepath.Join(dst, ".git")) + err := os.RemoveAll(filepath.Join(dst, ".git")) + if err != nil { + log.Error().Err(err).Msg("failed to remove .git directory") + } } return nil @@ -64,7 +69,7 @@ func (c *gitClient) latestCommitID(ctx context.Context, opt fetchOption) (string }) listOptions := &git.ListOptions{ - Auth: getAuth(opt.username, opt.password), + Auth: getAuth(opt.authType, opt.username, opt.password), InsecureSkipTLS: opt.tlsSkipVerify, } @@ -94,7 +99,23 @@ func (c *gitClient) latestCommitID(ctx context.Context, opt fetchOption) (string return "", errors.Errorf("could not find ref %q in the repository", opt.referenceName) } -func getAuth(username, password string) *githttp.BasicAuth { +func getAuth(authType gittypes.GitCredentialAuthType, username, password string) transport.AuthMethod { + if password == "" { + return nil + } + + switch authType { + case gittypes.GitCredentialAuthType_Basic: + return getBasicAuth(username, password) + case gittypes.GitCredentialAuthType_Token: + return getTokenAuth(password) + default: + log.Warn().Msg("unknown git credentials authorization type, defaulting to None") + return nil + } +} + +func getBasicAuth(username, password string) *githttp.BasicAuth { if password != "" { if username == "" { username = "token" @@ -108,6 +129,15 @@ func getAuth(username, password string) *githttp.BasicAuth { return nil } +func getTokenAuth(token string) *githttp.TokenAuth { + if token != "" { + return &githttp.TokenAuth{ + Token: token, + } + } + return nil +} + func (c *gitClient) listRefs(ctx context.Context, opt baseOption) ([]string, error) { rem := git.NewRemote(memory.NewStorage(), &config.RemoteConfig{ Name: "origin", @@ -115,7 +145,7 @@ func (c *gitClient) listRefs(ctx context.Context, opt baseOption) ([]string, err }) listOptions := &git.ListOptions{ - Auth: getAuth(opt.username, opt.password), + Auth: getAuth(opt.authType, opt.username, opt.password), InsecureSkipTLS: opt.tlsSkipVerify, } @@ -143,7 +173,7 @@ func (c *gitClient) listFiles(ctx context.Context, opt fetchOption) ([]string, e Depth: 1, SingleBranch: true, ReferenceName: plumbing.ReferenceName(opt.referenceName), - Auth: getAuth(opt.username, opt.password), + Auth: getAuth(opt.authType, opt.username, 
opt.password), InsecureSkipTLS: opt.tlsSkipVerify, Tags: git.NoTags, } diff --git a/api/git/git_integration_test.go b/api/git/git_integration_test.go index add10afd6..6cb10253a 100644 --- a/api/git/git_integration_test.go +++ b/api/git/git_integration_test.go @@ -2,6 +2,8 @@ package git import ( "context" + "net/http" + "net/http/httptest" "path/filepath" "testing" "time" @@ -24,7 +26,15 @@ func TestService_ClonePrivateRepository_GitHub(t *testing.T) { dst := t.TempDir() repositoryUrl := privateGitRepoURL - err := service.CloneRepository(dst, repositoryUrl, "refs/heads/main", username, accessToken, false) + err := service.CloneRepository( + dst, + repositoryUrl, + "refs/heads/main", + username, + accessToken, + gittypes.GitCredentialAuthType_Basic, + false, + ) assert.NoError(t, err) assert.FileExists(t, filepath.Join(dst, "README.md")) } @@ -37,7 +47,14 @@ func TestService_LatestCommitID_GitHub(t *testing.T) { service := newService(context.TODO(), 0, 0) repositoryUrl := privateGitRepoURL - id, err := service.LatestCommitID(repositoryUrl, "refs/heads/main", username, accessToken, false) + id, err := service.LatestCommitID( + repositoryUrl, + "refs/heads/main", + username, + accessToken, + gittypes.GitCredentialAuthType_Basic, + false, + ) assert.NoError(t, err) assert.NotEmpty(t, id, "cannot guarantee commit id, but it should be not empty") } @@ -50,7 +67,7 @@ func TestService_ListRefs_GitHub(t *testing.T) { service := newService(context.TODO(), 0, 0) repositoryUrl := privateGitRepoURL - refs, err := service.ListRefs(repositoryUrl, username, accessToken, false, false) + refs, err := service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false) assert.NoError(t, err) assert.GreaterOrEqual(t, len(refs), 1) } @@ -63,8 +80,8 @@ func TestService_ListRefs_Github_Concurrently(t *testing.T) { service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond) repositoryUrl := privateGitRepoURL - go service.ListRefs(repositoryUrl, username, accessToken, false, false) - service.ListRefs(repositoryUrl, username, accessToken, false, false) + go service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false) + service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false) time.Sleep(2 * time.Second) } @@ -202,7 +219,17 @@ func TestService_ListFiles_GitHub(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - paths, err := service.ListFiles(tt.args.repositoryUrl, tt.args.referenceName, tt.args.username, tt.args.password, false, false, tt.extensions, false) + paths, err := service.ListFiles( + tt.args.repositoryUrl, + tt.args.referenceName, + tt.args.username, + tt.args.password, + gittypes.GitCredentialAuthType_Basic, + false, + false, + tt.extensions, + false, + ) if tt.expect.shouldFail { assert.Error(t, err) if tt.expect.err != nil { @@ -226,8 +253,28 @@ func TestService_ListFiles_Github_Concurrently(t *testing.T) { username := getRequiredValue(t, "GITHUB_USERNAME") service := newService(context.TODO(), repositoryCacheSize, 200*time.Millisecond) - go service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false) - service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false) + go service.ListFiles( + repositoryUrl, + "refs/heads/main", + username, + accessToken, + gittypes.GitCredentialAuthType_Basic, + false, + false, + []string{}, + false, 
+ ) + service.ListFiles( + repositoryUrl, + "refs/heads/main", + username, + accessToken, + gittypes.GitCredentialAuthType_Basic, + false, + false, + []string{}, + false, + ) time.Sleep(2 * time.Second) } @@ -240,8 +287,18 @@ func TestService_purgeCache_Github(t *testing.T) { username := getRequiredValue(t, "GITHUB_USERNAME") service := NewService(context.TODO()) - service.ListRefs(repositoryUrl, username, accessToken, false, false) - service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false) + service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false) + service.ListFiles( + repositoryUrl, + "refs/heads/main", + username, + accessToken, + gittypes.GitCredentialAuthType_Basic, + false, + false, + []string{}, + false, + ) assert.Equal(t, 1, service.repoRefCache.Len()) assert.Equal(t, 1, service.repoFileCache.Len()) @@ -261,8 +318,18 @@ func TestService_purgeCacheByTTL_Github(t *testing.T) { // 40*timeout is designed for giving enough time for ListRefs and ListFiles to cache the result service := newService(context.TODO(), 2, 40*timeout) - service.ListRefs(repositoryUrl, username, accessToken, false, false) - service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false) + service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false) + service.ListFiles( + repositoryUrl, + "refs/heads/main", + username, + accessToken, + gittypes.GitCredentialAuthType_Basic, + false, + false, + []string{}, + false, + ) assert.Equal(t, 1, service.repoRefCache.Len()) assert.Equal(t, 1, service.repoFileCache.Len()) @@ -293,12 +360,12 @@ func TestService_HardRefresh_ListRefs_GitHub(t *testing.T) { service := newService(context.TODO(), 2, 0) repositoryUrl := privateGitRepoURL - refs, err := service.ListRefs(repositoryUrl, username, accessToken, false, false) + refs, err := service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false) assert.NoError(t, err) assert.GreaterOrEqual(t, len(refs), 1) assert.Equal(t, 1, service.repoRefCache.Len()) - _, err = service.ListRefs(repositoryUrl, username, "fake-token", false, false) + _, err = service.ListRefs(repositoryUrl, username, "fake-token", gittypes.GitCredentialAuthType_Basic, false, false) assert.Error(t, err) assert.Equal(t, 1, service.repoRefCache.Len()) } @@ -311,26 +378,46 @@ func TestService_HardRefresh_ListRefs_And_RemoveAllCaches_GitHub(t *testing.T) { service := newService(context.TODO(), 2, 0) repositoryUrl := privateGitRepoURL - refs, err := service.ListRefs(repositoryUrl, username, accessToken, false, false) + refs, err := service.ListRefs(repositoryUrl, username, accessToken, gittypes.GitCredentialAuthType_Basic, false, false) assert.NoError(t, err) assert.GreaterOrEqual(t, len(refs), 1) assert.Equal(t, 1, service.repoRefCache.Len()) - files, err := service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false) + files, err := service.ListFiles( + repositoryUrl, + "refs/heads/main", + username, + accessToken, + gittypes.GitCredentialAuthType_Basic, + false, + false, + []string{}, + false, + ) assert.NoError(t, err) assert.GreaterOrEqual(t, len(files), 1) assert.Equal(t, 1, service.repoFileCache.Len()) - files, err = service.ListFiles(repositoryUrl, "refs/heads/test", username, accessToken, false, false, []string{}, false) + files, err = service.ListFiles( + repositoryUrl, + 
"refs/heads/test", + username, + accessToken, + gittypes.GitCredentialAuthType_Basic, + false, + false, + []string{}, + false, + ) assert.NoError(t, err) assert.GreaterOrEqual(t, len(files), 1) assert.Equal(t, 2, service.repoFileCache.Len()) - _, err = service.ListRefs(repositoryUrl, username, "fake-token", false, false) + _, err = service.ListRefs(repositoryUrl, username, "fake-token", gittypes.GitCredentialAuthType_Basic, false, false) assert.Error(t, err) assert.Equal(t, 1, service.repoRefCache.Len()) - _, err = service.ListRefs(repositoryUrl, username, "fake-token", true, false) + _, err = service.ListRefs(repositoryUrl, username, "fake-token", gittypes.GitCredentialAuthType_Basic, true, false) assert.Error(t, err) assert.Equal(t, 1, service.repoRefCache.Len()) // The relevant file caches should be removed too @@ -344,12 +431,72 @@ func TestService_HardRefresh_ListFiles_GitHub(t *testing.T) { accessToken := getRequiredValue(t, "GITHUB_PAT") username := getRequiredValue(t, "GITHUB_USERNAME") repositoryUrl := privateGitRepoURL - files, err := service.ListFiles(repositoryUrl, "refs/heads/main", username, accessToken, false, false, []string{}, false) + files, err := service.ListFiles( + repositoryUrl, + "refs/heads/main", + username, + accessToken, + gittypes.GitCredentialAuthType_Basic, + false, + false, + []string{}, + false, + ) assert.NoError(t, err) assert.GreaterOrEqual(t, len(files), 1) assert.Equal(t, 1, service.repoFileCache.Len()) - _, err = service.ListFiles(repositoryUrl, "refs/heads/main", username, "fake-token", false, true, []string{}, false) + _, err = service.ListFiles( + repositoryUrl, + "refs/heads/main", + username, + "fake-token", + gittypes.GitCredentialAuthType_Basic, + false, + true, + []string{}, + false, + ) assert.Error(t, err) assert.Equal(t, 0, service.repoFileCache.Len()) } + +func TestService_CloneRepository_TokenAuth(t *testing.T) { + ensureIntegrationTest(t) + + service := newService(context.TODO(), 2, 0) + var requests []*http.Request + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests = append(requests, r) + })) + accessToken := "test_access_token" + username := "test_username" + repositoryUrl := testServer.URL + + // Since we aren't hitting a real git server we ignore the error + _ = service.CloneRepository( + "test_dir", + repositoryUrl, + "refs/heads/main", + username, + accessToken, + gittypes.GitCredentialAuthType_Token, + false, + ) + + testServer.Close() + + if len(requests) != 1 { + t.Fatalf("expected 1 request sent but got %d", len(requests)) + } + + gotAuthHeader := requests[0].Header.Get("Authorization") + if gotAuthHeader == "" { + t.Fatal("no Authorization header in git request") + } + + expectedAuthHeader := "Bearer test_access_token" + if gotAuthHeader != expectedAuthHeader { + t.Fatalf("expected Authorization header %q but got %q", expectedAuthHeader, gotAuthHeader) + } +} diff --git a/api/git/git_test.go b/api/git/git_test.go index 81efa2688..fc0db196d 100644 --- a/api/git/git_test.go +++ b/api/git/git_test.go @@ -38,7 +38,7 @@ func Test_ClonePublicRepository_Shallow(t *testing.T) { dir := t.TempDir() t.Logf("Cloning into %s", dir) - err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", false) + err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", gittypes.GitCredentialAuthType_Basic, false) assert.NoError(t, err) assert.Equal(t, 1, getCommitHistoryLength(t, err, dir), "cloned repo has incorrect depth") } @@ -50,7 +50,7 @@ func 
Test_ClonePublicRepository_NoGitDirectory(t *testing.T) { dir := t.TempDir() t.Logf("Cloning into %s", dir) - err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", false) + err := service.CloneRepository(dir, repositoryURL, referenceName, "", "", gittypes.GitCredentialAuthType_Basic, false) assert.NoError(t, err) assert.NoDirExists(t, filepath.Join(dir, ".git")) } @@ -84,7 +84,7 @@ func Test_latestCommitID(t *testing.T) { repositoryURL := setup(t) referenceName := "refs/heads/main" - id, err := service.LatestCommitID(repositoryURL, referenceName, "", "", false) + id, err := service.LatestCommitID(repositoryURL, referenceName, "", "", gittypes.GitCredentialAuthType_Basic, false) assert.NoError(t, err) assert.Equal(t, "68dcaa7bd452494043c64252ab90db0f98ecf8d2", id) @@ -95,7 +95,7 @@ func Test_ListRefs(t *testing.T) { repositoryURL := setup(t) - fs, err := service.ListRefs(repositoryURL, "", "", false, false) + fs, err := service.ListRefs(repositoryURL, "", "", gittypes.GitCredentialAuthType_Basic, false, false) assert.NoError(t, err) assert.Equal(t, []string{"refs/heads/main"}, fs) @@ -107,7 +107,17 @@ func Test_ListFiles(t *testing.T) { repositoryURL := setup(t) referenceName := "refs/heads/main" - fs, err := service.ListFiles(repositoryURL, referenceName, "", "", false, false, []string{".yml"}, false) + fs, err := service.ListFiles( + repositoryURL, + referenceName, + "", + "", + gittypes.GitCredentialAuthType_Basic, + false, + false, + []string{".yml"}, + false, + ) assert.NoError(t, err) assert.Equal(t, []string{"docker-compose.yml"}, fs) @@ -255,7 +265,7 @@ func Test_listFilesPrivateRepository(t *testing.T) { name: "list tree with real repository and head ref but no credential", args: fetchOption{ baseOption: baseOption{ - repositoryUrl: privateGitRepoURL + "fake", + repositoryUrl: privateGitRepoURL, username: "", password: "", }, diff --git a/api/git/service.go b/api/git/service.go index 3e995eccd..834e0c827 100644 --- a/api/git/service.go +++ b/api/git/service.go @@ -8,6 +8,7 @@ import ( "time" lru "github.com/hashicorp/golang-lru" + gittypes "github.com/portainer/portainer/api/git/types" "github.com/rs/zerolog/log" "golang.org/x/sync/singleflight" ) @@ -22,6 +23,7 @@ type baseOption struct { repositoryUrl string username string password string + authType gittypes.GitCredentialAuthType tlsSkipVerify bool } @@ -123,13 +125,22 @@ func (service *Service) timerHasStopped() bool { // CloneRepository clones a git repository using the specified URL in the specified // destination folder. 
-func (service *Service) CloneRepository(destination, repositoryURL, referenceName, username, password string, tlsSkipVerify bool) error { +func (service *Service) CloneRepository( + destination, + repositoryURL, + referenceName, + username, + password string, + authType gittypes.GitCredentialAuthType, + tlsSkipVerify bool, +) error { options := cloneOption{ fetchOption: fetchOption{ baseOption: baseOption{ repositoryUrl: repositoryURL, username: username, password: password, + authType: authType, tlsSkipVerify: tlsSkipVerify, }, referenceName: referenceName, @@ -155,12 +166,20 @@ func (service *Service) cloneRepository(destination string, options cloneOption) } // LatestCommitID returns SHA1 of the latest commit of the specified reference -func (service *Service) LatestCommitID(repositoryURL, referenceName, username, password string, tlsSkipVerify bool) (string, error) { +func (service *Service) LatestCommitID( + repositoryURL, + referenceName, + username, + password string, + authType gittypes.GitCredentialAuthType, + tlsSkipVerify bool, +) (string, error) { options := fetchOption{ baseOption: baseOption{ repositoryUrl: repositoryURL, username: username, password: password, + authType: authType, tlsSkipVerify: tlsSkipVerify, }, referenceName: referenceName, @@ -170,7 +189,14 @@ func (service *Service) LatestCommitID(repositoryURL, referenceName, username, p } // ListRefs will list target repository's references without cloning the repository -func (service *Service) ListRefs(repositoryURL, username, password string, hardRefresh bool, tlsSkipVerify bool) ([]string, error) { +func (service *Service) ListRefs( + repositoryURL, + username, + password string, + authType gittypes.GitCredentialAuthType, + hardRefresh bool, + tlsSkipVerify bool, +) ([]string, error) { refCacheKey := generateCacheKey(repositoryURL, username, password, strconv.FormatBool(tlsSkipVerify)) if service.cacheEnabled && hardRefresh { // Should remove the cache explicitly, so that the following normal list can show the correct result @@ -196,6 +222,7 @@ func (service *Service) ListRefs(repositoryURL, username, password string, hardR repositoryUrl: repositoryURL, username: username, password: password, + authType: authType, tlsSkipVerify: tlsSkipVerify, } @@ -215,18 +242,62 @@ var singleflightGroup = &singleflight.Group{} // ListFiles will list all the files of the target repository with specific extensions. 
// If extension is not provided, it will list all the files under the target repository -func (service *Service) ListFiles(repositoryURL, referenceName, username, password string, dirOnly, hardRefresh bool, includedExts []string, tlsSkipVerify bool) ([]string, error) { - repoKey := generateCacheKey(repositoryURL, referenceName, username, password, strconv.FormatBool(tlsSkipVerify), strconv.FormatBool(dirOnly)) +func (service *Service) ListFiles( + repositoryURL, + referenceName, + username, + password string, + authType gittypes.GitCredentialAuthType, + dirOnly, + hardRefresh bool, + includedExts []string, + tlsSkipVerify bool, +) ([]string, error) { + repoKey := generateCacheKey( + repositoryURL, + referenceName, + username, + password, + strconv.FormatBool(tlsSkipVerify), + strconv.Itoa(int(authType)), + strconv.FormatBool(dirOnly), + ) fs, err, _ := singleflightGroup.Do(repoKey, func() (any, error) { - return service.listFiles(repositoryURL, referenceName, username, password, dirOnly, hardRefresh, tlsSkipVerify) + return service.listFiles( + repositoryURL, + referenceName, + username, + password, + authType, + dirOnly, + hardRefresh, + tlsSkipVerify, + ) }) return filterFiles(fs.([]string), includedExts), err } -func (service *Service) listFiles(repositoryURL, referenceName, username, password string, dirOnly, hardRefresh bool, tlsSkipVerify bool) ([]string, error) { - repoKey := generateCacheKey(repositoryURL, referenceName, username, password, strconv.FormatBool(tlsSkipVerify), strconv.FormatBool(dirOnly)) +func (service *Service) listFiles( + repositoryURL, + referenceName, + username, + password string, + authType gittypes.GitCredentialAuthType, + dirOnly, + hardRefresh bool, + tlsSkipVerify bool, +) ([]string, error) { + repoKey := generateCacheKey( + repositoryURL, + referenceName, + username, + password, + strconv.FormatBool(tlsSkipVerify), + strconv.Itoa(int(authType)), + strconv.FormatBool(dirOnly), + ) if service.cacheEnabled && hardRefresh { // Should remove the cache explicitly, so that the following normal list can show the correct result @@ -247,6 +318,7 @@ func (service *Service) listFiles(repositoryURL, referenceName, username, passwo repositoryUrl: repositoryURL, username: username, password: password, + authType: authType, tlsSkipVerify: tlsSkipVerify, }, referenceName: referenceName, diff --git a/api/git/types/types.go b/api/git/types/types.go index 12d95e093..cb9d7cf03 100644 --- a/api/git/types/types.go +++ b/api/git/types/types.go @@ -1,12 +1,21 @@ package gittypes -import "errors" +import ( + "errors" +) var ( ErrIncorrectRepositoryURL = errors.New("git repository could not be found, please ensure that the URL is correct") ErrAuthenticationFailure = errors.New("authentication failed, please ensure that the git credentials are correct") ) +type GitCredentialAuthType int + +const ( + GitCredentialAuthType_Basic GitCredentialAuthType = iota + GitCredentialAuthType_Token +) + // RepoConfig represents a configuration for a repo type RepoConfig struct { // The repo url @@ -24,10 +33,11 @@ type RepoConfig struct { } type GitAuthentication struct { - Username string - Password string + Username string + Password string + AuthorizationType GitCredentialAuthType // Git credentials identifier when the value is not 0 - // When the value is 0, Username and Password are set without using saved credential + // When the value is 0, Username, Password, and Authtype are set without using saved credential // This is introduced since 2.15.0 GitCredentialID int `example:"0"` } diff 
--git a/api/git/update/update.go b/api/git/update/update.go index 203e361dd..780d6e046 100644 --- a/api/git/update/update.go +++ b/api/git/update/update.go @@ -29,7 +29,14 @@ func UpdateGitObject(gitService portainer.GitService, objId string, gitConfig *g return false, "", errors.WithMessagef(err, "failed to get credentials for %v", objId) } - newHash, err := gitService.LatestCommitID(gitConfig.URL, gitConfig.ReferenceName, username, password, gitConfig.TLSSkipVerify) + newHash, err := gitService.LatestCommitID( + gitConfig.URL, + gitConfig.ReferenceName, + username, + password, + gittypes.GitCredentialAuthType_Basic, + gitConfig.TLSSkipVerify, + ) if err != nil { return false, "", errors.WithMessagef(err, "failed to fetch latest commit id of %v", objId) } @@ -62,6 +69,7 @@ func UpdateGitObject(gitService portainer.GitService, objId string, gitConfig *g cloneParams.auth = &gitAuth{ username: username, password: password, + authType: gitConfig.Authentication.AuthorizationType, } } @@ -89,14 +97,31 @@ type cloneRepositoryParameters struct { } type gitAuth struct { + authType gittypes.GitCredentialAuthType username string password string } func cloneGitRepository(gitService portainer.GitService, cloneParams *cloneRepositoryParameters) error { if cloneParams.auth != nil { - return gitService.CloneRepository(cloneParams.toDir, cloneParams.url, cloneParams.ref, cloneParams.auth.username, cloneParams.auth.password, cloneParams.tlsSkipVerify) + return gitService.CloneRepository( + cloneParams.toDir, + cloneParams.url, + cloneParams.ref, + cloneParams.auth.username, + cloneParams.auth.password, + cloneParams.auth.authType, + cloneParams.tlsSkipVerify, + ) } - return gitService.CloneRepository(cloneParams.toDir, cloneParams.url, cloneParams.ref, "", "", cloneParams.tlsSkipVerify) + return gitService.CloneRepository( + cloneParams.toDir, + cloneParams.url, + cloneParams.ref, + "", + "", + gittypes.GitCredentialAuthType_Basic, + cloneParams.tlsSkipVerify, + ) } diff --git a/api/git/update/validate.go b/api/git/update/validate.go index c1b7364ce..66805895d 100644 --- a/api/git/update/validate.go +++ b/api/git/update/validate.go @@ -3,9 +3,9 @@ package update import ( "time" - "github.com/asaskevich/govalidator" portainer "github.com/portainer/portainer/api" httperrors "github.com/portainer/portainer/api/http/errors" + "github.com/portainer/portainer/pkg/validate" ) func ValidateAutoUpdateSettings(autoUpdate *portainer.AutoUpdateSettings) error { @@ -17,7 +17,7 @@ func ValidateAutoUpdateSettings(autoUpdate *portainer.AutoUpdateSettings) error return httperrors.NewInvalidPayloadError("Webhook or Interval must be provided") } - if autoUpdate.Webhook != "" && !govalidator.IsUUID(autoUpdate.Webhook) { + if autoUpdate.Webhook != "" && !validate.IsUUID(autoUpdate.Webhook) { return httperrors.NewInvalidPayloadError("invalid Webhook format") } diff --git a/api/git/validate.go b/api/git/validate.go index 8fc04abf6..1304494dd 100644 --- a/api/git/validate.go +++ b/api/git/validate.go @@ -1,19 +1,17 @@ package git import ( - "github.com/asaskevich/govalidator" - gittypes "github.com/portainer/portainer/api/git/types" httperrors "github.com/portainer/portainer/api/http/errors" + "github.com/portainer/portainer/pkg/validate" ) func ValidateRepoConfig(repoConfig *gittypes.RepoConfig) error { - if len(repoConfig.URL) == 0 || !govalidator.IsURL(repoConfig.URL) { + if len(repoConfig.URL) == 0 || !validate.IsURL(repoConfig.URL) { return httperrors.NewInvalidPayloadError("Invalid repository URL. 
Must correspond to a valid URL format") } return ValidateRepoAuthentication(repoConfig.Authentication) - } func ValidateRepoAuthentication(auth *gittypes.GitAuthentication) error { diff --git a/api/hostmanagement/openamt/openamt.go b/api/hostmanagement/openamt/openamt.go index b27b78878..4b5f51186 100644 --- a/api/hostmanagement/openamt/openamt.go +++ b/api/hostmanagement/openamt/openamt.go @@ -32,15 +32,12 @@ type Service struct { } // NewService initializes a new service. -func NewService() *Service { - tlsConfig := crypto.CreateTLSConfiguration() - tlsConfig.InsecureSkipVerify = true - +func NewService(insecureSkipVerify bool) *Service { return &Service{ httpsClient: &http.Client{ Timeout: httpClientTimeout, Transport: &http.Transport{ - TLSClientConfig: tlsConfig, + TLSClientConfig: crypto.CreateTLSConfiguration(insecureSkipVerify), }, }, } diff --git a/api/hostmanagement/openamt/openamt_test.go b/api/hostmanagement/openamt/openamt_test.go new file mode 100644 index 000000000..a3bfad49d --- /dev/null +++ b/api/hostmanagement/openamt/openamt_test.go @@ -0,0 +1,14 @@ +package openamt + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewService(t *testing.T) { + service := NewService(true) + require.NotNil(t, service) + require.True(t, service.httpsClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify) //nolint:forbidigo +} diff --git a/api/http/client/client.go b/api/http/client/client.go index 2a7d0bf9b..ba253162f 100644 --- a/api/http/client/client.go +++ b/api/http/client/client.go @@ -1,7 +1,6 @@ package client import ( - "crypto/tls" "errors" "fmt" "io" @@ -11,6 +10,7 @@ import ( "time" portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/crypto" "github.com/rs/zerolog/log" "github.com/segmentio/encoding/json" @@ -105,21 +105,28 @@ func Get(url string, timeout int) ([]byte, error) { // ExecutePingOperation will send a SystemPing operation HTTP request to a Docker environment(endpoint) // using the specified host and optional TLS configuration. // It uses a new Http.Client for each operation. 
-func ExecutePingOperation(host string, tlsConfig *tls.Config) (bool, error) { +func ExecutePingOperation(host string, tlsConfiguration portainer.TLSConfiguration) (bool, error) { transport := &http.Transport{} scheme := "http" - if tlsConfig != nil { + + if tlsConfiguration.TLS { + tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(tlsConfiguration) + if err != nil { + return false, err + } + transport.TLSClientConfig = tlsConfig scheme = "https" } client := &http.Client{ - Timeout: time.Second * 3, + Timeout: 3 * time.Second, Transport: transport, } target := strings.Replace(host, "tcp://", scheme+"://", 1) + return pingOperation(client, target) } diff --git a/api/http/client/client_test.go b/api/http/client/client_test.go new file mode 100644 index 000000000..d7ffe99ff --- /dev/null +++ b/api/http/client/client_test.go @@ -0,0 +1,31 @@ +package client + +import ( + "testing" + + portainer "github.com/portainer/portainer/api" + + "github.com/stretchr/testify/require" +) + +func TestExecutePingOperationFailure(t *testing.T) { + host := "http://localhost:1" + config := portainer.TLSConfiguration{ + TLS: true, + TLSSkipVerify: true, + } + + // Invalid host + ok, err := ExecutePingOperation(host, config) + require.False(t, ok) + require.Error(t, err) + + // Invalid TLS configuration + config.TLSCertPath = "/invalid/path/to/cert" + config.TLSKeyPath = "/invalid/path/to/key" + + ok, err = ExecutePingOperation(host, config) + require.False(t, ok) + require.Error(t, err) + +} diff --git a/api/http/csrf/csrf.go b/api/http/csrf/csrf.go index 857d72c8b..6205c9290 100644 --- a/api/http/csrf/csrf.go +++ b/api/http/csrf/csrf.go @@ -2,6 +2,7 @@ package csrf import ( "crypto/rand" + "errors" "fmt" "net/http" "os" @@ -9,7 +10,8 @@ import ( "github.com/portainer/portainer/api/http/security" httperror "github.com/portainer/portainer/pkg/libhttp/error" - gorillacsrf "github.com/gorilla/csrf" + gcsrf "github.com/gorilla/csrf" + "github.com/rs/zerolog/log" "github.com/urfave/negroni" ) @@ -19,7 +21,7 @@ func SkipCSRFToken(w http.ResponseWriter) { w.Header().Set(csrfSkipHeader, "1") } -func WithProtect(handler http.Handler) (http.Handler, error) { +func WithProtect(handler http.Handler, trustedOrigins []string) (http.Handler, error) { // IsDockerDesktopExtension is used to check if we should skip csrf checks in the request bouncer (ShouldSkipCSRFCheck) // DOCKER_EXTENSION is set to '1' in build/docker-extension/docker-compose.yml isDockerDesktopExtension := false @@ -34,10 +36,12 @@ func WithProtect(handler http.Handler) (http.Handler, error) { return nil, fmt.Errorf("failed to generate CSRF token: %w", err) } - handler = gorillacsrf.Protect( + handler = gcsrf.Protect( token, - gorillacsrf.Path("/"), - gorillacsrf.Secure(false), + gcsrf.Path("/"), + gcsrf.Secure(false), + gcsrf.TrustedOrigins(trustedOrigins), + gcsrf.ErrorHandler(withErrorHandler(trustedOrigins)), )(handler) return withSkipCSRF(handler, isDockerDesktopExtension), nil @@ -55,7 +59,7 @@ func withSendCSRFToken(handler http.Handler) http.Handler { } if statusCode := sw.Status(); statusCode >= 200 && statusCode < 300 { - sw.Header().Set("X-CSRF-Token", gorillacsrf.Token(r)) + sw.Header().Set("X-CSRF-Token", gcsrf.Token(r)) } }) @@ -73,9 +77,33 @@ func withSkipCSRF(handler http.Handler, isDockerDesktopExtension bool) http.Hand } if skip { - r = gorillacsrf.UnsafeSkipCheck(r) + r = gcsrf.UnsafeSkipCheck(r) } handler.ServeHTTP(w, r) }) } + +func withErrorHandler(trustedOrigins []string) http.Handler { + return http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + err := gcsrf.FailureReason(r) + + if errors.Is(err, gcsrf.ErrBadOrigin) || errors.Is(err, gcsrf.ErrBadReferer) || errors.Is(err, gcsrf.ErrNoReferer) { + log.Error().Err(err). + Str("request_url", r.URL.String()). + Str("host", r.Host). + Str("x_forwarded_proto", r.Header.Get("X-Forwarded-Proto")). + Str("forwarded", r.Header.Get("Forwarded")). + Str("origin", r.Header.Get("Origin")). + Str("referer", r.Header.Get("Referer")). + Strs("trusted_origins", trustedOrigins). + Msg("Failed to validate Origin or Referer") + } + + http.Error( + w, + http.StatusText(http.StatusForbidden)+" - "+err.Error(), + http.StatusForbidden, + ) + }) +} diff --git a/api/http/handler/auth/authenticate.go b/api/http/handler/auth/authenticate.go index 989949daa..4df31c92c 100644 --- a/api/http/handler/auth/authenticate.go +++ b/api/http/handler/auth/authenticate.go @@ -2,6 +2,7 @@ package auth import ( "net/http" + "strconv" "strings" portainer "github.com/portainer/portainer/api" @@ -82,6 +83,11 @@ func (handler *Handler) authenticate(rw http.ResponseWriter, r *http.Request) *h } } + // Clear any existing user caches + if user != nil { + handler.KubernetesClientFactory.ClearUserClientCache(strconv.Itoa(int(user.ID))) + } + if user != nil && isUserInitialAdmin(user) || settings.AuthenticationMethod == portainer.AuthenticationInternal { return handler.authenticateInternal(rw, user, payload.Password) } diff --git a/api/http/handler/auth/handler.go b/api/http/handler/auth/handler.go index 3b7210fbf..035ceabf8 100644 --- a/api/http/handler/auth/handler.go +++ b/api/http/handler/auth/handler.go @@ -8,6 +8,7 @@ import ( "github.com/portainer/portainer/api/http/proxy" "github.com/portainer/portainer/api/http/proxy/factory/kubernetes" "github.com/portainer/portainer/api/http/security" + "github.com/portainer/portainer/api/kubernetes/cli" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/gorilla/mux" @@ -23,16 +24,18 @@ type Handler struct { OAuthService portainer.OAuthService ProxyManager *proxy.Manager KubernetesTokenCacheManager *kubernetes.TokenCacheManager + KubernetesClientFactory *cli.ClientFactory passwordStrengthChecker security.PasswordStrengthChecker bouncer security.BouncerService } // NewHandler creates a handler to manage authentication operations. 
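+// The Kubernetes client factory is used to clear cached per-user Kubernetes clients on login and logout.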
-func NewHandler(bouncer security.BouncerService, rateLimiter *security.RateLimiter, passwordStrengthChecker security.PasswordStrengthChecker) *Handler { +func NewHandler(bouncer security.BouncerService, rateLimiter *security.RateLimiter, passwordStrengthChecker security.PasswordStrengthChecker, kubernetesClientFactory *cli.ClientFactory) *Handler { h := &Handler{ Router: mux.NewRouter(), passwordStrengthChecker: passwordStrengthChecker, bouncer: bouncer, + KubernetesClientFactory: kubernetesClientFactory, } h.Handle("/auth/oauth/validate", diff --git a/api/http/handler/auth/logout.go b/api/http/handler/auth/logout.go index 977fafa69..73288565d 100644 --- a/api/http/handler/auth/logout.go +++ b/api/http/handler/auth/logout.go @@ -2,6 +2,7 @@ package auth import ( "net/http" + "strconv" "github.com/portainer/portainer/api/http/security" "github.com/portainer/portainer/api/logoutcontext" @@ -23,6 +24,7 @@ func (handler *Handler) logout(w http.ResponseWriter, r *http.Request) *httperro if tokenData != nil { handler.KubernetesTokenCacheManager.RemoveUserFromCache(tokenData.ID) + handler.KubernetesClientFactory.ClearUserClientCache(strconv.Itoa(int(tokenData.ID))) logoutcontext.Cancel(tokenData.Token) } diff --git a/api/http/handler/backup/backup_test.go b/api/http/handler/backup/backup_test.go index 51fdf3e95..2755e1fd8 100644 --- a/api/http/handler/backup/backup_test.go +++ b/api/http/handler/backup/backup_test.go @@ -18,10 +18,15 @@ import ( "github.com/portainer/portainer/api/crypto" "github.com/portainer/portainer/api/http/offlinegate" "github.com/portainer/portainer/api/internal/testhelpers" + "github.com/portainer/portainer/pkg/fips" "github.com/stretchr/testify/assert" ) +func init() { + fips.InitFIPS(false) +} + func listFiles(dir string) []string { items := make([]string, 0) filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { diff --git a/api/http/handler/customtemplates/customtemplate_create.go b/api/http/handler/customtemplates/customtemplate_create.go index a5b6ecdee..104ea90a4 100644 --- a/api/http/handler/customtemplates/customtemplate_create.go +++ b/api/http/handler/customtemplates/customtemplate_create.go @@ -16,8 +16,8 @@ import ( httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" "github.com/portainer/portainer/pkg/libhttp/response" + "github.com/portainer/portainer/pkg/validate" - "github.com/asaskevich/govalidator" "github.com/rs/zerolog/log" "github.com/segmentio/encoding/json" ) @@ -228,7 +228,7 @@ func (payload *customTemplateFromGitRepositoryPayload) Validate(r *http.Request) if len(payload.Description) == 0 { return errors.New("Invalid custom template description") } - if len(payload.RepositoryURL) == 0 || !govalidator.IsURL(payload.RepositoryURL) { + if len(payload.RepositoryURL) == 0 || !validate.IsURL(payload.RepositoryURL) { return errors.New("Invalid repository URL. Must correspond to a valid URL format") } if payload.RepositoryAuthentication && (len(payload.RepositoryUsername) == 0 || len(payload.RepositoryPassword) == 0) { @@ -482,28 +482,3 @@ func (handler *Handler) createCustomTemplateFromFileUpload(r *http.Request) (*po return customTemplate, nil } - -// @id CustomTemplateCreate -// @summary Create a custom template -// @description Create a custom template. 
-// @description **Access policy**: authenticated -// @tags custom_templates -// @security ApiKeyAuth -// @security jwt -// @accept json,multipart/form-data -// @produce json -// @param method query string true "method for creating template" Enums(string, file, repository) -// @param body body object true "for body documentation see the relevant /custom_templates/{method} endpoint" -// @success 200 {object} portainer.CustomTemplate -// @failure 400 "Invalid request" -// @failure 500 "Server error" -// @deprecated -// @router /custom_templates [post] -func deprecatedCustomTemplateCreateUrlParser(w http.ResponseWriter, r *http.Request) (string, *httperror.HandlerError) { - method, err := request.RetrieveQueryParameter(r, "method", false) - if err != nil { - return "", httperror.BadRequest("Invalid query parameter: method", err) - } - - return "/custom_templates/create/" + method, nil -} diff --git a/api/http/handler/customtemplates/customtemplate_git_fetch_test.go b/api/http/handler/customtemplates/customtemplate_git_fetch_test.go index 60ed1666f..6cb614449 100644 --- a/api/http/handler/customtemplates/customtemplate_git_fetch_test.go +++ b/api/http/handler/customtemplates/customtemplate_git_fetch_test.go @@ -20,12 +20,17 @@ import ( "github.com/portainer/portainer/api/internal/authorization" "github.com/portainer/portainer/api/internal/testhelpers" "github.com/portainer/portainer/api/jwt" + "github.com/portainer/portainer/pkg/fips" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/segmentio/encoding/json" "github.com/stretchr/testify/assert" ) +func init() { + fips.InitFIPS(false) +} + var testFileContent = "abcdefg" type TestGitService struct { @@ -33,13 +38,28 @@ type TestGitService struct { targetFilePath string } -func (g *TestGitService) CloneRepository(destination string, repositoryURL, referenceName string, username, password string, tlsSkipVerify bool) error { +func (g *TestGitService) CloneRepository( + destination string, + repositoryURL, + referenceName string, + username, + password string, + authType gittypes.GitCredentialAuthType, + tlsSkipVerify bool, +) error { time.Sleep(100 * time.Millisecond) return createTestFile(g.targetFilePath) } -func (g *TestGitService) LatestCommitID(repositoryURL, referenceName, username, password string, tlsSkipVerify bool) (string, error) { +func (g *TestGitService) LatestCommitID( + repositoryURL, + referenceName, + username, + password string, + authType gittypes.GitCredentialAuthType, + tlsSkipVerify bool, +) (string, error) { return "", nil } @@ -56,11 +76,26 @@ type InvalidTestGitService struct { targetFilePath string } -func (g *InvalidTestGitService) CloneRepository(dest, repoUrl, refName, username, password string, tlsSkipVerify bool) error { +func (g *InvalidTestGitService) CloneRepository( + dest, + repoUrl, + refName, + username, + password string, + authType gittypes.GitCredentialAuthType, + tlsSkipVerify bool, +) error { return errors.New("simulate network error") } -func (g *InvalidTestGitService) LatestCommitID(repositoryURL, referenceName, username, password string, tlsSkipVerify bool) (string, error) { +func (g *InvalidTestGitService) LatestCommitID( + repositoryURL, + referenceName, + username, + password string, + authType gittypes.GitCredentialAuthType, + tlsSkipVerify bool, +) (string, error) { return "", nil } diff --git a/api/http/handler/customtemplates/customtemplate_list.go b/api/http/handler/customtemplates/customtemplate_list.go index 581b219ae..c96d61523 100644 --- 
a/api/http/handler/customtemplates/customtemplate_list.go +++ b/api/http/handler/customtemplates/customtemplate_list.go @@ -71,7 +71,7 @@ func (handler *Handler) customTemplateList(w http.ResponseWriter, r *http.Reques customTemplates = filterByType(customTemplates, templateTypes) if edge != nil { - customTemplates = slicesx.Filter(customTemplates, func(customTemplate portainer.CustomTemplate) bool { + customTemplates = slicesx.FilterInPlace(customTemplates, func(customTemplate portainer.CustomTemplate) bool { return customTemplate.EdgeTemplate == *edge }) } diff --git a/api/http/handler/customtemplates/customtemplate_update.go b/api/http/handler/customtemplates/customtemplate_update.go index 80c42b6fa..f12eeb2e1 100644 --- a/api/http/handler/customtemplates/customtemplate_update.go +++ b/api/http/handler/customtemplates/customtemplate_update.go @@ -15,8 +15,7 @@ import ( httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" "github.com/portainer/portainer/pkg/libhttp/response" - - "github.com/asaskevich/govalidator" + "github.com/portainer/portainer/pkg/validate" ) type customTemplateUpdatePayload struct { @@ -38,14 +37,16 @@ type customTemplateUpdatePayload struct { RepositoryURL string `example:"https://github.com/openfaas/faas" validate:"required"` // Reference name of a Git repository hosting the Stack file RepositoryReferenceName string `example:"refs/heads/master"` - // Use basic authentication to clone the Git repository + // Use authentication to clone the Git repository RepositoryAuthentication bool `example:"true"` // Username used in basic authentication. Required when RepositoryAuthentication is true - // and RepositoryGitCredentialID is 0 + // and RepositoryGitCredentialID is 0. Ignored if RepositoryAuthType is token RepositoryUsername string `example:"myGitUsername"` - // Password used in basic authentication. Required when RepositoryAuthentication is true - // and RepositoryGitCredentialID is 0 + // Password used in basic authentication or token used in token authentication. + // Required when RepositoryAuthentication is true and RepositoryGitCredentialID is 0 RepositoryPassword string `example:"myGitPassword"` + // RepositoryAuthorizationType is the authorization type to use + RepositoryAuthorizationType gittypes.GitCredentialAuthType `example:"0"` // GitCredentialID used to identify the bound git credential. Required when RepositoryAuthentication // is true and RepositoryUsername/RepositoryPassword are not provided RepositoryGitCredentialID int `example:"0"` @@ -170,7 +171,7 @@ func (handler *Handler) customTemplateUpdate(w http.ResponseWriter, r *http.Requ customTemplate.EdgeTemplate = payload.EdgeTemplate if payload.RepositoryURL != "" { - if !govalidator.IsURL(payload.RepositoryURL) { + if !validate.IsURL(payload.RepositoryURL) { return httperror.BadRequest("Invalid repository URL. 
Must correspond to a valid URL format", err) } @@ -183,12 +184,15 @@ func (handler *Handler) customTemplateUpdate(w http.ResponseWriter, r *http.Requ repositoryUsername := "" repositoryPassword := "" + repositoryAuthType := gittypes.GitCredentialAuthType_Basic if payload.RepositoryAuthentication { repositoryUsername = payload.RepositoryUsername repositoryPassword = payload.RepositoryPassword + repositoryAuthType = payload.RepositoryAuthorizationType gitConfig.Authentication = &gittypes.GitAuthentication{ - Username: payload.RepositoryUsername, - Password: payload.RepositoryPassword, + Username: payload.RepositoryUsername, + Password: payload.RepositoryPassword, + AuthorizationType: payload.RepositoryAuthorizationType, } } @@ -198,6 +202,7 @@ func (handler *Handler) customTemplateUpdate(w http.ResponseWriter, r *http.Requ ReferenceName: gitConfig.ReferenceName, Username: repositoryUsername, Password: repositoryPassword, + AuthType: repositoryAuthType, TLSSkipVerify: gitConfig.TLSSkipVerify, }) if err != nil { @@ -206,7 +211,14 @@ func (handler *Handler) customTemplateUpdate(w http.ResponseWriter, r *http.Requ defer cleanBackup() - commitHash, err := handler.GitService.LatestCommitID(gitConfig.URL, gitConfig.ReferenceName, repositoryUsername, repositoryPassword, gitConfig.TLSSkipVerify) + commitHash, err := handler.GitService.LatestCommitID( + gitConfig.URL, + gitConfig.ReferenceName, + repositoryUsername, + repositoryPassword, + repositoryAuthType, + gitConfig.TLSSkipVerify, + ) if err != nil { return httperror.InternalServerError("Unable get latest commit id", fmt.Errorf("failed to fetch latest commit id of the template %v: %w", customTemplate.ID, err)) } diff --git a/api/http/handler/customtemplates/handler.go b/api/http/handler/customtemplates/handler.go index 1bb148af6..0da63d81f 100644 --- a/api/http/handler/customtemplates/handler.go +++ b/api/http/handler/customtemplates/handler.go @@ -7,7 +7,6 @@ import ( "github.com/gorilla/mux" portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" - "github.com/portainer/portainer/api/http/middlewares" "github.com/portainer/portainer/api/http/security" httperror "github.com/portainer/portainer/pkg/libhttp/error" ) @@ -33,7 +32,6 @@ func NewHandler(bouncer security.BouncerService, dataStore dataservices.DataStor h.Handle("/custom_templates/create/{method}", bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.customTemplateCreate))).Methods(http.MethodPost) - h.Handle("/custom_templates", middlewares.Deprecated(h, deprecatedCustomTemplateCreateUrlParser)).Methods(http.MethodPost) // Deprecated h.Handle("/custom_templates", bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.customTemplateList))).Methods(http.MethodGet) h.Handle("/custom_templates/{id}", diff --git a/api/http/handler/docker/dashboard.go b/api/http/handler/docker/dashboard.go index ad0399569..97d40e069 100644 --- a/api/http/handler/docker/dashboard.go +++ b/api/http/handler/docker/dashboard.go @@ -6,6 +6,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/volume" portainer "github.com/portainer/portainer/api" @@ -116,12 +117,12 @@ func (h *Handler) dashboard(w http.ResponseWriter, r *http.Request) *httperror.H return err } - networks, err := cli.NetworkList(r.Context(), types.NetworkListOptions{}) + networks, err := 
cli.NetworkList(r.Context(), network.ListOptions{}) if err != nil { return httperror.InternalServerError("Unable to retrieve Docker networks", err) } - networks, err = utils.FilterByResourceControl(tx, networks, portainer.NetworkResourceControl, context, func(c types.NetworkResource) string { + networks, err = utils.FilterByResourceControl(tx, networks, portainer.NetworkResourceControl, context, func(c network.Summary) string { return c.Name }) if err != nil { diff --git a/api/http/handler/docker/images/images_list.go b/api/http/handler/docker/images/images_list.go index ac7e980a0..eca99d993 100644 --- a/api/http/handler/docker/images/images_list.go +++ b/api/http/handler/docker/images/images_list.go @@ -1,10 +1,11 @@ package images import ( + "context" + "fmt" "net/http" "strings" - "github.com/portainer/portainer/api/docker/client" "github.com/portainer/portainer/api/http/handler/docker/utils" "github.com/portainer/portainer/api/set" httperror "github.com/portainer/portainer/pkg/libhttp/error" @@ -46,17 +47,16 @@ func (handler *Handler) imagesList(w http.ResponseWriter, r *http.Request) *http return httpErr } - images, err := cli.ImageList(r.Context(), image.ListOptions{}) + nodeNames := make(map[string]string) + + // Pass the node names map to the context so the custom NodeNameTransport can use it + ctx := context.WithValue(r.Context(), "nodeNames", nodeNames) + + images, err := cli.ImageList(ctx, image.ListOptions{}) if err != nil { return httperror.InternalServerError("Unable to retrieve Docker images", err) } - // Extract the node name from the custom transport - nodeNames := make(map[string]string) - if t, ok := cli.HTTPClient().Transport.(*client.NodeNameTransport); ok { - nodeNames = t.NodeNames() - } - withUsage, err := request.RetrieveBooleanQueryParameter(r, "withUsage", true) if err != nil { return httperror.BadRequest("Invalid query parameter: withUsage", err) @@ -85,8 +85,12 @@ func (handler *Handler) imagesList(w http.ResponseWriter, r *http.Request) *http } imagesList[i] = ImageResponse{ - Created: image.Created, - NodeName: nodeNames[image.ID], + Created: image.Created, + // Only works if the order of `images` is not changed between unmarshaling the agent's response + // in NodeNameTransport.RoundTrip() (api/docker/client/client.go) + // and docker's cli.ImageList() + // As both functions unmarshal the same response body, the resulting array will be ordered the same way. 
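+ // The lookup key combines the image ID with its position in the list, matching how NodeNameTransport
+ // records node names, so duplicate image IDs reported by different nodes do not collide.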
+ NodeName: nodeNames[fmt.Sprintf("%s-%d", image.ID, i)], ID: image.ID, Size: image.Size, Tags: image.RepoTags, diff --git a/api/http/handler/edgegroups/associated_endpoints.go b/api/http/handler/edgegroups/associated_endpoints.go index d03618c56..b26e94d0c 100644 --- a/api/http/handler/edgegroups/associated_endpoints.go +++ b/api/http/handler/edgegroups/associated_endpoints.go @@ -4,6 +4,7 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/internal/endpointutils" + "github.com/portainer/portainer/api/roar" ) type endpointSetType map[portainer.EndpointID]bool @@ -49,22 +50,29 @@ func GetEndpointsByTags(tx dataservices.DataStoreTx, tagIDs []portainer.TagID, p return results, nil } -func getTrustedEndpoints(tx dataservices.DataStoreTx, endpointIDs []portainer.EndpointID) ([]portainer.EndpointID, error) { +func getTrustedEndpoints(tx dataservices.DataStoreTx, endpointIDs roar.Roar[portainer.EndpointID]) ([]portainer.EndpointID, error) { + var innerErr error + results := []portainer.EndpointID{} - for _, endpointID := range endpointIDs { + + endpointIDs.Iterate(func(endpointID portainer.EndpointID) bool { endpoint, err := tx.Endpoint().Endpoint(endpointID) if err != nil { - return nil, err + innerErr = err + + return false } if !endpoint.UserTrusted { - continue + return true } results = append(results, endpoint.ID) - } - return results, nil + return true + }) + + return results, innerErr } func mapEndpointGroupToEndpoints(endpoints []portainer.Endpoint) map[portainer.EndpointGroupID]endpointSetType { diff --git a/api/http/handler/edgegroups/edgegroup_create.go b/api/http/handler/edgegroups/edgegroup_create.go index 3988160f0..c074bffde 100644 --- a/api/http/handler/edgegroups/edgegroup_create.go +++ b/api/http/handler/edgegroups/edgegroup_create.go @@ -7,6 +7,7 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/internal/endpointutils" + "github.com/portainer/portainer/api/roar" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" ) @@ -52,6 +53,7 @@ func calculateEndpointsOrTags(tx dataservices.DataStoreTx, edgeGroup *portainer. 
} edgeGroup.Endpoints = endpointIDs + edgeGroup.EndpointIDs = roar.FromSlice(endpointIDs) return nil } @@ -94,6 +96,7 @@ func (handler *Handler) edgeGroupCreate(w http.ResponseWriter, r *http.Request) Dynamic: payload.Dynamic, TagIDs: []portainer.TagID{}, Endpoints: []portainer.EndpointID{}, + EndpointIDs: roar.Roar[portainer.EndpointID]{}, PartialMatch: payload.PartialMatch, } @@ -108,5 +111,5 @@ func (handler *Handler) edgeGroupCreate(w http.ResponseWriter, r *http.Request) return nil }) - return txResponse(w, edgeGroup, err) + return txResponse(w, shadowedEdgeGroup{EdgeGroup: *edgeGroup}, err) } diff --git a/api/http/handler/edgegroups/edgegroup_create_test.go b/api/http/handler/edgegroups/edgegroup_create_test.go new file mode 100644 index 000000000..e7710432f --- /dev/null +++ b/api/http/handler/edgegroups/edgegroup_create_test.go @@ -0,0 +1,62 @@ +package edgegroups + +import ( + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/datastore" + "github.com/portainer/portainer/api/internal/testhelpers" + + "github.com/segmentio/encoding/json" + "github.com/stretchr/testify/require" +) + +func TestEdgeGroupCreateHandler(t *testing.T) { + _, store := datastore.MustNewTestStore(t, true, true) + + handler := NewHandler(testhelpers.NewTestRequestBouncer()) + handler.DataStore = store + + err := store.EndpointGroup().Create(&portainer.EndpointGroup{ + ID: 1, + Name: "Test Group", + }) + require.NoError(t, err) + + for i := range 3 { + err = store.Endpoint().Create(&portainer.Endpoint{ + ID: portainer.EndpointID(i + 1), + Name: "Test Endpoint " + strconv.Itoa(i+1), + Type: portainer.EdgeAgentOnDockerEnvironment, + GroupID: 1, + }) + require.NoError(t, err) + + err = store.EndpointRelation().Create(&portainer.EndpointRelation{ + EndpointID: portainer.EndpointID(i + 1), + EdgeStacks: map[portainer.EdgeStackID]bool{}, + }) + require.NoError(t, err) + } + + rr := httptest.NewRecorder() + + req := httptest.NewRequest( + http.MethodPost, + "/edge_groups", + strings.NewReader(`{"Name": "New Edge Group", "Endpoints": [1, 2, 3]}`), + ) + + handler.ServeHTTP(rr, req) + require.Equal(t, http.StatusOK, rr.Result().StatusCode) + + var responseGroup portainer.EdgeGroup + err = json.NewDecoder(rr.Body).Decode(&responseGroup) + require.NoError(t, err) + + require.ElementsMatch(t, []portainer.EndpointID{1, 2, 3}, responseGroup.Endpoints) +} diff --git a/api/http/handler/edgegroups/edgegroup_inspect.go b/api/http/handler/edgegroups/edgegroup_inspect.go index c17ac6b7c..76780ec1d 100644 --- a/api/http/handler/edgegroups/edgegroup_inspect.go +++ b/api/http/handler/edgegroups/edgegroup_inspect.go @@ -5,6 +5,7 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" + "github.com/portainer/portainer/api/roar" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" ) @@ -33,7 +34,9 @@ func (handler *Handler) edgeGroupInspect(w http.ResponseWriter, r *http.Request) return err }) - return txResponse(w, edgeGroup, err) + edgeGroup.Endpoints = edgeGroup.EndpointIDs.ToSlice() + + return txResponse(w, shadowedEdgeGroup{EdgeGroup: *edgeGroup}, err) } func getEdgeGroup(tx dataservices.DataStoreTx, ID portainer.EdgeGroupID) (*portainer.EdgeGroup, error) { @@ -50,7 +53,7 @@ func getEdgeGroup(tx dataservices.DataStoreTx, ID portainer.EdgeGroupID) (*porta return nil, httperror.InternalServerError("Unable to 
retrieve environments and environment groups for Edge group", err) } - edgeGroup.Endpoints = endpoints + edgeGroup.EndpointIDs = roar.FromSlice(endpoints) } return edgeGroup, err diff --git a/api/http/handler/edgegroups/edgegroup_inspect_test.go b/api/http/handler/edgegroups/edgegroup_inspect_test.go new file mode 100644 index 000000000..5af282372 --- /dev/null +++ b/api/http/handler/edgegroups/edgegroup_inspect_test.go @@ -0,0 +1,176 @@ +package edgegroups + +import ( + "net/http" + "net/http/httptest" + "strconv" + "testing" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/datastore" + "github.com/portainer/portainer/api/internal/testhelpers" + "github.com/portainer/portainer/api/roar" + + "github.com/segmentio/encoding/json" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEdgeGroupInspectHandler(t *testing.T) { + _, store := datastore.MustNewTestStore(t, true, true) + + handler := NewHandler(testhelpers.NewTestRequestBouncer()) + handler.DataStore = store + + err := store.EndpointGroup().Create(&portainer.EndpointGroup{ + ID: 1, + Name: "Test Group", + }) + require.NoError(t, err) + + for i := range 3 { + err = store.Endpoint().Create(&portainer.Endpoint{ + ID: portainer.EndpointID(i + 1), + Name: "Test Endpoint " + strconv.Itoa(i+1), + Type: portainer.EdgeAgentOnDockerEnvironment, + GroupID: 1, + }) + require.NoError(t, err) + + err = store.EndpointRelation().Create(&portainer.EndpointRelation{ + EndpointID: portainer.EndpointID(i + 1), + EdgeStacks: map[portainer.EdgeStackID]bool{}, + }) + require.NoError(t, err) + } + + err = store.EdgeGroup().Create(&portainer.EdgeGroup{ + ID: 1, + Name: "Test Edge Group", + EndpointIDs: roar.FromSlice([]portainer.EndpointID{1, 2, 3}), + }) + require.NoError(t, err) + + rr := httptest.NewRecorder() + + req := httptest.NewRequest( + http.MethodGet, + "/edge_groups/1", + nil, + ) + + handler.ServeHTTP(rr, req) + require.Equal(t, http.StatusOK, rr.Result().StatusCode) + + var responseGroup portainer.EdgeGroup + err = json.NewDecoder(rr.Body).Decode(&responseGroup) + require.NoError(t, err) + + assert.ElementsMatch(t, []portainer.EndpointID{1, 2, 3}, responseGroup.Endpoints) +} + +func TestEmptyEdgeGroupInspectHandler(t *testing.T) { + _, store := datastore.MustNewTestStore(t, true, true) + + handler := NewHandler(testhelpers.NewTestRequestBouncer()) + handler.DataStore = store + + err := store.EndpointGroup().Create(&portainer.EndpointGroup{ + ID: 1, + Name: "Test Group", + }) + require.NoError(t, err) + + err = store.EdgeGroup().Create(&portainer.EdgeGroup{ + ID: 1, + Name: "Test Edge Group", + EndpointIDs: roar.Roar[portainer.EndpointID]{}, + }) + require.NoError(t, err) + + rr := httptest.NewRecorder() + + req := httptest.NewRequest( + http.MethodGet, + "/edge_groups/1", + nil, + ) + + handler.ServeHTTP(rr, req) + require.Equal(t, http.StatusOK, rr.Result().StatusCode) + + var responseGroup portainer.EdgeGroup + err = json.NewDecoder(rr.Body).Decode(&responseGroup) + require.NoError(t, err) + + // Make sure the frontend does not get a null value but a [] instead + require.NotNil(t, responseGroup.Endpoints) + require.Len(t, responseGroup.Endpoints, 0) +} + +func TestDynamicEdgeGroupInspectHandler(t *testing.T) { + _, store := datastore.MustNewTestStore(t, true, true) + + handler := NewHandler(testhelpers.NewTestRequestBouncer()) + handler.DataStore = store + + err := store.EndpointGroup().Create(&portainer.EndpointGroup{ + ID: 1, + Name: "Test Group", + }) + 
require.NoError(t, err) + + err = store.Tag().Create(&portainer.Tag{ + ID: 1, + Name: "Test Tag", + Endpoints: map[portainer.EndpointID]bool{ + 1: true, + 2: true, + 3: true, + }, + }) + require.NoError(t, err) + + for i := range 3 { + err = store.Endpoint().Create(&portainer.Endpoint{ + ID: portainer.EndpointID(i + 1), + Name: "Test Endpoint " + strconv.Itoa(i+1), + Type: portainer.EdgeAgentOnDockerEnvironment, + GroupID: 1, + TagIDs: []portainer.TagID{1}, + UserTrusted: true, + }) + require.NoError(t, err) + + err = store.EndpointRelation().Create(&portainer.EndpointRelation{ + EndpointID: portainer.EndpointID(i + 1), + EdgeStacks: map[portainer.EdgeStackID]bool{}, + }) + require.NoError(t, err) + } + + err = store.EdgeGroup().Create(&portainer.EdgeGroup{ + ID: 1, + Name: "Test Edge Group", + Dynamic: true, + TagIDs: []portainer.TagID{1}, + }) + require.NoError(t, err) + + rr := httptest.NewRecorder() + + req := httptest.NewRequest( + http.MethodGet, + "/edge_groups/1", + nil, + ) + + handler.ServeHTTP(rr, req) + require.Equal(t, http.StatusOK, rr.Result().StatusCode) + + var responseGroup portainer.EdgeGroup + err = json.NewDecoder(rr.Body).Decode(&responseGroup) + require.NoError(t, err) + + require.ElementsMatch(t, []portainer.EndpointID{1, 2, 3}, responseGroup.Endpoints) +} diff --git a/api/http/handler/edgegroups/edgegroup_list.go b/api/http/handler/edgegroups/edgegroup_list.go index bc67176fd..87de867eb 100644 --- a/api/http/handler/edgegroups/edgegroup_list.go +++ b/api/http/handler/edgegroups/edgegroup_list.go @@ -7,11 +7,17 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" + "github.com/portainer/portainer/api/roar" httperror "github.com/portainer/portainer/pkg/libhttp/error" ) -type decoratedEdgeGroup struct { +type shadowedEdgeGroup struct { portainer.EdgeGroup + EndpointIds int `json:"EndpointIds,omitempty"` // Shadow to avoid exposing in the API +} + +type decoratedEdgeGroup struct { + shadowedEdgeGroup HasEdgeStack bool `json:"HasEdgeStack"` HasEdgeJob bool `json:"HasEdgeJob"` EndpointTypes []portainer.EndpointType @@ -76,8 +82,8 @@ func getEdgeGroupList(tx dataservices.DataStoreTx) ([]decoratedEdgeGroup, error) } edgeGroup := decoratedEdgeGroup{ - EdgeGroup: orgEdgeGroup, - EndpointTypes: []portainer.EndpointType{}, + shadowedEdgeGroup: shadowedEdgeGroup{EdgeGroup: orgEdgeGroup}, + EndpointTypes: []portainer.EndpointType{}, } if edgeGroup.Dynamic { endpointIDs, err := GetEndpointsByTags(tx, edgeGroup.TagIDs, edgeGroup.PartialMatch) @@ -88,15 +94,16 @@ func getEdgeGroupList(tx dataservices.DataStoreTx) ([]decoratedEdgeGroup, error) edgeGroup.Endpoints = endpointIDs edgeGroup.TrustedEndpoints = endpointIDs } else { - trustedEndpoints, err := getTrustedEndpoints(tx, edgeGroup.Endpoints) + trustedEndpoints, err := getTrustedEndpoints(tx, edgeGroup.EndpointIDs) if err != nil { return nil, httperror.InternalServerError("Unable to retrieve environments for Edge group", err) } + edgeGroup.Endpoints = edgeGroup.EndpointIDs.ToSlice() edgeGroup.TrustedEndpoints = trustedEndpoints } - endpointTypes, err := getEndpointTypes(tx, edgeGroup.Endpoints) + endpointTypes, err := getEndpointTypes(tx, edgeGroup.EndpointIDs) if err != nil { return nil, httperror.InternalServerError("Unable to retrieve environment types for Edge group", err) } @@ -111,15 +118,26 @@ func getEdgeGroupList(tx dataservices.DataStoreTx) ([]decoratedEdgeGroup, error) return decoratedEdgeGroups, nil } -func getEndpointTypes(tx dataservices.DataStoreTx, 
endpointIds []portainer.EndpointID) ([]portainer.EndpointType, error) { +func getEndpointTypes(tx dataservices.DataStoreTx, endpointIds roar.Roar[portainer.EndpointID]) ([]portainer.EndpointType, error) { + var innerErr error + typeSet := map[portainer.EndpointType]bool{} - for _, endpointID := range endpointIds { + + endpointIds.Iterate(func(endpointID portainer.EndpointID) bool { endpoint, err := tx.Endpoint().Endpoint(endpointID) if err != nil { - return nil, fmt.Errorf("failed fetching environment: %w", err) + innerErr = fmt.Errorf("failed fetching environment: %w", err) + + return false } typeSet[endpoint.Type] = true + + return true + }) + + if innerErr != nil { + return nil, innerErr } endpointTypes := make([]portainer.EndpointType, 0, len(typeSet)) diff --git a/api/http/handler/edgegroups/edgegroup_list_test.go b/api/http/handler/edgegroups/edgegroup_list_test.go index b77b2966e..bf084c377 100644 --- a/api/http/handler/edgegroups/edgegroup_list_test.go +++ b/api/http/handler/edgegroups/edgegroup_list_test.go @@ -1,11 +1,19 @@ package edgegroups import ( + "net/http" + "net/http/httptest" + "strconv" "testing" portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/datastore" "github.com/portainer/portainer/api/internal/testhelpers" + "github.com/portainer/portainer/api/roar" + + "github.com/segmentio/encoding/json" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func Test_getEndpointTypes(t *testing.T) { @@ -38,7 +46,7 @@ func Test_getEndpointTypes(t *testing.T) { } for _, test := range tests { - ans, err := getEndpointTypes(datastore, test.endpointIds) + ans, err := getEndpointTypes(datastore, roar.FromSlice(test.endpointIds)) assert.NoError(t, err, "getEndpointTypes shouldn't fail") assert.ElementsMatch(t, test.expected, ans, "getEndpointTypes expected to return %b for %v, but returned %b", test.expected, test.endpointIds, ans) @@ -48,6 +56,61 @@ func Test_getEndpointTypes(t *testing.T) { func Test_getEndpointTypes_failWhenEndpointDontExist(t *testing.T) { datastore := testhelpers.NewDatastore(testhelpers.WithEndpoints([]portainer.Endpoint{})) - _, err := getEndpointTypes(datastore, []portainer.EndpointID{1}) + _, err := getEndpointTypes(datastore, roar.FromSlice([]portainer.EndpointID{1})) assert.Error(t, err, "getEndpointTypes should fail") } + +func TestEdgeGroupListHandler(t *testing.T) { + _, store := datastore.MustNewTestStore(t, true, true) + + handler := NewHandler(testhelpers.NewTestRequestBouncer()) + handler.DataStore = store + + err := store.EndpointGroup().Create(&portainer.EndpointGroup{ + ID: 1, + Name: "Test Group", + }) + require.NoError(t, err) + + for i := range 3 { + err = store.Endpoint().Create(&portainer.Endpoint{ + ID: portainer.EndpointID(i + 1), + Name: "Test Endpoint " + strconv.Itoa(i+1), + Type: portainer.EdgeAgentOnDockerEnvironment, + GroupID: 1, + }) + require.NoError(t, err) + + err = store.EndpointRelation().Create(&portainer.EndpointRelation{ + EndpointID: portainer.EndpointID(i + 1), + EdgeStacks: map[portainer.EdgeStackID]bool{}, + }) + require.NoError(t, err) + } + + err = store.EdgeGroup().Create(&portainer.EdgeGroup{ + ID: 1, + Name: "Test Edge Group", + EndpointIDs: roar.FromSlice([]portainer.EndpointID{1, 2, 3}), + }) + require.NoError(t, err) + + rr := httptest.NewRecorder() + + req := httptest.NewRequest( + http.MethodGet, + "/edge_groups", + nil, + ) + + handler.ServeHTTP(rr, req) + require.Equal(t, http.StatusOK, rr.Result().StatusCode) + + var responseGroups 
[]decoratedEdgeGroup + err = json.NewDecoder(rr.Body).Decode(&responseGroups) + require.NoError(t, err) + + require.Len(t, responseGroups, 1) + require.ElementsMatch(t, []portainer.EndpointID{1, 2, 3}, responseGroups[0].Endpoints) + require.Len(t, responseGroups[0].TrustedEndpoints, 0) +} diff --git a/api/http/handler/edgegroups/edgegroup_update.go b/api/http/handler/edgegroups/edgegroup_update.go index 2d04f7cd2..270bd10df 100644 --- a/api/http/handler/edgegroups/edgegroup_update.go +++ b/api/http/handler/edgegroups/edgegroup_update.go @@ -24,10 +24,6 @@ type edgeGroupUpdatePayload struct { } func (payload *edgeGroupUpdatePayload) Validate(r *http.Request) error { - if len(payload.Name) == 0 { - return errors.New("invalid Edge group name") - } - if payload.Dynamic && len(payload.TagIDs) == 0 { return errors.New("tagIDs is mandatory for a dynamic Edge group") } @@ -35,7 +31,7 @@ func (payload *edgeGroupUpdatePayload) Validate(r *http.Request) error { return nil } -// @id EgeGroupUpdate +// @id EdgeGroupUpdate // @summary Updates an EdgeGroup // @description **Access policy**: administrator // @tags edge_groups @@ -162,7 +158,7 @@ func (handler *Handler) edgeGroupUpdate(w http.ResponseWriter, r *http.Request) return nil }) - return txResponse(w, edgeGroup, err) + return txResponse(w, shadowedEdgeGroup{EdgeGroup: *edgeGroup}, err) } func (handler *Handler) updateEndpointStacks(tx dataservices.DataStoreTx, endpoint *portainer.Endpoint, edgeGroups []portainer.EdgeGroup, edgeStacks []portainer.EdgeStack) error { diff --git a/api/http/handler/edgegroups/edgegroup_update_test.go b/api/http/handler/edgegroups/edgegroup_update_test.go new file mode 100644 index 000000000..dbecbdfcf --- /dev/null +++ b/api/http/handler/edgegroups/edgegroup_update_test.go @@ -0,0 +1,70 @@ +package edgegroups + +import ( + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/datastore" + "github.com/portainer/portainer/api/internal/testhelpers" + "github.com/portainer/portainer/api/roar" + + "github.com/segmentio/encoding/json" + "github.com/stretchr/testify/require" +) + +func TestEdgeGroupUpdateHandler(t *testing.T) { + _, store := datastore.MustNewTestStore(t, true, true) + + handler := NewHandler(testhelpers.NewTestRequestBouncer()) + handler.DataStore = store + + err := store.EndpointGroup().Create(&portainer.EndpointGroup{ + ID: 1, + Name: "Test Group", + }) + require.NoError(t, err) + + for i := range 3 { + err = store.Endpoint().Create(&portainer.Endpoint{ + ID: portainer.EndpointID(i + 1), + Name: "Test Endpoint " + strconv.Itoa(i+1), + Type: portainer.EdgeAgentOnDockerEnvironment, + GroupID: 1, + }) + require.NoError(t, err) + + err = store.EndpointRelation().Create(&portainer.EndpointRelation{ + EndpointID: portainer.EndpointID(i + 1), + EdgeStacks: map[portainer.EdgeStackID]bool{}, + }) + require.NoError(t, err) + } + + err = store.EdgeGroup().Create(&portainer.EdgeGroup{ + ID: 1, + Name: "Test Edge Group", + EndpointIDs: roar.FromSlice([]portainer.EndpointID{1}), + }) + require.NoError(t, err) + + rr := httptest.NewRecorder() + + req := httptest.NewRequest( + http.MethodPut, + "/edge_groups/1", + strings.NewReader(`{"Endpoints": [1, 2, 3]}`), + ) + + handler.ServeHTTP(rr, req) + require.Equal(t, http.StatusOK, rr.Result().StatusCode) + + var responseGroup portainer.EdgeGroup + err = json.NewDecoder(rr.Body).Decode(&responseGroup) + require.NoError(t, err) + + require.ElementsMatch(t, 
[]portainer.EndpointID{1, 2, 3}, responseGroup.Endpoints) +} diff --git a/api/http/handler/edgejobs/edgejob_create.go b/api/http/handler/edgejobs/edgejob_create.go index 252770aa2..dd6b4d5df 100644 --- a/api/http/handler/edgejobs/edgejob_create.go +++ b/api/http/handler/edgejobs/edgejob_create.go @@ -15,8 +15,7 @@ import ( "github.com/portainer/portainer/api/internal/endpointutils" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" - - "github.com/asaskevich/govalidator" + "github.com/portainer/portainer/pkg/validate" ) type edgeJobBasePayload struct { @@ -53,7 +52,7 @@ func (payload *edgeJobCreateFromFileContentPayload) Validate(r *http.Request) er return errors.New("invalid Edge job name") } - if !govalidator.Matches(payload.Name, `^[a-zA-Z0-9][a-zA-Z0-9_.-]*$`) { + if !validate.Matches(payload.Name, `^[a-zA-Z0-9][a-zA-Z0-9_.-]*$`) { return errors.New("invalid Edge job name format. Allowed characters are: [a-zA-Z0-9_.-]") } @@ -136,7 +135,7 @@ func (payload *edgeJobCreateFromFilePayload) Validate(r *http.Request) error { return errors.New("invalid Edge job name") } - if !govalidator.Matches(name, `^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`) { + if !validate.Matches(name, `^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`) { return errors.New("invalid Edge job name format. Allowed characters are: [a-zA-Z0-9_.-]") } payload.Name = name @@ -271,26 +270,3 @@ func (handler *Handler) addAndPersistEdgeJob(tx dataservices.DataStoreTx, edgeJo return tx.EdgeJob().CreateWithID(edgeJob.ID, edgeJob) } - -// @id EdgeJobCreate -// @summary Create an EdgeJob -// @description **Access policy**: administrator -// @tags edge_jobs -// @security ApiKeyAuth -// @security jwt -// @produce json -// @param method query string true "Creation Method" Enums(file, string) -// @param body body object true "for body documentation see the relevant /edge_jobs/create/{method} endpoint" -// @success 200 {object} portainer.EdgeGroup -// @failure 503 "Edge compute features are disabled" -// @failure 500 -// @deprecated -// @router /edge_jobs [post] -func deprecatedEdgeJobCreateUrlParser(w http.ResponseWriter, r *http.Request) (string, *httperror.HandlerError) { - method, err := request.RetrieveQueryParameter(r, "method", false) - if err != nil { - return "", httperror.BadRequest("Invalid query parameter: method. 
Valid values are: file or string", err) - } - - return "/edge_jobs/create/" + method, nil -} diff --git a/api/http/handler/edgejobs/edgejob_tasks_list.go b/api/http/handler/edgejobs/edgejob_tasks_list.go index 70918f69d..64d50137b 100644 --- a/api/http/handler/edgejobs/edgejob_tasks_list.go +++ b/api/http/handler/edgejobs/edgejob_tasks_list.go @@ -1,21 +1,25 @@ package edgejobs import ( + "errors" "fmt" "maps" "net/http" + "strings" portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" + "github.com/portainer/portainer/api/http/utils/filters" "github.com/portainer/portainer/api/internal/edge" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" ) type taskContainer struct { - ID string `json:"Id"` - EndpointID portainer.EndpointID `json:"EndpointId"` - LogsStatus portainer.EdgeJobLogsStatus `json:"LogsStatus"` + ID string `json:"Id"` + EndpointID portainer.EndpointID `json:"EndpointId"` + EndpointName string `json:"EndpointName"` + LogsStatus portainer.EdgeJobLogsStatus `json:"LogsStatus"` } // @id EdgeJobTasksList @@ -37,16 +41,42 @@ func (handler *Handler) edgeJobTasksList(w http.ResponseWriter, r *http.Request) return httperror.BadRequest("Invalid Edge job identifier route variable", err) } - var tasks []taskContainer - err = handler.DataStore.UpdateTx(func(tx dataservices.DataStoreTx) error { + params := filters.ExtractListModifiersQueryParams(r) + + var tasks []*taskContainer + err = handler.DataStore.ViewTx(func(tx dataservices.DataStoreTx) error { tasks, err = listEdgeJobTasks(tx, portainer.EdgeJobID(edgeJobID)) return err }) - return txResponse(w, tasks, err) + results := filters.SearchOrderAndPaginate(tasks, params, filters.Config[*taskContainer]{ + SearchAccessors: []filters.SearchAccessor[*taskContainer]{ + func(tc *taskContainer) (string, error) { + switch tc.LogsStatus { + case portainer.EdgeJobLogsStatusPending: + return "pending", nil + case 0, portainer.EdgeJobLogsStatusIdle: + return "idle", nil + case portainer.EdgeJobLogsStatusCollected: + return "collected", nil + } + return "", errors.New("unknown state") + }, + func(tc *taskContainer) (string, error) { + return tc.EndpointName, nil + }, + }, + SortBindings: []filters.SortBinding[*taskContainer]{ + {Key: "EndpointName", Fn: func(a, b *taskContainer) int { return strings.Compare(a.EndpointName, b.EndpointName) }}, + }, + }) + + filters.ApplyFilterResultsHeaders(&w, results) + + return txResponse(w, results.Items, err) } -func listEdgeJobTasks(tx dataservices.DataStoreTx, edgeJobID portainer.EdgeJobID) ([]taskContainer, error) { +func listEdgeJobTasks(tx dataservices.DataStoreTx, edgeJobID portainer.EdgeJobID) ([]*taskContainer, error) { edgeJob, err := tx.EdgeJob().Read(edgeJobID) if tx.IsErrObjectNotFound(err) { return nil, httperror.NotFound("Unable to find an Edge job with the specified identifier inside the database", err) @@ -54,7 +84,12 @@ func listEdgeJobTasks(tx dataservices.DataStoreTx, edgeJobID portainer.EdgeJobID return nil, httperror.InternalServerError("Unable to find an Edge job with the specified identifier inside the database", err) } - tasks := make([]taskContainer, 0) + endpoints, err := tx.Endpoint().Endpoints() + if err != nil { + return nil, err + } + + tasks := make([]*taskContainer, 0) endpointsMap := map[portainer.EndpointID]portainer.EdgeJobEndpointMeta{} if len(edgeJob.EdgeGroups) > 0 { @@ -70,10 +105,19 @@ func listEdgeJobTasks(tx dataservices.DataStoreTx, edgeJobID portainer.EdgeJobID 
maps.Copy(endpointsMap, edgeJob.Endpoints) for endpointID, meta := range endpointsMap { - tasks = append(tasks, taskContainer{ - ID: fmt.Sprintf("edgejob_task_%d_%d", edgeJob.ID, endpointID), - EndpointID: endpointID, - LogsStatus: meta.LogsStatus, + + endpointName := "" + for idx := range endpoints { + if endpoints[idx].ID == endpointID { + endpointName = endpoints[idx].Name + } + } + + tasks = append(tasks, &taskContainer{ + ID: fmt.Sprintf("edgejob_task_%d_%d", edgeJob.ID, endpointID), + EndpointID: endpointID, + EndpointName: endpointName, + LogsStatus: meta.LogsStatus, }) } diff --git a/api/http/handler/edgejobs/edgejob_tasks_list_test.go b/api/http/handler/edgejobs/edgejob_tasks_list_test.go new file mode 100644 index 000000000..6224ed628 --- /dev/null +++ b/api/http/handler/edgejobs/edgejob_tasks_list_test.go @@ -0,0 +1,131 @@ +package edgejobs + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strconv" + "testing" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/datastore" + "github.com/portainer/portainer/api/internal/testhelpers" + "github.com/portainer/portainer/api/roar" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_EdgeJobTasksListHandler(t *testing.T) { + _, store := datastore.MustNewTestStore(t, true, false) + + handler := NewHandler(testhelpers.NewTestRequestBouncer()) + handler.DataStore = store + + addEnv := func(env *portainer.Endpoint) { + err := store.EndpointService.Create(env) + require.NoError(t, err) + } + + addEdgeGroup := func(group *portainer.EdgeGroup) { + err := store.EdgeGroupService.Create(group) + require.NoError(t, err) + } + + addJob := func(job *portainer.EdgeJob) { + err := store.EdgeJobService.Create(job) + require.NoError(t, err) + } + + envCount := 6 + + for i := range envCount { + addEnv(&portainer.Endpoint{ID: portainer.EndpointID(i + 1), Name: "env_" + strconv.Itoa(i+1)}) + } + + addEdgeGroup(&portainer.EdgeGroup{ID: 1, Name: "edge_group_1", EndpointIDs: roar.FromSlice([]portainer.EndpointID{5, 6})}) + + addJob(&portainer.EdgeJob{ + ID: 1, + Endpoints: map[portainer.EndpointID]portainer.EdgeJobEndpointMeta{ + 1: {}, + 2: {LogsStatus: portainer.EdgeJobLogsStatusIdle}, + 3: {LogsStatus: portainer.EdgeJobLogsStatusPending}, + 4: {LogsStatus: portainer.EdgeJobLogsStatusCollected}}, + EdgeGroups: []portainer.EdgeGroupID{1}, + }) + + test := func(params string, expect []taskContainer, expectedCount int) { + rr := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/edge_jobs/1/tasks"+params, nil) + handler.ServeHTTP(rr, req) + require.Equal(t, http.StatusOK, rr.Result().StatusCode) + + var response []taskContainer + err := json.NewDecoder(rr.Body).Decode(&response) + require.NoError(t, err) + + assert.ElementsMatch(t, expect, response) + + tcStr := rr.Header().Get("x-total-count") + assert.NotEmpty(t, tcStr) + totalCount, err := strconv.Atoi(tcStr) + assert.NoError(t, err) + assert.Equal(t, expectedCount, totalCount) + + taStr := rr.Header().Get("x-total-available") + assert.NotEmpty(t, taStr) + totalAvailable, err := strconv.Atoi(taStr) + assert.NoError(t, err) + assert.Equal(t, envCount, totalAvailable) + + } + + tasks := []taskContainer{ + {}, + {"edgejob_task_1_1", 1, "env_1", 0}, + {"edgejob_task_1_2", 2, "env_2", portainer.EdgeJobLogsStatusIdle}, + {"edgejob_task_1_3", 3, "env_3", portainer.EdgeJobLogsStatusPending}, + {"edgejob_task_1_4", 4, "env_4", portainer.EdgeJobLogsStatusCollected}, + {"edgejob_task_1_5", 5, "env_5", 
0}, + {"edgejob_task_1_6", 6, "env_6", 0}, + } + + t.Run("should return no results", func(t *testing.T) { + test("?search=foo", []taskContainer{}, 0) // unknown search + test("?start=100&limit=1", []taskContainer{}, 6) // overflowing start. Still return the correct count header + }) + + t.Run("should return one element", func(t *testing.T) { + // limit the *returned* results but not the total count + test("?start=0&limit=1&sort=EndpointName&order=asc", []taskContainer{tasks[1]}, envCount) // limit + test("?start=5&limit=10&sort=EndpointName&order=asc", []taskContainer{tasks[6]}, envCount) // start = last element + overflowing limit + // limit the number of results + test("?search=env_1", []taskContainer{tasks[1]}, 1) // only 1 result + }) + + t.Run("should filter by status", func(t *testing.T) { + test("?search=idle", []taskContainer{tasks[1], tasks[2], tasks[5], tasks[6]}, 4) // 0 (default value) is IDLE + test("?search=pending", []taskContainer{tasks[3]}, 1) + test("?search=collected", []taskContainer{tasks[4]}, 1) + }) + + t.Run("should return all elements", func(t *testing.T) { + test("", tasks[1:], envCount) // default + test("?some=invalid_param", tasks[1:], envCount) // unknown query params + test("?limit=-1", tasks[1:], envCount) // underflowing limit + test("?start=100", tasks[1:], envCount) // overflowing start without limit + test("?search=env", tasks[1:], envCount) // search in a match-all keyword + }) + + testError := func(params string, status int) { + rr := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/edge_jobs/2/tasks"+params, nil) + handler.ServeHTTP(rr, req) + require.Equal(t, status, rr.Result().StatusCode) + } + + t.Run("errors", func(t *testing.T) { + testError("", http.StatusNotFound) // unknown job id + }) + +} diff --git a/api/http/handler/edgejobs/edgejob_update.go b/api/http/handler/edgejobs/edgejob_update.go index 468fbb8b7..6f2b8e382 100644 --- a/api/http/handler/edgejobs/edgejob_update.go +++ b/api/http/handler/edgejobs/edgejob_update.go @@ -14,8 +14,7 @@ import ( "github.com/portainer/portainer/api/internal/endpointutils" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" - - "github.com/asaskevich/govalidator" + "github.com/portainer/portainer/pkg/validate" ) type edgeJobUpdatePayload struct { @@ -28,7 +27,7 @@ type edgeJobUpdatePayload struct { } func (payload *edgeJobUpdatePayload) Validate(r *http.Request) error { - if payload.Name != nil && !govalidator.Matches(*payload.Name, `^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`) { + if payload.Name != nil && !validate.Matches(*payload.Name, `^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`) { return errors.New("invalid Edge job name format. 
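The test above exercises the new list modifiers on GET /edge_jobs/{id}/tasks (search, sort, order, start, limit) together with the x-total-count and x-total-available response headers. A rough client-side sketch of reading those totals; the base URL and API key below are placeholders, not values taken from this change:

package main

import (
	"fmt"
	"net/http"
	"strconv"
)

func main() {
	url := "http://localhost:9000/api/edge_jobs/1/tasks?sort=EndpointName&order=asc&start=0&limit=10"
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("x-api-key", "ptr_example") // placeholder API key

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	matching, _ := strconv.Atoi(resp.Header.Get("x-total-count"))      // tasks matching the search
	available, _ := strconv.Atoi(resp.Header.Get("x-total-available")) // tasks before any filtering
	fmt.Printf("page returned for %d matching tasks out of %d available\n", matching, available)
}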
Allowed characters are: [a-zA-Z0-9_.-]") } diff --git a/api/http/handler/edgejobs/handler.go b/api/http/handler/edgejobs/handler.go index 93f210bb5..ab3d66b3b 100644 --- a/api/http/handler/edgejobs/handler.go +++ b/api/http/handler/edgejobs/handler.go @@ -6,7 +6,6 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" - "github.com/portainer/portainer/api/http/middlewares" "github.com/portainer/portainer/api/http/security" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/response" @@ -30,8 +29,6 @@ func NewHandler(bouncer security.BouncerService) *Handler { h.Handle("/edge_jobs", bouncer.AdminAccess(bouncer.EdgeComputeOperation(httperror.LoggerHandler(h.edgeJobList)))).Methods(http.MethodGet) - h.Handle("/edge_jobs", - bouncer.AdminAccess(bouncer.EdgeComputeOperation(middlewares.Deprecated(h, deprecatedEdgeJobCreateUrlParser)))).Methods(http.MethodPost) h.Handle("/edge_jobs/create/{method}", bouncer.AdminAccess(bouncer.EdgeComputeOperation(httperror.LoggerHandler(h.edgeJobCreate)))).Methods(http.MethodPost) h.Handle("/edge_jobs/{id}", diff --git a/api/http/handler/edgestacks/edgestack_create.go b/api/http/handler/edgestacks/edgestack_create.go index 12dfe620a..65e77764a 100644 --- a/api/http/handler/edgestacks/edgestack_create.go +++ b/api/http/handler/edgestacks/edgestack_create.go @@ -55,26 +55,3 @@ func (handler *Handler) createSwarmStack(tx dataservices.DataStoreTx, method str return nil, httperrors.NewInvalidPayloadError("Invalid value for query parameter: method. Value must be one of: string, repository or file") } - -// @id EdgeStackCreate -// @summary Create an EdgeStack -// @description **Access policy**: administrator -// @tags edge_stacks -// @security ApiKeyAuth -// @security jwt -// @produce json -// @param method query string true "Creation Method" Enums(file,string,repository) -// @param body body object true "for body documentation see the relevant /edge_stacks/create/{method} endpoint" -// @success 200 {object} portainer.EdgeStack -// @failure 500 -// @failure 503 "Edge compute features are disabled" -// @deprecated -// @router /edge_stacks [post] -func deprecatedEdgeStackCreateUrlParser(w http.ResponseWriter, r *http.Request) (string, *httperror.HandlerError) { - method, err := request.RetrieveQueryParameter(r, "method", false) - if err != nil { - return "", httperror.BadRequest("Invalid query parameter: method. 
Valid values are: file or string", err) - } - - return "/edge_stacks/create/" + method, nil -} diff --git a/api/http/handler/edgestacks/edgestack_create_file.go b/api/http/handler/edgestacks/edgestack_create_file.go index 555418835..a0bc2995f 100644 --- a/api/http/handler/edgestacks/edgestack_create_file.go +++ b/api/http/handler/edgestacks/edgestack_create_file.go @@ -101,8 +101,7 @@ func (payload *edgeStackFromFileUploadPayload) Validate(r *http.Request) error { // @router /edge_stacks/create/file [post] func (handler *Handler) createEdgeStackFromFileUpload(r *http.Request, tx dataservices.DataStoreTx, dryrun bool) (*portainer.EdgeStack, error) { payload := &edgeStackFromFileUploadPayload{} - err := payload.Validate(r) - if err != nil { + if err := payload.Validate(r); err != nil { return nil, err } diff --git a/api/http/handler/edgestacks/edgestack_create_git.go b/api/http/handler/edgestacks/edgestack_create_git.go index 88e4bda79..a8775495d 100644 --- a/api/http/handler/edgestacks/edgestack_create_git.go +++ b/api/http/handler/edgestacks/edgestack_create_git.go @@ -11,8 +11,8 @@ import ( httperrors "github.com/portainer/portainer/api/http/errors" "github.com/portainer/portainer/pkg/edge" "github.com/portainer/portainer/pkg/libhttp/request" + "github.com/portainer/portainer/pkg/validate" - "github.com/asaskevich/govalidator" "github.com/pkg/errors" ) @@ -33,6 +33,8 @@ type edgeStackFromGitRepositoryPayload struct { RepositoryUsername string `example:"myGitUsername"` // Password used in basic authentication. Required when RepositoryAuthentication is true. RepositoryPassword string `example:"myGitPassword"` + // RepositoryAuthorizationType is the authorization type to use + RepositoryAuthorizationType gittypes.GitCredentialAuthType `example:"0"` // Path to the Stack file inside the Git repository FilePathInRepository string `example:"docker-compose.yml" default:"docker-compose.yml"` // List of identifiers of EdgeGroups @@ -59,7 +61,7 @@ func (payload *edgeStackFromGitRepositoryPayload) Validate(r *http.Request) erro return httperrors.NewInvalidPayloadError("Invalid stack name. Stack name must only consist of lowercase alpha characters, numbers, hyphens, or underscores as well as start with a lowercase character or number") } - if len(payload.RepositoryURL) == 0 || !govalidator.IsURL(payload.RepositoryURL) { + if len(payload.RepositoryURL) == 0 || !validate.IsURL(payload.RepositoryURL) { return httperrors.NewInvalidPayloadError("Invalid repository URL. 
Must correspond to a valid URL format") } @@ -103,8 +105,7 @@ func (payload *edgeStackFromGitRepositoryPayload) Validate(r *http.Request) erro // @router /edge_stacks/create/repository [post] func (handler *Handler) createEdgeStackFromGitRepository(r *http.Request, tx dataservices.DataStoreTx, dryrun bool, userID portainer.UserID) (*portainer.EdgeStack, error) { var payload edgeStackFromGitRepositoryPayload - err := request.DecodeAndValidateJSONPayload(r, &payload) - if err != nil { + if err := request.DecodeAndValidateJSONPayload(r, &payload); err != nil { return nil, err } @@ -126,8 +127,9 @@ func (handler *Handler) createEdgeStackFromGitRepository(r *http.Request, tx dat if payload.RepositoryAuthentication { repoConfig.Authentication = &gittypes.GitAuthentication{ - Username: payload.RepositoryUsername, - Password: payload.RepositoryPassword, + Username: payload.RepositoryUsername, + Password: payload.RepositoryPassword, + AuthorizationType: payload.RepositoryAuthorizationType, } } @@ -137,24 +139,31 @@ func (handler *Handler) createEdgeStackFromGitRepository(r *http.Request, tx dat } func (handler *Handler) storeManifestFromGitRepository(tx dataservices.DataStoreTx, stackFolder string, relatedEndpointIds []portainer.EndpointID, deploymentType portainer.EdgeStackDeploymentType, currentUserID portainer.UserID, repositoryConfig gittypes.RepoConfig) (composePath, manifestPath, projectPath string, err error) { - hasWrongType, err := hasWrongEnvironmentType(tx.Endpoint(), relatedEndpointIds, deploymentType) - if err != nil { + if hasWrongType, err := hasWrongEnvironmentType(tx.Endpoint(), relatedEndpointIds, deploymentType); err != nil { return "", "", "", fmt.Errorf("unable to check for existence of non fitting environments: %w", err) - } - if hasWrongType { + } else if hasWrongType { return "", "", "", errors.New("edge stack with config do not match the environment type") } projectPath = handler.FileService.GetEdgeStackProjectPath(stackFolder) repositoryUsername := "" repositoryPassword := "" + repositoryAuthType := gittypes.GitCredentialAuthType_Basic if repositoryConfig.Authentication != nil && repositoryConfig.Authentication.Password != "" { repositoryUsername = repositoryConfig.Authentication.Username repositoryPassword = repositoryConfig.Authentication.Password + repositoryAuthType = repositoryConfig.Authentication.AuthorizationType } - err = handler.GitService.CloneRepository(projectPath, repositoryConfig.URL, repositoryConfig.ReferenceName, repositoryUsername, repositoryPassword, repositoryConfig.TLSSkipVerify) - if err != nil { + if err := handler.GitService.CloneRepository( + projectPath, + repositoryConfig.URL, + repositoryConfig.ReferenceName, + repositoryUsername, + repositoryPassword, + repositoryAuthType, + repositoryConfig.TLSSkipVerify, + ); err != nil { return "", "", "", err } diff --git a/api/http/handler/edgestacks/edgestack_create_string.go b/api/http/handler/edgestacks/edgestack_create_string.go index 556633fae..5e3fb57b8 100644 --- a/api/http/handler/edgestacks/edgestack_create_string.go +++ b/api/http/handler/edgestacks/edgestack_create_string.go @@ -76,8 +76,7 @@ func (payload *edgeStackFromStringPayload) Validate(r *http.Request) error { // @router /edge_stacks/create/string [post] func (handler *Handler) createEdgeStackFromFileContent(r *http.Request, tx dataservices.DataStoreTx, dryrun bool) (*portainer.EdgeStack, error) { var payload edgeStackFromStringPayload - err := request.DecodeAndValidateJSONPayload(r, &payload) - if err != nil { + if err := 
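The repository-based creation path above now carries a Git authorization type from the payload through to CloneRepository. A condensed view of the new call shape; the credentials, URLs and import paths are placeholders or assumptions, while the field names and parameter order come from this change:

package main

import (
	portainer "github.com/portainer/portainer/api"
	gittypes "github.com/portainer/portainer/api/git/types" // import path assumed
)

// cloneWithAuth shows the updated CloneRepository signature: the authorization
// type now travels alongside the username and password.
func cloneWithAuth(gitService portainer.GitService, projectPath string) error {
	auth := &gittypes.GitAuthentication{
		Username:          "deploy-bot", // placeholder
		Password:          "s3cret",     // placeholder
		AuthorizationType: gittypes.GitCredentialAuthType_Basic,
	}

	return gitService.CloneRepository(
		projectPath,
		"https://example.com/stacks.git", // repository URL (placeholder)
		"refs/heads/main",                // reference name
		auth.Username,
		auth.Password,
		auth.AuthorizationType,
		false, // tlsSkipVerify
	)
}

func main() {} // placeholder so the sketch builds on its own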
request.DecodeAndValidateJSONPayload(r, &payload); err != nil { return nil, err } @@ -96,11 +95,9 @@ func (handler *Handler) createEdgeStackFromFileContent(r *http.Request, tx datas } func (handler *Handler) storeFileContent(tx dataservices.DataStoreTx, stackFolder string, deploymentType portainer.EdgeStackDeploymentType, relatedEndpointIds []portainer.EndpointID, fileContent []byte) (composePath, manifestPath, projectPath string, err error) { - hasWrongType, err := hasWrongEnvironmentType(tx.Endpoint(), relatedEndpointIds, deploymentType) - if err != nil { + if hasWrongType, err := hasWrongEnvironmentType(tx.Endpoint(), relatedEndpointIds, deploymentType); err != nil { return "", "", "", fmt.Errorf("unable to check for existence of non fitting environments: %w", err) - } - if hasWrongType { + } else if hasWrongType { return "", "", "", errors.New("edge stack with config do not match the environment type") } @@ -124,7 +121,6 @@ func (handler *Handler) storeFileContent(tx dataservices.DataStoreTx, stackFolde } return "", manifestPath, projectPath, nil - } errMessage := fmt.Sprintf("invalid deployment type: %d", deploymentType) diff --git a/api/http/handler/edgestacks/edgestack_create_test.go b/api/http/handler/edgestacks/edgestack_create_test.go index 32158d300..70252c25d 100644 --- a/api/http/handler/edgestacks/edgestack_create_test.go +++ b/api/http/handler/edgestacks/edgestack_create_test.go @@ -8,8 +8,10 @@ import ( "testing" portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/roar" "github.com/segmentio/encoding/json" + "github.com/stretchr/testify/require" ) // Create @@ -23,14 +25,12 @@ func TestCreateAndInspect(t *testing.T) { Name: "EdgeGroup 1", Dynamic: false, TagIDs: nil, - Endpoints: []portainer.EndpointID{endpoint.ID}, + EndpointIDs: roar.FromSlice([]portainer.EndpointID{endpoint.ID}), PartialMatch: false, } err := handler.DataStore.EdgeGroup().Create(&edgeGroup) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) endpointRelation := portainer.EndpointRelation{ EndpointID: endpoint.ID, @@ -38,9 +38,7 @@ func TestCreateAndInspect(t *testing.T) { } err = handler.DataStore.EndpointRelation().Create(&endpointRelation) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) payload := edgeStackFromStringPayload{ Name: "test-stack", @@ -50,16 +48,14 @@ func TestCreateAndInspect(t *testing.T) { } jsonPayload, err := json.Marshal(payload) - if err != nil { - t.Fatal("JSON marshal error:", err) - } + require.NoError(t, err) + r := bytes.NewBuffer(jsonPayload) // Create EdgeStack req, err := http.NewRequest(http.MethodPost, "/edge_stacks/create/string", r) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) + req.Header.Add("x-api-key", rawAPIKey) rec := httptest.NewRecorder() handler.ServeHTTP(rec, req) @@ -70,15 +66,11 @@ func TestCreateAndInspect(t *testing.T) { data := portainer.EdgeStack{} err = json.NewDecoder(rec.Body).Decode(&data) - if err != nil { - t.Fatal("error decoding response:", err) - } + require.NoError(t, err) // Inspect req, err = http.NewRequest(http.MethodGet, fmt.Sprintf("/edge_stacks/%d", data.ID), nil) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) req.Header.Add("x-api-key", rawAPIKey) rec = httptest.NewRecorder() @@ -90,9 +82,7 @@ func TestCreateAndInspect(t *testing.T) { data = portainer.EdgeStack{} err = json.NewDecoder(rec.Body).Decode(&data) - if err != nil { - t.Fatal("error decoding response:", err) - } + require.NoError(t, err) if 
payload.Name != data.Name { t.Fatalf("expected EdgeStack Name %s, found %s", payload.Name, data.Name) diff --git a/api/http/handler/edgestacks/edgestack_delete.go b/api/http/handler/edgestacks/edgestack_delete.go index 3d71f2bce..0e6307684 100644 --- a/api/http/handler/edgestacks/edgestack_delete.go +++ b/api/http/handler/edgestacks/edgestack_delete.go @@ -30,10 +30,9 @@ func (handler *Handler) edgeStackDelete(w http.ResponseWriter, r *http.Request) return httperror.BadRequest("Invalid edge stack identifier route variable", err) } - err = handler.DataStore.UpdateTx(func(tx dataservices.DataStoreTx) error { + if err := handler.DataStore.UpdateTx(func(tx dataservices.DataStoreTx) error { return handler.deleteEdgeStack(tx, portainer.EdgeStackID(edgeStackID)) - }) - if err != nil { + }); err != nil { var httpErr *httperror.HandlerError if errors.As(err, &httpErr) { return httpErr diff --git a/api/http/handler/edgestacks/edgestack_delete_test.go b/api/http/handler/edgestacks/edgestack_delete_test.go index ef25ae45c..ca334c7ce 100644 --- a/api/http/handler/edgestacks/edgestack_delete_test.go +++ b/api/http/handler/edgestacks/edgestack_delete_test.go @@ -8,9 +8,10 @@ import ( "testing" portainer "github.com/portainer/portainer/api" - "github.com/stretchr/testify/assert" "github.com/segmentio/encoding/json" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // Delete @@ -23,9 +24,7 @@ func TestDeleteAndInspect(t *testing.T) { // Inspect req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("/edge_stacks/%d", edgeStack.ID), nil) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) req.Header.Add("x-api-key", rawAPIKey) rec := httptest.NewRecorder() @@ -37,9 +36,7 @@ func TestDeleteAndInspect(t *testing.T) { data := portainer.EdgeStack{} err = json.NewDecoder(rec.Body).Decode(&data) - if err != nil { - t.Fatal("error decoding response:", err) - } + require.NoError(t, err) if data.ID != edgeStack.ID { t.Fatalf("expected EdgeStackID %d, found %d", int(edgeStack.ID), data.ID) @@ -47,9 +44,7 @@ func TestDeleteAndInspect(t *testing.T) { // Delete req, err = http.NewRequest(http.MethodDelete, fmt.Sprintf("/edge_stacks/%d", edgeStack.ID), nil) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) req.Header.Add("x-api-key", rawAPIKey) rec = httptest.NewRecorder() @@ -61,9 +56,7 @@ func TestDeleteAndInspect(t *testing.T) { // Inspect req, err = http.NewRequest(http.MethodGet, fmt.Sprintf("/edge_stacks/%d", edgeStack.ID), nil) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) req.Header.Add("x-api-key", rawAPIKey) rec = httptest.NewRecorder() @@ -117,15 +110,12 @@ func TestDeleteEdgeStack_RemoveProjectFolder(t *testing.T) { } var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(payload); err != nil { - t.Fatal("error encoding payload:", err) - } + err := json.NewEncoder(&buf).Encode(payload) + require.NoError(t, err) // Create req, err := http.NewRequest(http.MethodPost, "/edge_stacks/create/string", &buf) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) req.Header.Add("x-api-key", rawAPIKey) rec := httptest.NewRecorder() @@ -138,9 +128,8 @@ func TestDeleteEdgeStack_RemoveProjectFolder(t *testing.T) { assert.DirExists(t, handler.FileService.GetEdgeStackProjectPath("1")) // Delete - if req, err = http.NewRequest(http.MethodDelete, "/edge_stacks/1", nil); err != nil { - t.Fatal("request error:", err) - } + req, err = http.NewRequest(http.MethodDelete, 
"/edge_stacks/1", nil) + require.NoError(t, err) req.Header.Add("x-api-key", rawAPIKey) rec = httptest.NewRecorder() diff --git a/api/http/handler/edgestacks/edgestack_inspect.go b/api/http/handler/edgestacks/edgestack_inspect.go index 06c118835..2936f320e 100644 --- a/api/http/handler/edgestacks/edgestack_inspect.go +++ b/api/http/handler/edgestacks/edgestack_inspect.go @@ -4,6 +4,7 @@ import ( "net/http" portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/dataservices" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" "github.com/portainer/portainer/pkg/libhttp/response" @@ -33,5 +34,35 @@ func (handler *Handler) edgeStackInspect(w http.ResponseWriter, r *http.Request) return handlerDBErr(err, "Unable to find an edge stack with the specified identifier inside the database") } + if err := fillEdgeStackStatus(handler.DataStore, edgeStack); err != nil { + return handlerDBErr(err, "Unable to retrieve edge stack status from the database") + } + return response.JSON(w, edgeStack) } + +func fillEdgeStackStatus(tx dataservices.DataStoreTx, edgeStack *portainer.EdgeStack) error { + status, err := tx.EdgeStackStatus().ReadAll(edgeStack.ID) + if err != nil { + return err + } + + edgeStack.Status = make(map[portainer.EndpointID]portainer.EdgeStackStatus, len(status)) + + emptyStatus := make([]portainer.EdgeStackDeploymentStatus, 0) + + for _, s := range status { + if s.Status == nil { + s.Status = emptyStatus + } + + edgeStack.Status[s.EndpointID] = portainer.EdgeStackStatus{ + Status: s.Status, + EndpointID: s.EndpointID, + DeploymentInfo: s.DeploymentInfo, + ReadyRePullImage: s.ReadyRePullImage, + } + } + + return nil +} diff --git a/api/http/handler/edgestacks/edgestack_list.go b/api/http/handler/edgestacks/edgestack_list.go index 26fd7da05..1ea991c4b 100644 --- a/api/http/handler/edgestacks/edgestack_list.go +++ b/api/http/handler/edgestacks/edgestack_list.go @@ -3,10 +3,39 @@ package edgestacks import ( "net/http" + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/dataservices" + "github.com/portainer/portainer/api/slicesx" httperror "github.com/portainer/portainer/pkg/libhttp/error" + "github.com/portainer/portainer/pkg/libhttp/request" "github.com/portainer/portainer/pkg/libhttp/response" ) +type aggregatedStatusesMap map[portainer.EdgeStackStatusType]int + +type SummarizedStatus string + +const ( + sumStatusUnavailable SummarizedStatus = "Unavailable" + sumStatusDeploying SummarizedStatus = "Deploying" + sumStatusFailed SummarizedStatus = "Failed" + sumStatusPaused SummarizedStatus = "Paused" + sumStatusPartiallyRunning SummarizedStatus = "PartiallyRunning" + sumStatusCompleted SummarizedStatus = "Completed" + sumStatusRunning SummarizedStatus = "Running" +) + +type edgeStackStatusSummary struct { + AggregatedStatus aggregatedStatusesMap + Status SummarizedStatus + Reason string +} + +type edgeStackListResponseItem struct { + portainer.EdgeStack + StatusSummary edgeStackStatusSummary +} + // @id EdgeStackList // @summary Fetches the list of EdgeStacks // @description **Access policy**: administrator @@ -14,16 +43,122 @@ import ( // @security ApiKeyAuth // @security jwt // @produce json +// @param summarizeStatuses query boolean false "will summarize the statuses" // @success 200 {array} portainer.EdgeStack // @failure 500 // @failure 400 // @failure 503 "Edge compute features are disabled" // @router /edge_stacks [get] func (handler *Handler) edgeStackList(w 
http.ResponseWriter, r *http.Request) *httperror.HandlerError { + summarizeStatuses, _ := request.RetrieveBooleanQueryParameter(r, "summarizeStatuses", true) + edgeStacks, err := handler.DataStore.EdgeStack().EdgeStacks() if err != nil { return httperror.InternalServerError("Unable to retrieve edge stacks from the database", err) } - return response.JSON(w, edgeStacks) + res := make([]edgeStackListResponseItem, len(edgeStacks)) + + for i := range edgeStacks { + res[i].EdgeStack = edgeStacks[i] + + if summarizeStatuses { + if err := fillStatusSummary(handler.DataStore, &res[i]); err != nil { + return handlerDBErr(err, "Unable to retrieve edge stack status from the database") + } + } else if err := fillEdgeStackStatus(handler.DataStore, &res[i].EdgeStack); err != nil { + return handlerDBErr(err, "Unable to retrieve edge stack status from the database") + } + } + + return response.JSON(w, res) +} + +func fillStatusSummary(tx dataservices.DataStoreTx, edgeStack *edgeStackListResponseItem) error { + statuses, err := tx.EdgeStackStatus().ReadAll(edgeStack.ID) + if err != nil { + return err + } + + aggregated := make(aggregatedStatusesMap) + + for _, envStatus := range statuses { + for _, status := range envStatus.Status { + aggregated[status.Type]++ + } + } + + status, reason := SummarizeStatuses(statuses, edgeStack.NumDeployments) + + edgeStack.StatusSummary = edgeStackStatusSummary{ + AggregatedStatus: aggregated, + Status: status, + Reason: reason, + } + + edgeStack.Status = map[portainer.EndpointID]portainer.EdgeStackStatus{} + + return nil +} + +func SummarizeStatuses(statuses []portainer.EdgeStackStatusForEnv, numDeployments int) (SummarizedStatus, string) { + if numDeployments == 0 { + return sumStatusUnavailable, "Your edge stack is currently unavailable due to the absence of an available environment in your edge group" + } + + allStatuses := slicesx.FlatMap(statuses, func(x portainer.EdgeStackStatusForEnv) []portainer.EdgeStackDeploymentStatus { + return x.Status + }) + + lastStatuses := slicesx.Map( + slicesx.Filter( + statuses, + func(s portainer.EdgeStackStatusForEnv) bool { + return len(s.Status) > 0 + }, + ), + func(x portainer.EdgeStackStatusForEnv) portainer.EdgeStackDeploymentStatus { + return x.Status[len(x.Status)-1] + }, + ) + + if len(lastStatuses) == 0 { + return sumStatusDeploying, "" + } + + if allFailed := slicesx.Every(lastStatuses, func(s portainer.EdgeStackDeploymentStatus) bool { + return s.Type == portainer.EdgeStackStatusError + }); allFailed { + return sumStatusFailed, "" + } + + if hasPaused := slicesx.Some(allStatuses, func(s portainer.EdgeStackDeploymentStatus) bool { + return s.Type == portainer.EdgeStackStatusPausedDeploying + }); hasPaused { + return sumStatusPaused, "" + } + + if len(lastStatuses) < numDeployments { + return sumStatusDeploying, "" + } + + hasDeploying := slicesx.Some(lastStatuses, func(s portainer.EdgeStackDeploymentStatus) bool { return s.Type == portainer.EdgeStackStatusDeploying }) + hasRunning := slicesx.Some(lastStatuses, func(s portainer.EdgeStackDeploymentStatus) bool { return s.Type == portainer.EdgeStackStatusRunning }) + hasFailed := slicesx.Some(lastStatuses, func(s portainer.EdgeStackDeploymentStatus) bool { return s.Type == portainer.EdgeStackStatusError }) + + if hasRunning && hasFailed && !hasDeploying { + return sumStatusPartiallyRunning, "" + } + + if allCompleted := slicesx.Every(lastStatuses, func(s portainer.EdgeStackDeploymentStatus) bool { return s.Type == portainer.EdgeStackStatusCompleted }); allCompleted { + return 
sumStatusCompleted, "" + } + + if allRunning := slicesx.Every(lastStatuses, func(s portainer.EdgeStackDeploymentStatus) bool { + return s.Type == portainer.EdgeStackStatusRunning + }); allRunning { + return sumStatusRunning, "" + } + + return sumStatusDeploying, "" } diff --git a/api/http/handler/edgestacks/edgestack_status_delete.go b/api/http/handler/edgestacks/edgestack_status_delete.go deleted file mode 100644 index eb54951b6..000000000 --- a/api/http/handler/edgestacks/edgestack_status_delete.go +++ /dev/null @@ -1,87 +0,0 @@ -package edgestacks - -import ( - "errors" - "net/http" - "time" - - portainer "github.com/portainer/portainer/api" - "github.com/portainer/portainer/api/dataservices" - "github.com/portainer/portainer/api/http/middlewares" - httperror "github.com/portainer/portainer/pkg/libhttp/error" - "github.com/portainer/portainer/pkg/libhttp/request" - "github.com/portainer/portainer/pkg/libhttp/response" -) - -// @id EdgeStackStatusDelete -// @summary Delete an EdgeStack status -// @description Authorized only if the request is done by an Edge Environment(Endpoint) -// @tags edge_stacks -// @produce json -// @param id path int true "EdgeStack Id" -// @param environmentId path int true "Environment identifier" -// @success 200 {object} portainer.EdgeStack -// @failure 500 -// @failure 400 -// @failure 404 -// @failure 403 -// @deprecated -// @router /edge_stacks/{id}/status/{environmentId} [delete] -func (handler *Handler) edgeStackStatusDelete(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { - stackID, err := request.RetrieveNumericRouteVariableValue(r, "id") - if err != nil { - return httperror.BadRequest("Invalid stack identifier route variable", err) - } - - endpoint, err := middlewares.FetchEndpoint(r) - if err != nil { - return httperror.InternalServerError("Unable to retrieve a valid endpoint from the handler context", err) - } - - err = handler.requestBouncer.AuthorizedEdgeEndpointOperation(r, endpoint) - if err != nil { - return httperror.Forbidden("Permission denied to access environment", err) - } - - var stack *portainer.EdgeStack - err = handler.DataStore.UpdateTx(func(tx dataservices.DataStoreTx) error { - stack, err = handler.deleteEdgeStackStatus(tx, portainer.EdgeStackID(stackID), endpoint) - return err - }) - if err != nil { - var httpErr *httperror.HandlerError - if errors.As(err, &httpErr) { - return httpErr - } - - return httperror.InternalServerError("Unexpected error", err) - } - - return response.JSON(w, stack) -} - -func (handler *Handler) deleteEdgeStackStatus(tx dataservices.DataStoreTx, stackID portainer.EdgeStackID, endpoint *portainer.Endpoint) (*portainer.EdgeStack, error) { - stack, err := tx.EdgeStack().EdgeStack(stackID) - if err != nil { - return nil, handlerDBErr(err, "Unable to find a stack with the specified identifier inside the database") - } - - environmentStatus, ok := stack.Status[endpoint.ID] - if !ok { - environmentStatus = portainer.EdgeStackStatus{} - } - - environmentStatus.Status = append(environmentStatus.Status, portainer.EdgeStackDeploymentStatus{ - Time: time.Now().Unix(), - Type: portainer.EdgeStackStatusRemoved, - }) - - stack.Status[endpoint.ID] = environmentStatus - - err = tx.EdgeStack().UpdateEdgeStack(stack.ID, stack) - if err != nil { - return nil, httperror.InternalServerError("Unable to persist the stack changes inside the database", err) - } - - return stack, nil -} diff --git a/api/http/handler/edgestacks/edgestack_status_delete_test.go 
b/api/http/handler/edgestacks/edgestack_status_delete_test.go deleted file mode 100644 index 7a3db1315..000000000 --- a/api/http/handler/edgestacks/edgestack_status_delete_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package edgestacks - -import ( - "fmt" - "net/http" - "net/http/httptest" - "testing" - - portainer "github.com/portainer/portainer/api" -) - -func TestDeleteStatus(t *testing.T) { - handler, _ := setupHandler(t) - - endpoint := createEndpoint(t, handler.DataStore) - edgeStack := createEdgeStack(t, handler.DataStore, endpoint.ID) - - req, err := http.NewRequest(http.MethodDelete, fmt.Sprintf("/edge_stacks/%d/status/%d", edgeStack.ID, endpoint.ID), nil) - if err != nil { - t.Fatal("request error:", err) - } - - req.Header.Set(portainer.PortainerAgentEdgeIDHeader, endpoint.EdgeID) - rec := httptest.NewRecorder() - handler.ServeHTTP(rec, req) - - if rec.Code != http.StatusOK { - t.Fatalf("expected a %d response, found: %d", http.StatusOK, rec.Code) - } -} diff --git a/api/http/handler/edgestacks/edgestack_status_update.go b/api/http/handler/edgestacks/edgestack_status_update.go index 4cf912030..0ff6a9eff 100644 --- a/api/http/handler/edgestacks/edgestack_status_update.go +++ b/api/http/handler/edgestacks/edgestack_status_update.go @@ -9,11 +9,10 @@ import ( "time" portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/dataservices" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" "github.com/portainer/portainer/pkg/libhttp/response" - - "github.com/rs/zerolog/log" ) type updateStatusPayload struct { @@ -78,12 +77,25 @@ func (handler *Handler) edgeStackStatusUpdate(w http.ResponseWriter, r *http.Req return httperror.Forbidden("Permission denied to access environment", fmt.Errorf("unauthorized edge endpoint operation: %w. 
Environment name: %s", err, endpoint.Name)) } - updateFn := func(stack *portainer.EdgeStack) (*portainer.EdgeStack, error) { - return handler.updateEdgeStackStatus(stack, endpoint, r, stack.ID, payload) - } + var stack *portainer.EdgeStack - stack, err := handler.stackCoordinator.UpdateStatus(r, portainer.EdgeStackID(stackID), updateFn) - if err != nil { + if err := handler.DataStore.UpdateTx(func(tx dataservices.DataStoreTx) error { + var err error + stack, err = tx.EdgeStack().EdgeStack(portainer.EdgeStackID(stackID)) + if err != nil { + if dataservices.IsErrObjectNotFound(err) { + return nil + } + + return httperror.InternalServerError("Unable to retrieve Edge stack from the database", err) + } + + if err := handler.updateEdgeStackStatus(tx, stack, stack.ID, payload); err != nil { + return httperror.InternalServerError("Unable to update Edge stack status", err) + } + + return nil + }); err != nil { var httpErr *httperror.HandlerError if errors.As(err, &httpErr) { return httpErr @@ -96,43 +108,36 @@ func (handler *Handler) edgeStackStatusUpdate(w http.ResponseWriter, r *http.Req return nil } + if err := fillEdgeStackStatus(handler.DataStore, stack); err != nil { + return handlerDBErr(err, "Unable to retrieve edge stack status from the database") + } + return response.JSON(w, stack) } -func (handler *Handler) updateEdgeStackStatus(stack *portainer.EdgeStack, endpoint *portainer.Endpoint, r *http.Request, stackID portainer.EdgeStackID, payload updateStatusPayload) (*portainer.EdgeStack, error) { +func (handler *Handler) updateEdgeStackStatus(tx dataservices.DataStoreTx, stack *portainer.EdgeStack, stackID portainer.EdgeStackID, payload updateStatusPayload) error { if payload.Version > 0 && payload.Version < stack.Version { - return stack, nil + return nil } status := *payload.Status - log.Debug(). - Int("stackID", int(stackID)). - Int("status", int(status)). 
- Msg("Updating stack status") - deploymentStatus := portainer.EdgeStackDeploymentStatus{ Type: status, Error: payload.Error, Time: payload.Time, } - updateEnvStatus(payload.EndpointID, stack, deploymentStatus) - - return stack, nil -} - -func updateEnvStatus(environmentId portainer.EndpointID, stack *portainer.EdgeStack, deploymentStatus portainer.EdgeStackDeploymentStatus) { if deploymentStatus.Type == portainer.EdgeStackStatusRemoved { - delete(stack.Status, environmentId) - - return + return tx.EdgeStackStatus().Delete(stackID, payload.EndpointID) } - environmentStatus, ok := stack.Status[environmentId] - if !ok { - environmentStatus = portainer.EdgeStackStatus{ - EndpointID: environmentId, + environmentStatus, err := tx.EdgeStackStatus().Read(stackID, payload.EndpointID) + if err != nil && !tx.IsErrObjectNotFound(err) { + return err + } else if tx.IsErrObjectNotFound(err) { + environmentStatus = &portainer.EdgeStackStatusForEnv{ + EndpointID: payload.EndpointID, Status: []portainer.EdgeStackDeploymentStatus{}, } } @@ -143,5 +148,5 @@ func updateEnvStatus(environmentId portainer.EndpointID, stack *portainer.EdgeSt environmentStatus.Status = append(environmentStatus.Status, deploymentStatus) } - stack.Status[environmentId] = environmentStatus + return tx.EdgeStackStatus().Update(stackID, payload.EndpointID, environmentStatus) } diff --git a/api/http/handler/edgestacks/edgestack_status_update_coordinator.go b/api/http/handler/edgestacks/edgestack_status_update_coordinator.go deleted file mode 100644 index f81e2fc3e..000000000 --- a/api/http/handler/edgestacks/edgestack_status_update_coordinator.go +++ /dev/null @@ -1,150 +0,0 @@ -package edgestacks - -import ( - "errors" - "fmt" - "net/http" - - portainer "github.com/portainer/portainer/api" - "github.com/portainer/portainer/api/dataservices" - - "github.com/rs/zerolog/log" -) - -type statusRequest struct { - respCh chan statusResponse - stackID portainer.EdgeStackID - updateFn statusUpdateFn -} - -type statusResponse struct { - Stack *portainer.EdgeStack - Error error -} - -type statusUpdateFn func(*portainer.EdgeStack) (*portainer.EdgeStack, error) - -type EdgeStackStatusUpdateCoordinator struct { - updateCh chan statusRequest - dataStore dataservices.DataStore -} - -var errAnotherStackUpdateInProgress = errors.New("another stack update is in progress") - -func NewEdgeStackStatusUpdateCoordinator(dataStore dataservices.DataStore) *EdgeStackStatusUpdateCoordinator { - return &EdgeStackStatusUpdateCoordinator{ - updateCh: make(chan statusRequest), - dataStore: dataStore, - } -} - -func (c *EdgeStackStatusUpdateCoordinator) Start() { - for { - c.loop() - } -} - -func (c *EdgeStackStatusUpdateCoordinator) loop() { - u := <-c.updateCh - - respChs := []chan statusResponse{u.respCh} - - var stack *portainer.EdgeStack - - err := c.dataStore.UpdateTx(func(tx dataservices.DataStoreTx) error { - // 1. Load the edge stack - var err error - - stack, err = loadEdgeStack(tx, u.stackID) - if err != nil { - return err - } - - // 2. Mutate the edge stack opportunistically until there are no more pending updates - for { - stack, err = u.updateFn(stack) - if err != nil { - return err - } - - if m, ok := c.getNextUpdate(stack.ID); ok { - u = m - } else { - break - } - - respChs = append(respChs, u.respCh) - } - - // 3. 
Save the changes back to the database - if err := tx.EdgeStack().UpdateEdgeStack(stack.ID, stack); err != nil { - return handlerDBErr(fmt.Errorf("unable to update Edge stack: %w.", err), "Unable to persist the stack changes inside the database") - } - - return nil - }) - - // 4. Send back the responses - for _, ch := range respChs { - ch <- statusResponse{Stack: stack, Error: err} - } -} - -func loadEdgeStack(tx dataservices.DataStoreTx, stackID portainer.EdgeStackID) (*portainer.EdgeStack, error) { - stack, err := tx.EdgeStack().EdgeStack(stackID) - if err != nil { - if dataservices.IsErrObjectNotFound(err) { - // Skip the error when the agent tries to update the status on a deleted stack - log.Debug(). - Err(err). - Int("stackID", int(stackID)). - Msg("Unable to find a stack inside the database, skipping error") - - return nil, nil - } - - return nil, fmt.Errorf("unable to retrieve Edge stack from the database: %w.", err) - } - - return stack, nil -} - -func (c *EdgeStackStatusUpdateCoordinator) getNextUpdate(stackID portainer.EdgeStackID) (statusRequest, bool) { - for { - select { - case u := <-c.updateCh: - // Discard the update and let the agent retry - if u.stackID != stackID { - u.respCh <- statusResponse{Error: errAnotherStackUpdateInProgress} - - continue - } - - return u, true - - default: - return statusRequest{}, false - } - } -} - -func (c *EdgeStackStatusUpdateCoordinator) UpdateStatus(r *http.Request, stackID portainer.EdgeStackID, updateFn statusUpdateFn) (*portainer.EdgeStack, error) { - respCh := make(chan statusResponse) - defer close(respCh) - - msg := statusRequest{ - respCh: respCh, - stackID: stackID, - updateFn: updateFn, - } - - select { - case c.updateCh <- msg: - r := <-respCh - - return r.Stack, r.Error - - case <-r.Context().Done(): - return nil, r.Context().Err() - } -} diff --git a/api/http/handler/edgestacks/edgestack_status_update_test.go b/api/http/handler/edgestacks/edgestack_status_update_test.go index 50a0863d4..4d94368fe 100644 --- a/api/http/handler/edgestacks/edgestack_status_update_test.go +++ b/api/http/handler/edgestacks/edgestack_status_update_test.go @@ -10,6 +10,7 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/segmentio/encoding/json" + "github.com/stretchr/testify/require" ) // Update Status @@ -28,15 +29,11 @@ func TestUpdateStatusAndInspect(t *testing.T) { } jsonPayload, err := json.Marshal(payload) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) r := bytes.NewBuffer(jsonPayload) req, err := http.NewRequest(http.MethodPut, fmt.Sprintf("/edge_stacks/%d/status", edgeStack.ID), r) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) req.Header.Set(portainer.PortainerAgentEdgeIDHeader, endpoint.EdgeID) rec := httptest.NewRecorder() @@ -48,9 +45,7 @@ func TestUpdateStatusAndInspect(t *testing.T) { // Get updated edge stack req, err = http.NewRequest(http.MethodGet, fmt.Sprintf("/edge_stacks/%d", edgeStack.ID), nil) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) req.Header.Add("x-api-key", rawAPIKey) rec = httptest.NewRecorder() @@ -62,14 +57,10 @@ func TestUpdateStatusAndInspect(t *testing.T) { updatedStack := portainer.EdgeStack{} err = json.NewDecoder(rec.Body).Decode(&updatedStack) - if err != nil { - t.Fatal("error decoding response:", err) - } + require.NoError(t, err) endpointStatus, ok := updatedStack.Status[payload.EndpointID] - if !ok { - t.Fatal("Missing status") - } + require.True(t, ok) lastStatus := 
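With the status update coordinator removed above, status writes now go straight through a data store transaction and the per-environment EdgeStackStatus service. The pattern used by edgeStackStatusUpdate, reduced to its core read-modify-write and stripped of the version and removal handling (a sketch under those assumptions):

package main

import (
	portainer "github.com/portainer/portainer/api"
	"github.com/portainer/portainer/api/dataservices"
)

// persistStatus appends one deployment status for a single environment inside a
// single write transaction; no separate serialization goroutine is involved.
func persistStatus(store dataservices.DataStore, stackID portainer.EdgeStackID, endpointID portainer.EndpointID, status portainer.EdgeStackDeploymentStatus) error {
	return store.UpdateTx(func(tx dataservices.DataStoreTx) error {
		envStatus, err := tx.EdgeStackStatus().Read(stackID, endpointID)
		if tx.IsErrObjectNotFound(err) {
			envStatus = &portainer.EdgeStackStatusForEnv{EndpointID: endpointID}
		} else if err != nil {
			return err
		}

		envStatus.Status = append(envStatus.Status, status)

		return tx.EdgeStackStatus().Update(stackID, endpointID, envStatus)
	})
}

func main() {} // placeholder so the sketch builds on its own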
endpointStatus.Status[len(endpointStatus.Status)-1] @@ -84,8 +75,8 @@ func TestUpdateStatusAndInspect(t *testing.T) { if endpointStatus.EndpointID != payload.EndpointID { t.Fatalf("expected EndpointID %d, found %d", payload.EndpointID, endpointStatus.EndpointID) } - } + func TestUpdateStatusWithInvalidPayload(t *testing.T) { handler, _ := setupHandler(t) @@ -136,15 +127,11 @@ func TestUpdateStatusWithInvalidPayload(t *testing.T) { for _, tc := range cases { t.Run(tc.Name, func(t *testing.T) { jsonPayload, err := json.Marshal(tc.Payload) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) r := bytes.NewBuffer(jsonPayload) req, err := http.NewRequest(http.MethodPut, fmt.Sprintf("/edge_stacks/%d/status", edgeStack.ID), r) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) req.Header.Set(portainer.PortainerAgentEdgeIDHeader, endpoint.EdgeID) rec := httptest.NewRecorder() diff --git a/api/http/handler/edgestacks/edgestack_test.go b/api/http/handler/edgestacks/edgestack_test.go index ce1e9b659..38fd4be55 100644 --- a/api/http/handler/edgestacks/edgestack_test.go +++ b/api/http/handler/edgestacks/edgestack_test.go @@ -15,8 +15,10 @@ import ( "github.com/portainer/portainer/api/internal/edge/edgestacks" "github.com/portainer/portainer/api/internal/testhelpers" "github.com/portainer/portainer/api/jwt" + "github.com/portainer/portainer/api/roar" "github.com/pkg/errors" + "github.com/stretchr/testify/require" ) // Helpers @@ -51,27 +53,21 @@ func setupHandler(t *testing.T) (*Handler, string) { t.Fatal(err) } - coord := NewEdgeStackStatusUpdateCoordinator(store) - go coord.Start() - handler := NewHandler( security.NewRequestBouncer(store, jwtService, apiKeyService), store, edgestacks.NewService(store), - coord, ) handler.FileService = fs settings, err := handler.DataStore.Settings().Settings() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + settings.EnableEdgeComputeFeatures = true - if err := handler.DataStore.Settings().UpdateSettings(settings); err != nil { - t.Fatal(err) - } + err = handler.DataStore.Settings().UpdateSettings(settings) + require.NoError(t, err) handler.GitService = testhelpers.NewGitService(errors.New("Clone error"), "git-service-id") @@ -90,9 +86,8 @@ func createEndpointWithId(t *testing.T, store dataservices.DataStore, endpointID LastCheckInDate: time.Now().Unix(), } - if err := store.Endpoint().Create(&endpoint); err != nil { - t.Fatal(err) - } + err := store.Endpoint().Create(&endpoint) + require.NoError(t, err) return endpoint } @@ -109,19 +104,17 @@ func createEdgeStack(t *testing.T, store dataservices.DataStore, endpointID port Name: "EdgeGroup 1", Dynamic: false, TagIDs: nil, - Endpoints: []portainer.EndpointID{endpointID}, + EndpointIDs: roar.FromSlice([]portainer.EndpointID{endpointID}), PartialMatch: false, } - if err := store.EdgeGroup().Create(&edgeGroup); err != nil { - t.Fatal(err) - } + err := store.EdgeGroup().Create(&edgeGroup) + require.NoError(t, err) edgeStackID := portainer.EdgeStackID(14) edgeStack := portainer.EdgeStack{ ID: edgeStackID, Name: "test-edge-stack-" + strconv.Itoa(int(edgeStackID)), - Status: map[portainer.EndpointID]portainer.EdgeStackStatus{}, CreationDate: time.Now().Unix(), EdgeGroups: []portainer.EdgeGroupID{edgeGroup.ID}, ProjectPath: "/project/path", @@ -138,13 +131,11 @@ func createEdgeStack(t *testing.T, store dataservices.DataStore, endpointID port }, } - if err := store.EdgeStack().Create(edgeStack.ID, &edgeStack); err != nil { - t.Fatal(err) - } + err = 
store.EdgeStack().Create(edgeStack.ID, &edgeStack) + require.NoError(t, err) - if err := store.EndpointRelation().Create(&endpointRelation); err != nil { - t.Fatal(err) - } + err = store.EndpointRelation().Create(&endpointRelation) + require.NoError(t, err) return edgeStack } @@ -155,8 +146,8 @@ func createEdgeGroup(t *testing.T, store dataservices.DataStore) portainer.EdgeG Name: "EdgeGroup 1", } - if err := store.EdgeGroup().Create(&edgeGroup); err != nil { - t.Fatal(err) - } + err := store.EdgeGroup().Create(&edgeGroup) + require.NoError(t, err) + return edgeGroup } diff --git a/api/http/handler/edgestacks/edgestack_update.go b/api/http/handler/edgestacks/edgestack_update.go index 9b5bb623d..db896d0eb 100644 --- a/api/http/handler/edgestacks/edgestack_update.go +++ b/api/http/handler/edgestacks/edgestack_update.go @@ -74,6 +74,10 @@ func (handler *Handler) edgeStackUpdate(w http.ResponseWriter, r *http.Request) return httperror.InternalServerError("Unexpected error", err) } + if err := fillEdgeStackStatus(handler.DataStore, stack); err != nil { + return handlerDBErr(err, "Unable to retrieve edge stack status from the database") + } + return response.JSON(w, stack) } @@ -107,7 +111,7 @@ func (handler *Handler) updateEdgeStack(tx dataservices.DataStoreTx, stackID por hasWrongType, err := hasWrongEnvironmentType(tx.Endpoint(), relatedEndpointIds, payload.DeploymentType) if err != nil { - return nil, httperror.BadRequest("unable to check for existence of non fitting environments: %w", err) + return nil, httperror.InternalServerError("unable to check for existence of non fitting environments: %w", err) } if hasWrongType { return nil, httperror.BadRequest("edge stack with config do not match the environment type", nil) @@ -120,7 +124,7 @@ func (handler *Handler) updateEdgeStack(tx dataservices.DataStoreTx, stackID por stack.EdgeGroups = groupsIds if payload.UpdateVersion { - if err := handler.updateStackVersion(stack, payload.DeploymentType, []byte(payload.StackFileContent), "", relatedEndpointIds); err != nil { + if err := handler.updateStackVersion(tx, stack, payload.DeploymentType, []byte(payload.StackFileContent), "", relatedEndpointIds); err != nil { return nil, httperror.InternalServerError("Unable to update stack version", err) } } @@ -138,48 +142,23 @@ func (handler *Handler) handleChangeEdgeGroups(tx dataservices.DataStoreTx, edge return nil, nil, errors.WithMessage(err, "Unable to retrieve edge stack related environments from database") } - oldRelatedSet := set.ToSet(oldRelatedEnvironmentIDs) - newRelatedSet := set.ToSet(newRelatedEnvironmentIDs) + oldRelatedEnvironmentsSet := set.ToSet(oldRelatedEnvironmentIDs) + newRelatedEnvironmentsSet := set.ToSet(newRelatedEnvironmentIDs) - endpointsToRemove := set.Set[portainer.EndpointID]{} - for endpointID := range oldRelatedSet { - if !newRelatedSet[endpointID] { - endpointsToRemove[endpointID] = true + relatedEnvironmentsToAdd := newRelatedEnvironmentsSet.Difference(oldRelatedEnvironmentsSet) + relatedEnvironmentsToRemove := oldRelatedEnvironmentsSet.Difference(newRelatedEnvironmentsSet) + + if len(relatedEnvironmentsToRemove) > 0 { + if err := tx.EndpointRelation().RemoveEndpointRelationsForEdgeStack(relatedEnvironmentsToRemove.Keys(), edgeStackID); err != nil { + return nil, nil, errors.WithMessage(err, "Unable to remove edge stack relations from the database") } } - for endpointID := range endpointsToRemove { - relation, err := tx.EndpointRelation().EndpointRelation(endpointID) - if err != nil { - return nil, nil, 
errors.WithMessage(err, "Unable to find environment relation in database") - } - - delete(relation.EdgeStacks, edgeStackID) - - if err := tx.EndpointRelation().UpdateEndpointRelation(endpointID, relation); err != nil { - return nil, nil, errors.WithMessage(err, "Unable to persist environment relation in database") + if len(relatedEnvironmentsToAdd) > 0 { + if err := tx.EndpointRelation().AddEndpointRelationsForEdgeStack(relatedEnvironmentsToAdd.Keys(), edgeStackID); err != nil { + return nil, nil, errors.WithMessage(err, "Unable to add edge stack relations to the database") } } - endpointsToAdd := set.Set[portainer.EndpointID]{} - for endpointID := range newRelatedSet { - if !oldRelatedSet[endpointID] { - endpointsToAdd[endpointID] = true - } - } - - for endpointID := range endpointsToAdd { - relation, err := tx.EndpointRelation().EndpointRelation(endpointID) - if err != nil { - return nil, nil, errors.WithMessage(err, "Unable to find environment relation in database") - } - - relation.EdgeStacks[edgeStackID] = true - - if err := tx.EndpointRelation().UpdateEndpointRelation(endpointID, relation); err != nil { - return nil, nil, errors.WithMessage(err, "Unable to persist environment relation in database") - } - } - - return newRelatedEnvironmentIDs, endpointsToAdd, nil + return newRelatedEnvironmentIDs, relatedEnvironmentsToAdd, nil } diff --git a/api/http/handler/edgestacks/edgestack_update_test.go b/api/http/handler/edgestacks/edgestack_update_test.go index 7e4a9b23c..8040af329 100644 --- a/api/http/handler/edgestacks/edgestack_update_test.go +++ b/api/http/handler/edgestacks/edgestack_update_test.go @@ -9,9 +9,10 @@ import ( "testing" portainer "github.com/portainer/portainer/api" - "github.com/stretchr/testify/require" + "github.com/portainer/portainer/api/roar" "github.com/segmentio/encoding/json" + "github.com/stretchr/testify/require" ) // Update @@ -25,9 +26,8 @@ func TestUpdateAndInspect(t *testing.T) { endpointID := portainer.EndpointID(6) newEndpoint := createEndpointWithId(t, handler.DataStore, endpointID) - if err := handler.DataStore.Endpoint().Create(&newEndpoint); err != nil { - t.Fatal(err) - } + err := handler.DataStore.Endpoint().Create(&newEndpoint) + require.NoError(t, err) endpointRelation := portainer.EndpointRelation{ EndpointID: endpointID, @@ -36,22 +36,20 @@ func TestUpdateAndInspect(t *testing.T) { }, } - if err := handler.DataStore.EndpointRelation().Create(&endpointRelation); err != nil { - t.Fatal(err) - } + err = handler.DataStore.EndpointRelation().Create(&endpointRelation) + require.NoError(t, err) newEdgeGroup := portainer.EdgeGroup{ ID: 2, Name: "EdgeGroup 2", Dynamic: false, TagIDs: nil, - Endpoints: []portainer.EndpointID{newEndpoint.ID}, + EndpointIDs: roar.FromSlice([]portainer.EndpointID{newEndpoint.ID}), PartialMatch: false, } - if err := handler.DataStore.EdgeGroup().Create(&newEdgeGroup); err != nil { - t.Fatal(err) - } + err = handler.DataStore.EdgeGroup().Create(&newEdgeGroup) + require.NoError(t, err) payload := updateEdgeStackPayload{ StackFileContent: "update-test", @@ -61,15 +59,11 @@ func TestUpdateAndInspect(t *testing.T) { } jsonPayload, err := json.Marshal(payload) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) r := bytes.NewBuffer(jsonPayload) req, err := http.NewRequest(http.MethodPut, fmt.Sprintf("/edge_stacks/%d", edgeStack.ID), r) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) req.Header.Add("x-api-key", rawAPIKey) rec := httptest.NewRecorder() @@ -81,9 +75,7 @@ 
func TestUpdateAndInspect(t *testing.T) { // Get updated edge stack req, err = http.NewRequest(http.MethodGet, fmt.Sprintf("/edge_stacks/%d", edgeStack.ID), nil) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) req.Header.Add("x-api-key", rawAPIKey) rec = httptest.NewRecorder() @@ -94,9 +86,8 @@ func TestUpdateAndInspect(t *testing.T) { } updatedStack := portainer.EdgeStack{} - if err := json.NewDecoder(rec.Body).Decode(&updatedStack); err != nil { - t.Fatal("error decoding response:", err) - } + err = json.NewDecoder(rec.Body).Decode(&updatedStack) + require.NoError(t, err) if payload.UpdateVersion && updatedStack.Version != edgeStack.Version+1 { t.Fatalf("expected EdgeStack version %d, found %d", edgeStack.Version+1, updatedStack.Version+1) @@ -122,7 +113,7 @@ func TestUpdateWithInvalidEdgeGroups(t *testing.T) { Name: "EdgeGroup 2", Dynamic: false, TagIDs: nil, - Endpoints: []portainer.EndpointID{8889}, + EndpointIDs: roar.FromSlice([]portainer.EndpointID{8889}), PartialMatch: false, } @@ -226,15 +217,11 @@ func TestUpdateWithInvalidPayload(t *testing.T) { for _, tc := range cases { t.Run(tc.Name, func(t *testing.T) { jsonPayload, err := json.Marshal(tc.Payload) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) r := bytes.NewBuffer(jsonPayload) req, err := http.NewRequest(http.MethodPut, fmt.Sprintf("/edge_stacks/%d", edgeStack.ID), r) - if err != nil { - t.Fatal("request error:", err) - } + require.NoError(t, err) req.Header.Add("x-api-key", rawAPIKey) rec := httptest.NewRecorder() diff --git a/api/http/handler/edgestacks/handler.go b/api/http/handler/edgestacks/handler.go index 290524cb0..78df853a6 100644 --- a/api/http/handler/edgestacks/handler.go +++ b/api/http/handler/edgestacks/handler.go @@ -22,23 +22,19 @@ type Handler struct { GitService portainer.GitService edgeStacksService *edgestackservice.Service KubernetesDeployer portainer.KubernetesDeployer - stackCoordinator *EdgeStackStatusUpdateCoordinator } // NewHandler creates a handler to manage environment(endpoint) group operations. 
-func NewHandler(bouncer security.BouncerService, dataStore dataservices.DataStore, edgeStacksService *edgestackservice.Service, stackCoordinator *EdgeStackStatusUpdateCoordinator) *Handler { +func NewHandler(bouncer security.BouncerService, dataStore dataservices.DataStore, edgeStacksService *edgestackservice.Service) *Handler { h := &Handler{ Router: mux.NewRouter(), requestBouncer: bouncer, DataStore: dataStore, edgeStacksService: edgeStacksService, - stackCoordinator: stackCoordinator, } h.Handle("/edge_stacks/create/{method}", bouncer.AdminAccess(bouncer.EdgeComputeOperation(httperror.LoggerHandler(h.edgeStackCreate)))).Methods(http.MethodPost) - h.Handle("/edge_stacks", - bouncer.AdminAccess(bouncer.EdgeComputeOperation(middlewares.Deprecated(h, deprecatedEdgeStackCreateUrlParser)))).Methods(http.MethodPost) // Deprecated h.Handle("/edge_stacks", bouncer.AdminAccess(bouncer.EdgeComputeOperation(httperror.LoggerHandler(h.edgeStackList)))).Methods(http.MethodGet) h.Handle("/edge_stacks/{id}", @@ -55,8 +51,6 @@ func NewHandler(bouncer security.BouncerService, dataStore dataservices.DataStor edgeStackStatusRouter := h.NewRoute().Subrouter() edgeStackStatusRouter.Use(middlewares.WithEndpoint(h.DataStore.Endpoint(), "endpoint_id")) - edgeStackStatusRouter.PathPrefix("/edge_stacks/{id}/status/{endpoint_id}").Handler(bouncer.PublicAccess(httperror.LoggerHandler(h.edgeStackStatusDelete))).Methods(http.MethodDelete) - return h } diff --git a/api/http/handler/edgestacks/utils_update_stack_version.go b/api/http/handler/edgestacks/utils_update_stack_version.go index 2502a88f6..78ac5002f 100644 --- a/api/http/handler/edgestacks/utils_update_stack_version.go +++ b/api/http/handler/edgestacks/utils_update_stack_version.go @@ -5,15 +5,18 @@ import ( "strconv" portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/filesystem" - edgestackutils "github.com/portainer/portainer/api/internal/edge/edgestacks" "github.com/rs/zerolog/log" ) -func (handler *Handler) updateStackVersion(stack *portainer.EdgeStack, deploymentType portainer.EdgeStackDeploymentType, config []byte, oldGitHash string, relatedEnvironmentsIDs []portainer.EndpointID) error { - stack.Version = stack.Version + 1 - stack.Status = edgestackutils.NewStatus(stack.Status, relatedEnvironmentsIDs) +func (handler *Handler) updateStackVersion(tx dataservices.DataStoreTx, stack *portainer.EdgeStack, deploymentType portainer.EdgeStackDeploymentType, config []byte, oldGitHash string, relatedEnvironmentsIDs []portainer.EndpointID) error { + stack.Version++ + + if err := tx.EdgeStackStatus().Clear(stack.ID, relatedEnvironmentsIDs); err != nil { + return err + } return handler.storeStackFile(stack, deploymentType, config) } diff --git a/api/http/handler/edgetemplates/edgetemplate_list.go b/api/http/handler/edgetemplates/edgetemplate_list.go deleted file mode 100644 index 5b2c7254d..000000000 --- a/api/http/handler/edgetemplates/edgetemplate_list.go +++ /dev/null @@ -1,71 +0,0 @@ -package edgetemplates - -import ( - "net/http" - "slices" - - portainer "github.com/portainer/portainer/api" - "github.com/portainer/portainer/api/http/client" - httperror "github.com/portainer/portainer/pkg/libhttp/error" - "github.com/portainer/portainer/pkg/libhttp/response" - - "github.com/segmentio/encoding/json" -) - -type templateFileFormat struct { - Version string `json:"version"` - Templates []portainer.Template `json:"templates"` -} - -// @id EdgeTemplateList -// @deprecated -// @summary 
Fetches the list of Edge Templates -// @description **Access policy**: administrator -// @tags edge_templates -// @security ApiKeyAuth -// @security jwt -// @accept json -// @produce json -// @success 200 {array} portainer.Template -// @failure 500 -// @router /edge_templates [get] -func (handler *Handler) edgeTemplateList(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { - settings, err := handler.DataStore.Settings().Settings() - if err != nil { - return httperror.InternalServerError("Unable to retrieve settings from the database", err) - } - - url := portainer.DefaultTemplatesURL - if settings.TemplatesURL != "" { - url = settings.TemplatesURL - } - - var templateData []byte - templateData, err = client.Get(url, 10) - if err != nil { - return httperror.InternalServerError("Unable to retrieve external templates", err) - } - - var templateFile templateFileFormat - - err = json.Unmarshal(templateData, &templateFile) - if err != nil { - return httperror.InternalServerError("Unable to parse template file", err) - } - - // We only support version 3 of the template format - // this is only a temporary fix until we have custom edge templates - if templateFile.Version != "3" { - return httperror.InternalServerError("Unsupported template version", nil) - } - - filteredTemplates := make([]portainer.Template, 0) - - for _, template := range templateFile.Templates { - if slices.Contains(template.Categories, "edge") && slices.Contains([]portainer.TemplateType{portainer.ComposeStackTemplate, portainer.SwarmStackTemplate}, template.Type) { - filteredTemplates = append(filteredTemplates, template) - } - } - - return response.JSON(w, filteredTemplates) -} diff --git a/api/http/handler/edgetemplates/handler.go b/api/http/handler/edgetemplates/handler.go deleted file mode 100644 index d6c98553f..000000000 --- a/api/http/handler/edgetemplates/handler.go +++ /dev/null @@ -1,32 +0,0 @@ -package edgetemplates - -import ( - "net/http" - - "github.com/portainer/portainer/api/dataservices" - "github.com/portainer/portainer/api/http/middlewares" - "github.com/portainer/portainer/api/http/security" - httperror "github.com/portainer/portainer/pkg/libhttp/error" - - "github.com/gorilla/mux" -) - -// Handler is the HTTP handler used to handle edge environment(endpoint) operations. -type Handler struct { - *mux.Router - requestBouncer security.BouncerService - DataStore dataservices.DataStore -} - -// NewHandler creates a handler to manage environment(endpoint) operations. 
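// Sketch (illustrative only, not part of this patch): the updateStackVersion
// change earlier in this diff bumps the stack version and clears the persisted
// per-environment statuses inside the same transaction, instead of rebuilding
// an in-memory Status map. The snippet below assumes only the APIs visible in
// this diff (portainer.EdgeStack.Version and tx.EdgeStackStatus().Clear).
package example

import (
	portainer "github.com/portainer/portainer/api"
	"github.com/portainer/portainer/api/dataservices"
)

func bumpAndResetStatuses(tx dataservices.DataStoreTx, stack *portainer.EdgeStack, envIDs []portainer.EndpointID) error {
	stack.Version++ // agents redeploy when the version they last applied is older

	// Remove the stored status entries for the related environments so the
	// stack reads as Pending again until each agent reports back.
	return tx.EdgeStackStatus().Clear(stack.ID, envIDs)
}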
-func NewHandler(bouncer security.BouncerService) *Handler { - h := &Handler{ - Router: mux.NewRouter(), - requestBouncer: bouncer, - } - - h.Handle("/edge_templates", - bouncer.AdminAccess(middlewares.Deprecated(httperror.LoggerHandler(h.edgeTemplateList), func(w http.ResponseWriter, r *http.Request) (string, *httperror.HandlerError) { return "", nil }))).Methods(http.MethodGet) - - return h -} diff --git a/api/http/handler/endpointedge/endpointedge_status_inspect.go b/api/http/handler/endpointedge/endpointedge_status_inspect.go index 9bd341561..9c4e4bbe1 100644 --- a/api/http/handler/endpointedge/endpointedge_status_inspect.go +++ b/api/http/handler/endpointedge/endpointedge_status_inspect.go @@ -170,7 +170,7 @@ func (handler *Handler) inspectStatus(tx dataservices.DataStoreTx, r *http.Reque Credentials: tunnel.Credentials, } - schedules, handlerErr := handler.buildSchedules(tx, endpoint.ID) + schedules, handlerErr := handler.buildSchedules(tx, endpoint) if handlerErr != nil { return nil, handlerErr } @@ -208,7 +208,7 @@ func parseAgentPlatform(r *http.Request) (portainer.EndpointType, error) { } } -func (handler *Handler) buildSchedules(tx dataservices.DataStoreTx, endpointID portainer.EndpointID) ([]edgeJobResponse, *httperror.HandlerError) { +func (handler *Handler) buildSchedules(tx dataservices.DataStoreTx, endpoint *portainer.Endpoint) ([]edgeJobResponse, *httperror.HandlerError) { schedules := []edgeJobResponse{} edgeJobs, err := tx.EdgeJob().ReadAll() @@ -216,11 +216,16 @@ func (handler *Handler) buildSchedules(tx dataservices.DataStoreTx, endpointID p return nil, httperror.InternalServerError("Unable to retrieve Edge Jobs", err) } + endpointGroups, err := tx.EndpointGroup().ReadAll() + if err != nil { + return nil, httperror.InternalServerError("Unable to retrieve endpoint groups", err) + } + for _, job := range edgeJobs { - _, endpointHasJob := job.Endpoints[endpointID] + _, endpointHasJob := job.Endpoints[endpoint.ID] if !endpointHasJob { for _, edgeGroupID := range job.EdgeGroups { - member, _, err := edge.EndpointInEdgeGroup(tx, endpointID, edgeGroupID) + member, _, err := edge.EndpointInEdgeGroup(tx, endpoint, edgeGroupID, endpointGroups) if err != nil { return nil, httperror.InternalServerError("Unable to retrieve relations", err) } else if member { @@ -236,10 +241,10 @@ func (handler *Handler) buildSchedules(tx dataservices.DataStoreTx, endpointID p } var collectLogs bool - if _, ok := job.GroupLogsCollection[endpointID]; ok { - collectLogs = job.GroupLogsCollection[endpointID].CollectLogs + if _, ok := job.GroupLogsCollection[endpoint.ID]; ok { + collectLogs = job.GroupLogsCollection[endpoint.ID].CollectLogs } else { - collectLogs = job.Endpoints[endpointID].CollectLogs + collectLogs = job.Endpoints[endpoint.ID].CollectLogs } schedule := edgeJobResponse{ diff --git a/api/http/handler/endpointedge/endpointedge_status_inspect_test.go b/api/http/handler/endpointedge/endpointedge_status_inspect_test.go index 8bfaa9814..526fc58de 100644 --- a/api/http/handler/endpointedge/endpointedge_status_inspect_test.go +++ b/api/http/handler/endpointedge/endpointedge_status_inspect_test.go @@ -16,6 +16,7 @@ import ( "github.com/portainer/portainer/api/filesystem" "github.com/portainer/portainer/api/http/security" "github.com/portainer/portainer/api/jwt" + "github.com/portainer/portainer/api/roar" "github.com/segmentio/encoding/json" "github.com/stretchr/testify/assert" @@ -287,11 +288,8 @@ func TestEdgeStackStatus(t *testing.T) { edgeStackID := portainer.EdgeStackID(17) edgeStack := 
portainer.EdgeStack{ - ID: edgeStackID, - Name: "test-edge-stack-17", - Status: map[portainer.EndpointID]portainer.EdgeStackStatus{ - endpointID: {}, - }, + ID: edgeStackID, + Name: "test-edge-stack-17", CreationDate: time.Now().Unix(), EdgeGroups: []portainer.EdgeGroupID{1, 2}, ProjectPath: "/project/path", @@ -369,8 +367,8 @@ func TestEdgeJobsResponse(t *testing.T) { unrelatedEndpoint := localCreateEndpoint(80, nil) staticEdgeGroup := portainer.EdgeGroup{ - ID: 1, - Endpoints: []portainer.EndpointID{endpointFromStaticEdgeGroup.ID}, + ID: 1, + EndpointIDs: roar.FromSlice([]portainer.EndpointID{endpointFromStaticEdgeGroup.ID}), } err := handler.DataStore.EdgeGroup().Create(&staticEdgeGroup) require.NoError(t, err) diff --git a/api/http/handler/endpointgroups/endpoints.go b/api/http/handler/endpointgroups/endpoints.go index b7c9771bb..8b420f2a6 100644 --- a/api/http/handler/endpointgroups/endpoints.go +++ b/api/http/handler/endpointgroups/endpoints.go @@ -32,6 +32,9 @@ func (handler *Handler) updateEndpointRelations(tx dataservices.DataStoreTx, end edgeStacks, err := tx.EdgeStack().EdgeStacks() if err != nil { + if tx.IsErrObjectNotFound(err) { + return nil + } return err } diff --git a/api/http/handler/endpoints/endpoint_create.go b/api/http/handler/endpoints/endpoint_create.go index 1c6415023..03f2bee44 100644 --- a/api/http/handler/endpoints/endpoint_create.go +++ b/api/http/handler/endpoints/endpoint_create.go @@ -1,7 +1,6 @@ package endpoints import ( - "crypto/tls" "errors" "net/http" "runtime" @@ -285,8 +284,6 @@ func (handler *Handler) endpointCreate(w http.ResponseWriter, r *http.Request) * } func (handler *Handler) createEndpoint(tx dataservices.DataStoreTx, payload *endpointCreatePayload) (*portainer.Endpoint, *httperror.HandlerError) { - var err error - switch payload.EndpointCreationType { case azureEnvironment: return handler.createAzureEndpoint(tx, payload) @@ -301,12 +298,9 @@ func (handler *Handler) createEndpoint(tx dataservices.DataStoreTx, payload *end endpointType := portainer.DockerEnvironment var agentVersion string if payload.EndpointCreationType == agentEnvironment { - var tlsConfig *tls.Config - if payload.TLS { - tlsConfig, err = crypto.CreateTLSConfigurationFromBytes(payload.TLSCACertFile, payload.TLSCertFile, payload.TLSKeyFile, payload.TLSSkipVerify, payload.TLSSkipClientVerify) - if err != nil { - return nil, httperror.InternalServerError("Unable to create TLS configuration", err) - } + tlsConfig, err := crypto.CreateTLSConfigurationFromBytes(payload.TLS, payload.TLSCACertFile, payload.TLSCertFile, payload.TLSKeyFile, payload.TLSSkipVerify, payload.TLSSkipClientVerify) + if err != nil { + return nil, httperror.InternalServerError("Unable to create TLS configuration", err) } agentPlatform, version, err := agent.GetAgentVersionAndPlatform(payload.URL, tlsConfig) @@ -563,6 +557,10 @@ func (handler *Handler) saveEndpointAndUpdateAuthorizations(tx dataservices.Data return err } + if err := endpointutils.InitializeEdgeEndpointRelation(endpoint, tx); err != nil { + return err + } + for _, tagID := range endpoint.TagIDs { if err := tx.Tag().UpdateTagFunc(tagID, func(tag *portainer.Tag) { tag.Endpoints[endpoint.ID] = true diff --git a/api/http/handler/endpoints/endpoint_delete.go b/api/http/handler/endpoints/endpoint_delete.go index 0add2ccdf..a9b4ae5dc 100644 --- a/api/http/handler/endpoints/endpoint_delete.go +++ b/api/http/handler/endpoints/endpoint_delete.go @@ -3,7 +3,6 @@ package endpoints import ( "errors" "net/http" - "slices" "strconv" portainer 
"github.com/portainer/portainer/api" @@ -91,7 +90,7 @@ func (handler *Handler) endpointDelete(w http.ResponseWriter, r *http.Request) * // @failure 400 "Invalid request payload, such as missing required fields or fields not meeting validation criteria." // @failure 403 "Unauthorized access or operation not allowed." // @failure 500 "Server error occurred while attempting to delete the specified environments." -// @router /endpoints [delete] +// @router /endpoints/delete [post] func (handler *Handler) endpointDeleteBatch(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { var p endpointDeleteBatchPayload if err := request.DecodeAndValidateJSONPayload(r, &p); err != nil { @@ -127,6 +126,27 @@ func (handler *Handler) endpointDeleteBatch(w http.ResponseWriter, r *http.Reque return response.Empty(w) } +// @id EndpointDeleteBatchDeprecated +// @summary Remove multiple environments +// @deprecated +// @description Deprecated: use the `POST` endpoint instead. +// @description Remove multiple environments and optionally clean-up associated resources. +// @description **Access policy**: Administrator only. +// @tags endpoints +// @security ApiKeyAuth || jwt +// @accept json +// @produce json +// @param body body endpointDeleteBatchPayload true "List of environments to delete, with optional deleteCluster flag to clean-up associated resources (cloud environments only)" +// @success 204 "Environment(s) successfully deleted." +// @failure 207 {object} endpointDeleteBatchPartialResponse "Partial success. Some environments were deleted successfully, while others failed." +// @failure 400 "Invalid request payload, such as missing required fields or fields not meeting validation criteria." +// @failure 403 "Unauthorized access or operation not allowed." +// @failure 500 "Server error occurred while attempting to delete the specified environments." 
+// @router /endpoints [delete] +func (handler *Handler) endpointDeleteBatchDeprecated(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { + return handler.endpointDeleteBatch(w, r) +} + func (handler *Handler) deleteEndpoint(tx dataservices.DataStoreTx, endpointID portainer.EndpointID, deleteCluster bool) error { endpoint, err := tx.Endpoint().Endpoint(endpointID) if tx.IsErrObjectNotFound(err) { @@ -179,9 +199,7 @@ func (handler *Handler) deleteEndpoint(tx dataservices.DataStoreTx, endpointID p } for _, edgeGroup := range edgeGroups { - edgeGroup.Endpoints = slices.DeleteFunc(edgeGroup.Endpoints, func(e portainer.EndpointID) bool { - return e == endpoint.ID - }) + edgeGroup.EndpointIDs.Remove(endpoint.ID) if err := tx.EdgeGroup().Update(edgeGroup.ID, &edgeGroup); err != nil { log.Warn().Err(err).Msg("Unable to update edge group") @@ -193,14 +211,9 @@ func (handler *Handler) deleteEndpoint(tx dataservices.DataStoreTx, endpointID p log.Warn().Err(err).Msg("Unable to retrieve edge stacks from the database") } - for idx := range edgeStacks { - edgeStack := &edgeStacks[idx] - if _, ok := edgeStack.Status[endpoint.ID]; ok { - delete(edgeStack.Status, endpoint.ID) - - if err := tx.EdgeStack().UpdateEdgeStack(edgeStack.ID, edgeStack); err != nil { - log.Warn().Err(err).Msg("Unable to update edge stack") - } + for _, edgeStack := range edgeStacks { + if err := tx.EdgeStackStatus().Delete(edgeStack.ID, endpoint.ID); err != nil { + log.Warn().Err(err).Msg("Unable to delete edge stack status") } } diff --git a/api/http/handler/endpoints/endpoint_delete_test.go b/api/http/handler/endpoints/endpoint_delete_test.go index 309b45ffe..73b2b878b 100644 --- a/api/http/handler/endpoints/endpoint_delete_test.go +++ b/api/http/handler/endpoints/endpoint_delete_test.go @@ -11,6 +11,7 @@ import ( "github.com/portainer/portainer/api/datastore" "github.com/portainer/portainer/api/http/proxy" "github.com/portainer/portainer/api/internal/testhelpers" + "github.com/portainer/portainer/api/roar" ) func TestEndpointDeleteEdgeGroupsConcurrently(t *testing.T) { @@ -21,7 +22,7 @@ func TestEndpointDeleteEdgeGroupsConcurrently(t *testing.T) { handler := NewHandler(testhelpers.NewTestRequestBouncer()) handler.DataStore = store handler.ProxyManager = proxy.NewManager(nil) - handler.ProxyManager.NewProxyFactory(nil, nil, nil, nil, nil, nil, nil, nil) + handler.ProxyManager.NewProxyFactory(nil, nil, nil, nil, nil, nil, nil, nil, nil) // Create all the environments and add them to the same edge group @@ -42,9 +43,9 @@ func TestEndpointDeleteEdgeGroupsConcurrently(t *testing.T) { } if err := store.EdgeGroup().Create(&portainer.EdgeGroup{ - ID: 1, - Name: "edgegroup-1", - Endpoints: endpointIDs, + ID: 1, + Name: "edgegroup-1", + EndpointIDs: roar.FromSlice(endpointIDs), }); err != nil { t.Fatal("could not create edge group:", err) } @@ -78,7 +79,7 @@ func TestEndpointDeleteEdgeGroupsConcurrently(t *testing.T) { t.Fatal("could not retrieve the edge group:", err) } - if len(edgeGroup.Endpoints) > 0 { + if edgeGroup.EndpointIDs.Len() > 0 { t.Fatal("the edge group is not consistent") } } diff --git a/api/http/handler/endpoints/endpoint_dockerhub_status.go b/api/http/handler/endpoints/endpoint_dockerhub_status.go index 7427a8750..c7d43149f 100644 --- a/api/http/handler/endpoints/endpoint_dockerhub_status.go +++ b/api/http/handler/endpoints/endpoint_dockerhub_status.go @@ -80,6 +80,13 @@ func (handler *Handler) endpointDockerhubStatus(w http.ResponseWriter, r *http.R } } + if handler.PullLimitCheckDisabled { + return 
response.JSON(w, &dockerhubStatusResponse{ + Limit: 10, + Remaining: 10, + }) + } + httpClient := client.NewHTTPClient() token, err := getDockerHubToken(httpClient, registry) if err != nil { diff --git a/api/http/handler/endpoints/endpoint_inspect.go b/api/http/handler/endpoints/endpoint_inspect.go index e25e79720..132ab6350 100644 --- a/api/http/handler/endpoints/endpoint_inspect.go +++ b/api/http/handler/endpoints/endpoint_inspect.go @@ -19,6 +19,8 @@ import ( // @security jwt // @produce json // @param id path int true "Environment(Endpoint) identifier" +// @param excludeSnapshot query bool false "if true, the snapshot data won't be retrieved" +// @param excludeSnapshotRaw query bool false "if true, the SnapshotRaw field won't be retrieved" // @success 200 {object} portainer.Endpoint "Success" // @failure 400 "Invalid request" // @failure 404 "Environment(Endpoint) not found" @@ -37,8 +39,7 @@ func (handler *Handler) endpointInspect(w http.ResponseWriter, r *http.Request) return httperror.InternalServerError("Unable to find an environment with the specified identifier inside the database", err) } - err = handler.requestBouncer.AuthorizedEndpointOperation(r, endpoint) - if err != nil { + if err := handler.requestBouncer.AuthorizedEndpointOperation(r, endpoint); err != nil { return httperror.Forbidden("Permission denied to access environment", err) } @@ -51,9 +52,11 @@ func (handler *Handler) endpointInspect(w http.ResponseWriter, r *http.Request) endpointutils.UpdateEdgeEndpointHeartbeat(endpoint, settings) endpoint.ComposeSyntaxMaxVersion = handler.ComposeStackManager.ComposeSyntaxMaxVersion() - if !excludeSnapshot(r) { - err = handler.SnapshotService.FillSnapshotData(endpoint) - if err != nil { + excludeSnapshot, _ := request.RetrieveBooleanQueryParameter(r, "excludeSnapshot", true) + excludeRaw, _ := request.RetrieveBooleanQueryParameter(r, "excludeSnapshotRaw", true) + + if !excludeSnapshot { + if err := handler.SnapshotService.FillSnapshotData(endpoint, !excludeRaw); err != nil { return httperror.InternalServerError("Unable to add snapshot data", err) } } @@ -83,9 +86,3 @@ func (handler *Handler) endpointInspect(w http.ResponseWriter, r *http.Request) return response.JSON(w, endpoint) } - -func excludeSnapshot(r *http.Request) bool { - excludeSnapshot, _ := request.RetrieveBooleanQueryParameter(r, "excludeSnapshot", true) - - return excludeSnapshot -} diff --git a/api/http/handler/endpoints/endpoint_list.go b/api/http/handler/endpoints/endpoint_list.go index dc95e15b0..43b14ad6a 100644 --- a/api/http/handler/endpoints/endpoint_list.go +++ b/api/http/handler/endpoints/endpoint_list.go @@ -38,15 +38,19 @@ const ( // @param tagIds query []int false "search environments(endpoints) with these tags (depends on tagsPartialMatch)" // @param tagsPartialMatch query bool false "If true, will return environment(endpoint) which has one of tagIds, if false (or missing) will return only environments(endpoints) that has all the tags" // @param endpointIds query []int false "will return only these environments(endpoints)" +// @param excludeIds query []int false "will exclude these environments(endpoints)" // @param provisioned query bool false "If true, will return environment(endpoint) that were provisioned" // @param agentVersions query []string false "will return only environments with on of these agent versions" // @param edgeAsync query bool false "if exists true show only edge async agents, false show only standard edge agents. 
if missing, will show both types (relevant only for edge agents)" // @param edgeDeviceUntrusted query bool false "if true, show only untrusted edge agents, if false show only trusted edge agents (relevant only for edge agents)" // @param edgeCheckInPassedSeconds query number false "if bigger then zero, show only edge agents that checked-in in the last provided seconds (relevant only for edge agents)" // @param excludeSnapshots query bool false "if true, the snapshot data won't be retrieved" +// @param excludeSnapshotRaw query bool false "if true, the SnapshotRaw field won't be retrieved" // @param name query string false "will return only environments(endpoints) with this name" // @param edgeStackId query portainer.EdgeStackID false "will return the environements of the specified edge stack" // @param edgeStackStatus query string false "only applied when edgeStackId exists. Filter the returned environments based on their deployment status in the stack (not the environment status!)" Enum("Pending", "Ok", "Error", "Acknowledged", "Remove", "RemoteUpdateSuccess", "ImagesPulled") +// @param edgeGroupIds query []int false "List environments(endpoints) of these edge groups" +// @param excludeEdgeGroupIds query []int false "Exclude environments(endpoints) of these edge groups" // @success 200 {array} portainer.Endpoint "Endpoints" // @failure 500 "Server error" // @router /endpoints [get] @@ -59,6 +63,7 @@ func (handler *Handler) endpointList(w http.ResponseWriter, r *http.Request) *ht limit, _ := request.RetrieveNumericQueryParameter(r, "limit", true) sortField, _ := request.RetrieveQueryParameter(r, "sort", true) sortOrder, _ := request.RetrieveQueryParameter(r, "order", true) + excludeRaw, _ := request.RetrieveBooleanQueryParameter(r, "excludeSnapshotRaw", true) endpointGroups, err := handler.DataStore.EndpointGroup().ReadAll() if err != nil { @@ -90,12 +95,11 @@ func (handler *Handler) endpointList(w http.ResponseWriter, r *http.Request) *ht return httperror.BadRequest("Invalid query parameters", err) } - filteredEndpoints := security.FilterEndpoints(endpoints, endpointGroups, securityContext) - - filteredEndpoints, totalAvailableEndpoints, err := handler.filterEndpointsByQuery(filteredEndpoints, query, endpointGroups, edgeGroups, settings) + filteredEndpoints, totalAvailableEndpoints, err := handler.filterEndpointsByQuery(endpoints, query, endpointGroups, edgeGroups, settings, securityContext) if err != nil { return httperror.InternalServerError("Unable to filter endpoints", err) } + filteredEndpoints = security.FilterEndpoints(filteredEndpoints, endpointGroups, securityContext) sortEnvironmentsByField(filteredEndpoints, endpointGroups, getSortKey(sortField), sortOrder == "desc") @@ -105,14 +109,16 @@ func (handler *Handler) endpointList(w http.ResponseWriter, r *http.Request) *ht for idx := range paginatedEndpoints { hideFields(&paginatedEndpoints[idx]) + paginatedEndpoints[idx].ComposeSyntaxMaxVersion = handler.ComposeStackManager.ComposeSyntaxMaxVersion() if paginatedEndpoints[idx].EdgeCheckinInterval == 0 { paginatedEndpoints[idx].EdgeCheckinInterval = settings.EdgeAgentCheckinInterval } + endpointutils.UpdateEdgeEndpointHeartbeat(&paginatedEndpoints[idx], settings) + if !query.excludeSnapshots { - err = handler.SnapshotService.FillSnapshotData(&paginatedEndpoints[idx]) - if err != nil { + if err := handler.SnapshotService.FillSnapshotData(&paginatedEndpoints[idx], !excludeRaw); err != nil { return httperror.InternalServerError("Unable to add snapshot data", err) } } @@ -120,6 +126,7 @@ 
func (handler *Handler) endpointList(w http.ResponseWriter, r *http.Request) *ht w.Header().Set("X-Total-Count", strconv.Itoa(filteredEndpointCount)) w.Header().Set("X-Total-Available", strconv.Itoa(totalAvailableEndpoints)) + return response.JSON(w, paginatedEndpoints) } @@ -130,18 +137,8 @@ func paginateEndpoints(endpoints []portainer.Endpoint, start, limit int) []porta endpointCount := len(endpoints) - if start < 0 { - start = 0 - } - - if start > endpointCount { - start = endpointCount - } - - end := start + limit - if end > endpointCount { - end = endpointCount - } + start = min(max(start, 0), endpointCount) + end := min(start+limit, endpointCount) return endpoints[start:end] } @@ -151,8 +148,10 @@ func getEndpointGroup(groupID portainer.EndpointGroupID, groups []portainer.Endp for _, group := range groups { if group.ID == groupID { endpointGroup = group + break } } + return endpointGroup } diff --git a/api/http/handler/endpoints/endpoint_registries_list.go b/api/http/handler/endpoints/endpoint_registries_list.go index e81bc34a9..5bc4a930d 100644 --- a/api/http/handler/endpoints/endpoint_registries_list.go +++ b/api/http/handler/endpoints/endpoint_registries_list.go @@ -75,7 +75,7 @@ func (handler *Handler) listRegistries(tx dataservices.DataStoreTx, r *http.Requ return nil, httperror.InternalServerError("Unable to retrieve registries from the database", err) } - registries, handleError := handler.filterRegistriesByAccess(r, registries, endpoint, user, securityContext.UserMemberships) + registries, handleError := handler.filterRegistriesByAccess(tx, r, registries, endpoint, user, securityContext.UserMemberships) if handleError != nil { return nil, handleError } @@ -87,15 +87,15 @@ func (handler *Handler) listRegistries(tx dataservices.DataStoreTx, r *http.Requ return registries, err } -func (handler *Handler) filterRegistriesByAccess(r *http.Request, registries []portainer.Registry, endpoint *portainer.Endpoint, user *portainer.User, memberships []portainer.TeamMembership) ([]portainer.Registry, *httperror.HandlerError) { +func (handler *Handler) filterRegistriesByAccess(tx dataservices.DataStoreTx, r *http.Request, registries []portainer.Registry, endpoint *portainer.Endpoint, user *portainer.User, memberships []portainer.TeamMembership) ([]portainer.Registry, *httperror.HandlerError) { if !endpointutils.IsKubernetesEndpoint(endpoint) { return security.FilterRegistries(registries, user, memberships, endpoint.ID), nil } - return handler.filterKubernetesEndpointRegistries(r, registries, endpoint, user, memberships) + return handler.filterKubernetesEndpointRegistries(tx, r, registries, endpoint, user, memberships) } -func (handler *Handler) filterKubernetesEndpointRegistries(r *http.Request, registries []portainer.Registry, endpoint *portainer.Endpoint, user *portainer.User, memberships []portainer.TeamMembership) ([]portainer.Registry, *httperror.HandlerError) { +func (handler *Handler) filterKubernetesEndpointRegistries(tx dataservices.DataStoreTx, r *http.Request, registries []portainer.Registry, endpoint *portainer.Endpoint, user *portainer.User, memberships []portainer.TeamMembership) ([]portainer.Registry, *httperror.HandlerError) { namespaceParam, _ := request.RetrieveQueryParameter(r, "namespace", true) isAdmin, err := security.IsAdmin(r) if err != nil { @@ -116,7 +116,7 @@ func (handler *Handler) filterKubernetesEndpointRegistries(r *http.Request, regi return registries, nil } - return handler.filterKubernetesRegistriesByUserRole(r, registries, endpoint, user) + return 
handler.filterKubernetesRegistriesByUserRole(tx, r, registries, endpoint, user) } func (handler *Handler) isNamespaceAuthorized(endpoint *portainer.Endpoint, namespace string, userId portainer.UserID, memberships []portainer.TeamMembership, isAdmin bool) (bool, error) { @@ -169,7 +169,7 @@ func registryAccessPoliciesContainsNamespace(registryAccess portainer.RegistryAc return false } -func (handler *Handler) filterKubernetesRegistriesByUserRole(r *http.Request, registries []portainer.Registry, endpoint *portainer.Endpoint, user *portainer.User) ([]portainer.Registry, *httperror.HandlerError) { +func (handler *Handler) filterKubernetesRegistriesByUserRole(tx dataservices.DataStoreTx, r *http.Request, registries []portainer.Registry, endpoint *portainer.Endpoint, user *portainer.User) ([]portainer.Registry, *httperror.HandlerError) { err := handler.requestBouncer.AuthorizedEndpointOperation(r, endpoint) if errors.Is(err, security.ErrAuthorizationRequired) { return nil, httperror.Forbidden("User is not authorized", err) @@ -178,7 +178,7 @@ func (handler *Handler) filterKubernetesRegistriesByUserRole(r *http.Request, re return nil, httperror.InternalServerError("Unable to retrieve info from request context", err) } - userNamespaces, err := handler.userNamespaces(endpoint, user) + userNamespaces, err := handler.userNamespaces(tx, endpoint, user) if err != nil { return nil, httperror.InternalServerError("unable to retrieve user namespaces", err) } @@ -186,7 +186,7 @@ func (handler *Handler) filterKubernetesRegistriesByUserRole(r *http.Request, re return filterRegistriesByNamespaces(registries, endpoint.ID, userNamespaces), nil } -func (handler *Handler) userNamespaces(endpoint *portainer.Endpoint, user *portainer.User) ([]string, error) { +func (handler *Handler) userNamespaces(tx dataservices.DataStoreTx, endpoint *portainer.Endpoint, user *portainer.User) ([]string, error) { kcl, err := handler.K8sClientFactory.GetPrivilegedKubeClient(endpoint) if err != nil { return nil, err @@ -197,7 +197,7 @@ func (handler *Handler) userNamespaces(endpoint *portainer.Endpoint, user *porta return nil, err } - userMemberships, err := handler.DataStore.TeamMembership().TeamMembershipsByUserID(user.ID) + userMemberships, err := tx.TeamMembership().TeamMembershipsByUserID(user.ID) if err != nil { return nil, err } diff --git a/api/http/handler/endpoints/endpoint_update.go b/api/http/handler/endpoints/endpoint_update.go index 0f57136df..71c9ef702 100644 --- a/api/http/handler/endpoints/endpoint_update.go +++ b/api/http/handler/endpoints/endpoint_update.go @@ -272,7 +272,7 @@ func (handler *Handler) endpointUpdate(w http.ResponseWriter, r *http.Request) * } } - if err := handler.SnapshotService.FillSnapshotData(endpoint); err != nil { + if err := handler.SnapshotService.FillSnapshotData(endpoint, true); err != nil { return httperror.InternalServerError("Unable to add snapshot data", err) } diff --git a/api/http/handler/endpoints/filter.go b/api/http/handler/endpoints/filter.go index df230f99e..961cad147 100644 --- a/api/http/handler/endpoints/filter.go +++ b/api/http/handler/endpoints/filter.go @@ -11,9 +11,10 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/http/handler/edgegroups" + "github.com/portainer/portainer/api/http/security" "github.com/portainer/portainer/api/internal/edge" "github.com/portainer/portainer/api/internal/endpointutils" - "github.com/portainer/portainer/api/slicesx" + 
"github.com/portainer/portainer/api/roar" "github.com/portainer/portainer/pkg/libhttp/request" "github.com/pkg/errors" @@ -37,6 +38,8 @@ type EnvironmentsQuery struct { edgeStackId portainer.EdgeStackID edgeStackStatus *portainer.EdgeStackStatusType excludeIds []portainer.EndpointID + edgeGroupIds []portainer.EdgeGroupID + excludeEdgeGroupIds []portainer.EdgeGroupID } func parseQuery(r *http.Request) (EnvironmentsQuery, error) { @@ -77,6 +80,16 @@ func parseQuery(r *http.Request) (EnvironmentsQuery, error) { return EnvironmentsQuery{}, err } + edgeGroupIDs, err := getNumberArrayQueryParameter[portainer.EdgeGroupID](r, "edgeGroupIds") + if err != nil { + return EnvironmentsQuery{}, err + } + + excludeEdgeGroupIds, err := getNumberArrayQueryParameter[portainer.EdgeGroupID](r, "excludeEdgeGroupIds") + if err != nil { + return EnvironmentsQuery{}, err + } + agentVersions := getArrayQueryParameter(r, "agentVersions") name, _ := request.RetrieveQueryParameter(r, "name", true) @@ -117,6 +130,8 @@ func parseQuery(r *http.Request) (EnvironmentsQuery, error) { edgeCheckInPassedSeconds: edgeCheckInPassedSeconds, edgeStackId: portainer.EdgeStackID(edgeStackId), edgeStackStatus: edgeStackStatus, + edgeGroupIds: edgeGroupIDs, + excludeEdgeGroupIds: excludeEdgeGroupIds, }, nil } @@ -126,11 +141,14 @@ func (handler *Handler) filterEndpointsByQuery( groups []portainer.EndpointGroup, edgeGroups []portainer.EdgeGroup, settings *portainer.Settings, + context *security.RestrictedRequestContext, ) ([]portainer.Endpoint, int, error) { totalAvailableEndpoints := len(filteredEndpoints) if len(query.endpointIds) > 0 { - filteredEndpoints = filteredEndpointsByIds(filteredEndpoints, query.endpointIds) + endpointIDs := roar.FromSlice(query.endpointIds) + + filteredEndpoints = filteredEndpointsByIds(filteredEndpoints, endpointIDs) } if len(query.excludeIds) > 0 { @@ -143,6 +161,14 @@ func (handler *Handler) filterEndpointsByQuery( filteredEndpoints = filterEndpointsByGroupIDs(filteredEndpoints, query.groupIds) } + if len(query.edgeGroupIds) > 0 { + filteredEndpoints, edgeGroups = filterEndpointsByEdgeGroupIDs(filteredEndpoints, edgeGroups, query.edgeGroupIds) + } + + if len(query.excludeEdgeGroupIds) > 0 { + filteredEndpoints, edgeGroups = filterEndpointsByExcludeEdgeGroupIDs(filteredEndpoints, edgeGroups, query.excludeEdgeGroupIds) + } + if query.name != "" { filteredEndpoints = filterEndpointsByName(filteredEndpoints, query.name) } @@ -159,11 +185,16 @@ func (handler *Handler) filterEndpointsByQuery( } // filter edge environments by trusted/untrusted + // only portainer admins are allowed to see untrusted environments filteredEndpoints = filter(filteredEndpoints, func(endpoint portainer.Endpoint) bool { if !endpointutils.IsEdgeEndpoint(&endpoint) { return true } + if query.edgeDeviceUntrusted { + return !endpoint.UserTrusted && context.IsAdmin + } + return endpoint.UserTrusted == !query.edgeDeviceUntrusted }) @@ -225,19 +256,17 @@ func (handler *Handler) filterEndpointsByQuery( return filteredEndpoints, totalAvailableEndpoints, nil } -func endpointStatusInStackMatchesFilter(edgeStackStatus map[portainer.EndpointID]portainer.EdgeStackStatus, envId portainer.EndpointID, statusFilter portainer.EdgeStackStatusType) bool { - status, ok := edgeStackStatus[envId] - +func endpointStatusInStackMatchesFilter(stackStatus *portainer.EdgeStackStatusForEnv, envId portainer.EndpointID, statusFilter portainer.EdgeStackStatusType) bool { // consider that if the env has no status in the stack it is in Pending state if statusFilter == 
portainer.EdgeStackStatusPending { - return !ok || len(status.Status) == 0 + return stackStatus == nil || len(stackStatus.Status) == 0 } - if !ok { + if stackStatus == nil { return false } - return slices.ContainsFunc(status.Status, func(s portainer.EdgeStackDeploymentStatus) bool { + return slices.ContainsFunc(stackStatus.Status, func(s portainer.EdgeStackDeploymentStatus) bool { return s.Type == statusFilter }) } @@ -248,7 +277,7 @@ func filterEndpointsByEdgeStack(endpoints []portainer.Endpoint, edgeStackId port return nil, errors.WithMessage(err, "Unable to retrieve edge stack from the database") } - envIds := make([]portainer.EndpointID, 0) + envIds := roar.Roar[portainer.EndpointID]{} for _, edgeGroupdId := range stack.EdgeGroups { edgeGroup, err := datastore.EdgeGroup().Read(edgeGroupdId) if err != nil { @@ -260,25 +289,37 @@ func filterEndpointsByEdgeStack(endpoints []portainer.Endpoint, edgeStackId port if err != nil { return nil, errors.WithMessage(err, "Unable to retrieve environments and environment groups for Edge group") } - edgeGroup.Endpoints = endpointIDs + edgeGroup.EndpointIDs = roar.FromSlice(endpointIDs) } - envIds = append(envIds, edgeGroup.Endpoints...) + envIds.Union(edgeGroup.EndpointIDs) } if statusFilter != nil { - n := 0 - for _, envId := range envIds { - if endpointStatusInStackMatchesFilter(stack.Status, envId, *statusFilter) { - envIds[n] = envId - n++ + var innerErr error + + envIds.Iterate(func(envId portainer.EndpointID) bool { + edgeStackStatus, err := datastore.EdgeStackStatus().Read(edgeStackId, envId) + if dataservices.IsErrObjectNotFound(err) { + return true + } else if err != nil { + innerErr = errors.WithMessagef(err, "Unable to retrieve edge stack status for environment %d", envId) + return false } + + if !endpointStatusInStackMatchesFilter(edgeStackStatus, portainer.EndpointID(envId), *statusFilter) { + envIds.Remove(envId) + } + + return true + }) + + if innerErr != nil { + return nil, innerErr } - envIds = envIds[:n] } - uniqueIds := slicesx.Unique(envIds) - filteredEndpoints := filteredEndpointsByIds(endpoints, uniqueIds) + filteredEndpoints := filteredEndpointsByIds(endpoints, envIds) return filteredEndpoints, nil } @@ -295,6 +336,67 @@ func filterEndpointsByGroupIDs(endpoints []portainer.Endpoint, endpointGroupIDs return endpoints[:n] } +func filterEndpointsByEdgeGroupIDs(endpoints []portainer.Endpoint, edgeGroups []portainer.EdgeGroup, edgeGroupIDs []portainer.EdgeGroupID) ([]portainer.Endpoint, []portainer.EdgeGroup) { + edgeGroupIDFilterSet := make(map[portainer.EdgeGroupID]struct{}, len(edgeGroupIDs)) + for _, id := range edgeGroupIDs { + edgeGroupIDFilterSet[id] = struct{}{} + } + + n := 0 + for _, edgeGroup := range edgeGroups { + if _, exists := edgeGroupIDFilterSet[edgeGroup.ID]; exists { + edgeGroups[n] = edgeGroup + n++ + } + } + edgeGroups = edgeGroups[:n] + + endpointIDSet := roar.Roar[portainer.EndpointID]{} + for _, edgeGroup := range edgeGroups { + endpointIDSet.Union(edgeGroup.EndpointIDs) + } + + n = 0 + for _, endpoint := range endpoints { + if endpointIDSet.Contains(endpoint.ID) { + endpoints[n] = endpoint + n++ + } + } + + return endpoints[:n], edgeGroups +} + +func filterEndpointsByExcludeEdgeGroupIDs(endpoints []portainer.Endpoint, edgeGroups []portainer.EdgeGroup, excludeEdgeGroupIds []portainer.EdgeGroupID) ([]portainer.Endpoint, []portainer.EdgeGroup) { + excludeEdgeGroupIDSet := make(map[portainer.EdgeGroupID]struct{}, len(excludeEdgeGroupIds)) + for _, id := range excludeEdgeGroupIds { + excludeEdgeGroupIDSet[id] = 
struct{}{} + } + + n := 0 + excludeEndpointIDSet := roar.Roar[portainer.EndpointID]{} + + for _, edgeGroup := range edgeGroups { + if _, ok := excludeEdgeGroupIDSet[edgeGroup.ID]; ok { + excludeEndpointIDSet.Union(edgeGroup.EndpointIDs) + } else { + edgeGroups[n] = edgeGroup + n++ + } + } + edgeGroups = edgeGroups[:n] + + n = 0 + for _, endpoint := range endpoints { + if !excludeEndpointIDSet.Contains(endpoint.ID) { + endpoints[n] = endpoint + n++ + } + } + + return endpoints[:n], edgeGroups +} + func filterEndpointsBySearchCriteria( endpoints []portainer.Endpoint, endpointGroups []portainer.EndpointGroup, @@ -511,15 +613,10 @@ func endpointFullMatchTags(endpoint portainer.Endpoint, endpointGroup portainer. return len(missingTags) == 0 } -func filteredEndpointsByIds(endpoints []portainer.Endpoint, ids []portainer.EndpointID) []portainer.Endpoint { - idsSet := make(map[portainer.EndpointID]bool, len(ids)) - for _, id := range ids { - idsSet[id] = true - } - +func filteredEndpointsByIds(endpoints []portainer.Endpoint, ids roar.Roar[portainer.EndpointID]) []portainer.Endpoint { n := 0 for _, endpoint := range endpoints { - if idsSet[endpoint.ID] { + if ids.Contains(endpoint.ID) { endpoints[n] = endpoint n++ } diff --git a/api/http/handler/endpoints/filter_test.go b/api/http/handler/endpoints/filter_test.go index f19d0a276..642448b86 100644 --- a/api/http/handler/endpoints/filter_test.go +++ b/api/http/handler/endpoints/filter_test.go @@ -6,10 +6,13 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/datastore" + "github.com/portainer/portainer/api/http/security" "github.com/portainer/portainer/api/internal/testhelpers" + "github.com/portainer/portainer/api/roar" "github.com/portainer/portainer/api/slicesx" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type filterTest struct { @@ -174,7 +177,7 @@ func BenchmarkFilterEndpointsBySearchCriteria_PartialMatch(b *testing.B) { edgeGroups = append(edgeGroups, portainer.EdgeGroup{ ID: portainer.EdgeGroupID(i + 1), Name: "edge-group-" + strconv.Itoa(i+1), - Endpoints: append([]portainer.EndpointID{}, endpointIDs...), + EndpointIDs: roar.FromSlice(endpointIDs), Dynamic: true, TagIDs: []portainer.TagID{1, 2, 3}, PartialMatch: true, @@ -221,11 +224,11 @@ func BenchmarkFilterEndpointsBySearchCriteria_FullMatch(b *testing.B) { edgeGroups := []portainer.EdgeGroup{} for i := range 1000 { edgeGroups = append(edgeGroups, portainer.EdgeGroup{ - ID: portainer.EdgeGroupID(i + 1), - Name: "edge-group-" + strconv.Itoa(i+1), - Endpoints: append([]portainer.EndpointID{}, endpointIDs...), - Dynamic: true, - TagIDs: []portainer.TagID{1}, + ID: portainer.EdgeGroupID(i + 1), + Name: "edge-group-" + strconv.Itoa(i+1), + EndpointIDs: roar.FromSlice(endpointIDs), + Dynamic: true, + TagIDs: []portainer.TagID{1}, }) } @@ -263,6 +266,7 @@ func runTest(t *testing.T, test filterTest, handler *Handler, endpoints []portai []portainer.EndpointGroup{}, []portainer.EdgeGroup{}, &portainer.Settings{}, + &security.RestrictedRequestContext{IsAdmin: true}, ) is.NoError(err) @@ -298,3 +302,127 @@ func setupFilterTest(t *testing.T, endpoints []portainer.Endpoint) *Handler { return handler } + +func TestFilterEndpointsByEdgeStack(t *testing.T) { + _, store := datastore.MustNewTestStore(t, false, false) + + endpoints := []portainer.Endpoint{ + {ID: 1, Name: "Endpoint 1"}, + {ID: 2, Name: "Endpoint 2"}, + {ID: 3, Name: "Endpoint 3"}, + {ID: 4, Name: "Endpoint 4"}, + } + + edgeStackId := portainer.EdgeStackID(1) + + err 
:= store.EdgeStack().Create(edgeStackId, &portainer.EdgeStack{ + ID: edgeStackId, + Name: "Test Edge Stack", + EdgeGroups: []portainer.EdgeGroupID{1, 2}, + }) + require.NoError(t, err) + + err = store.EdgeGroup().Create(&portainer.EdgeGroup{ + ID: 1, + Name: "Edge Group 1", + EndpointIDs: roar.FromSlice([]portainer.EndpointID{1}), + }) + require.NoError(t, err) + + err = store.EdgeGroup().Create(&portainer.EdgeGroup{ + ID: 2, + Name: "Edge Group 2", + EndpointIDs: roar.FromSlice([]portainer.EndpointID{2, 3}), + }) + require.NoError(t, err) + + es, err := filterEndpointsByEdgeStack(endpoints, edgeStackId, nil, store) + require.NoError(t, err) + require.Len(t, es, 3) + require.Contains(t, es, endpoints[0]) // Endpoint 1 + require.Contains(t, es, endpoints[1]) // Endpoint 2 + require.Contains(t, es, endpoints[2]) // Endpoint 3 + require.NotContains(t, es, endpoints[3]) // Endpoint 4 +} + +func TestFilterEndpointsByEdgeGroup(t *testing.T) { + _, store := datastore.MustNewTestStore(t, false, false) + + endpoints := []portainer.Endpoint{ + {ID: 1, Name: "Endpoint 1"}, + {ID: 2, Name: "Endpoint 2"}, + {ID: 3, Name: "Endpoint 3"}, + {ID: 4, Name: "Endpoint 4"}, + } + + err := store.EdgeGroup().Create(&portainer.EdgeGroup{ + ID: 1, + Name: "Edge Group 1", + EndpointIDs: roar.FromSlice([]portainer.EndpointID{1}), + }) + require.NoError(t, err) + + err = store.EdgeGroup().Create(&portainer.EdgeGroup{ + ID: 2, + Name: "Edge Group 2", + EndpointIDs: roar.FromSlice([]portainer.EndpointID{2, 3}), + }) + require.NoError(t, err) + + edgeGroups, err := store.EdgeGroup().ReadAll() + require.NoError(t, err) + + es, egs := filterEndpointsByEdgeGroupIDs(endpoints, edgeGroups, []portainer.EdgeGroupID{1, 2}) + require.NoError(t, err) + + require.Len(t, es, 3) + require.Contains(t, es, endpoints[0]) // Endpoint 1 + require.Contains(t, es, endpoints[1]) // Endpoint 2 + require.Contains(t, es, endpoints[2]) // Endpoint 3 + require.NotContains(t, es, endpoints[3]) // Endpoint 4 + + require.Len(t, egs, 2) + require.Equal(t, egs[0].ID, portainer.EdgeGroupID(1)) + require.Equal(t, egs[1].ID, portainer.EdgeGroupID(2)) +} + +func TestFilterEndpointsByExcludeEdgeGroupIDs(t *testing.T) { + _, store := datastore.MustNewTestStore(t, false, false) + + endpoints := []portainer.Endpoint{ + {ID: 1, Name: "Endpoint 1"}, + {ID: 2, Name: "Endpoint 2"}, + {ID: 3, Name: "Endpoint 3"}, + {ID: 4, Name: "Endpoint 4"}, + } + + err := store.EdgeGroup().Create(&portainer.EdgeGroup{ + ID: 1, + Name: "Edge Group 1", + EndpointIDs: roar.FromSlice([]portainer.EndpointID{1}), + }) + require.NoError(t, err) + + err = store.EdgeGroup().Create(&portainer.EdgeGroup{ + ID: 2, + Name: "Edge Group 2", + EndpointIDs: roar.FromSlice([]portainer.EndpointID{2, 3}), + }) + require.NoError(t, err) + + edgeGroups, err := store.EdgeGroup().ReadAll() + require.NoError(t, err) + + es, egs := filterEndpointsByExcludeEdgeGroupIDs(endpoints, edgeGroups, []portainer.EdgeGroupID{1}) + require.NoError(t, err) + + require.Len(t, es, 3) + require.Equal(t, es, []portainer.Endpoint{ + {ID: 2, Name: "Endpoint 2"}, + {ID: 3, Name: "Endpoint 3"}, + {ID: 4, Name: "Endpoint 4"}, + }) + + require.Len(t, egs, 1) + require.Equal(t, egs[0].ID, portainer.EdgeGroupID(2)) +} diff --git a/api/http/handler/endpoints/handler.go b/api/http/handler/endpoints/handler.go index fa852633c..0260acaf0 100644 --- a/api/http/handler/endpoints/handler.go +++ b/api/http/handler/endpoints/handler.go @@ -26,19 +26,20 @@ func hideFields(endpoint *portainer.Endpoint) { // Handler is the HTTP handler 
used to handle environment(endpoint) operations. type Handler struct { *mux.Router - requestBouncer security.BouncerService - DataStore dataservices.DataStore - FileService portainer.FileService - ProxyManager *proxy.Manager - ReverseTunnelService portainer.ReverseTunnelService - SnapshotService portainer.SnapshotService - K8sClientFactory *cli.ClientFactory - ComposeStackManager portainer.ComposeStackManager - AuthorizationService *authorization.Service - DockerClientFactory *dockerclient.ClientFactory - BindAddress string - BindAddressHTTPS string - PendingActionsService *pendingactions.PendingActionsService + requestBouncer security.BouncerService + DataStore dataservices.DataStore + FileService portainer.FileService + ProxyManager *proxy.Manager + ReverseTunnelService portainer.ReverseTunnelService + SnapshotService portainer.SnapshotService + K8sClientFactory *cli.ClientFactory + ComposeStackManager portainer.ComposeStackManager + AuthorizationService *authorization.Service + DockerClientFactory *dockerclient.ClientFactory + BindAddress string + BindAddressHTTPS string + PendingActionsService *pendingactions.PendingActionsService + PullLimitCheckDisabled bool } // NewHandler creates a handler to manage environment(endpoint) operations. @@ -68,8 +69,8 @@ func NewHandler(bouncer security.BouncerService) *Handler { bouncer.AdminAccess(httperror.LoggerHandler(h.endpointUpdate))).Methods(http.MethodPut) h.Handle("/endpoints/{id}", bouncer.AdminAccess(httperror.LoggerHandler(h.endpointDelete))).Methods(http.MethodDelete) - h.Handle("/endpoints", - bouncer.AdminAccess(httperror.LoggerHandler(h.endpointDeleteBatch))).Methods(http.MethodDelete) + h.Handle("/endpoints/delete", + bouncer.AdminAccess(httperror.LoggerHandler(h.endpointDeleteBatch))).Methods(http.MethodPost) h.Handle("/endpoints/{id}/dockerhub/{registryId}", bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.endpointDockerhubStatus))).Methods(http.MethodGet) h.Handle("/endpoints/{id}/snapshot", @@ -85,6 +86,7 @@ func NewHandler(bouncer security.BouncerService) *Handler { // DEPRECATED h.Handle("/endpoints/{id}/status", bouncer.PublicAccess(httperror.LoggerHandler(h.endpointStatusInspect))).Methods(http.MethodGet) + h.Handle("/endpoints", bouncer.AdminAccess(httperror.LoggerHandler(h.endpointDeleteBatchDeprecated))).Methods(http.MethodDelete) return h } diff --git a/api/http/handler/endpoints/update_edge_relations.go b/api/http/handler/endpoints/update_edge_relations.go index 8bebadedd..c487519f0 100644 --- a/api/http/handler/endpoints/update_edge_relations.go +++ b/api/http/handler/endpoints/update_edge_relations.go @@ -17,16 +17,7 @@ func (handler *Handler) updateEdgeRelations(tx dataservices.DataStoreTx, endpoin relation, err := tx.EndpointRelation().EndpointRelation(endpoint.ID) if err != nil { - if !tx.IsErrObjectNotFound(err) { - return errors.WithMessage(err, "Unable to retrieve environment relation inside the database") - } - - relation = &portainer.EndpointRelation{ - EndpointID: endpoint.ID, - } - if err := tx.EndpointRelation().Create(relation); err != nil { - return errors.WithMessage(err, "Unable to create environment relation inside the database") - } + return errors.WithMessage(err, "Unable to retrieve environment relation inside the database") } endpointGroup, err := tx.EndpointGroup().Read(endpoint.GroupID) diff --git a/api/http/handler/endpoints/utils_update_edge_groups.go b/api/http/handler/endpoints/utils_update_edge_groups.go index bd9c413d7..6207acbc5 100644 --- 
a/api/http/handler/endpoints/utils_update_edge_groups.go +++ b/api/http/handler/endpoints/utils_update_edge_groups.go @@ -1,12 +1,11 @@ package endpoints import ( - "slices" - - "github.com/pkg/errors" portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/set" + + "github.com/pkg/errors" ) func updateEnvironmentEdgeGroups(tx dataservices.DataStoreTx, newEdgeGroups []portainer.EdgeGroupID, environmentID portainer.EndpointID) (bool, error) { @@ -19,10 +18,8 @@ func updateEnvironmentEdgeGroups(tx dataservices.DataStoreTx, newEdgeGroups []po environmentEdgeGroupsSet := set.Set[portainer.EdgeGroupID]{} for _, edgeGroup := range edgeGroups { - for _, eID := range edgeGroup.Endpoints { - if eID == environmentID { - environmentEdgeGroupsSet[edgeGroup.ID] = true - } + if edgeGroup.EndpointIDs.Contains(environmentID) { + environmentEdgeGroupsSet[edgeGroup.ID] = true } } @@ -52,20 +49,16 @@ func updateEnvironmentEdgeGroups(tx dataservices.DataStoreTx, newEdgeGroups []po } removeEdgeGroups := environmentEdgeGroupsSet.Difference(newEdgeGroupsSet) - err = updateSet(removeEdgeGroups, func(edgeGroup *portainer.EdgeGroup) { - edgeGroup.Endpoints = slices.DeleteFunc(edgeGroup.Endpoints, func(eID portainer.EndpointID) bool { - return eID == environmentID - }) - }) - if err != nil { + if err := updateSet(removeEdgeGroups, func(edgeGroup *portainer.EdgeGroup) { + edgeGroup.EndpointIDs.Remove(environmentID) + }); err != nil { return false, err } addToEdgeGroups := newEdgeGroupsSet.Difference(environmentEdgeGroupsSet) - err = updateSet(addToEdgeGroups, func(edgeGroup *portainer.EdgeGroup) { - edgeGroup.Endpoints = append(edgeGroup.Endpoints, environmentID) - }) - if err != nil { + if err := updateSet(addToEdgeGroups, func(edgeGroup *portainer.EdgeGroup) { + edgeGroup.EndpointIDs.Add(environmentID) + }); err != nil { return false, err } diff --git a/api/http/handler/endpoints/utils_update_edge_groups_test.go b/api/http/handler/endpoints/utils_update_edge_groups_test.go index e89d501fb..a57651fae 100644 --- a/api/http/handler/endpoints/utils_update_edge_groups_test.go +++ b/api/http/handler/endpoints/utils_update_edge_groups_test.go @@ -6,6 +6,7 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/datastore" + "github.com/stretchr/testify/assert" ) @@ -14,10 +15,9 @@ func Test_updateEdgeGroups(t *testing.T) { groups := make([]portainer.EdgeGroup, len(names)) for index, name := range names { group := &portainer.EdgeGroup{ - Name: name, - Dynamic: false, - TagIDs: make([]portainer.TagID, 0), - Endpoints: make([]portainer.EndpointID, 0), + Name: name, + Dynamic: false, + TagIDs: make([]portainer.TagID, 0), } if err := store.EdgeGroup().Create(group); err != nil { @@ -35,13 +35,8 @@ func Test_updateEdgeGroups(t *testing.T) { group, err := store.EdgeGroup().Read(groupID) is.NoError(err) - for _, endpoint := range group.Endpoints { - if endpoint == endpointID { - return - } - } - - is.Fail("expected endpoint to be in group") + is.True(group.EndpointIDs.Contains(endpointID), + "expected endpoint to be in group") } } @@ -81,7 +76,7 @@ func Test_updateEdgeGroups(t *testing.T) { endpointGroups := groupsByName(groups, testCase.endpointGroupNames) for _, group := range endpointGroups { - group.Endpoints = append(group.Endpoints, testCase.endpoint.ID) + group.EndpointIDs.Add(testCase.endpoint.ID) err = store.EdgeGroup().Update(group.ID, &group) 
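// Sketch (illustrative only, not part of this patch): updateEnvironmentEdgeGroups
// above reconciles an environment's edge-group membership by diffing the set of
// groups it currently belongs to against the desired set, then touching only the
// difference. A simplified, standalone version of that reconcile step using plain
// maps (the real code uses set.Set and the roar-backed EndpointIDs shown above):
package example

type groupID int

func reconcile(current, desired []groupID) (toRemove, toAdd []groupID) {
	currentSet := make(map[groupID]struct{}, len(current))
	for _, id := range current {
		currentSet[id] = struct{}{}
	}
	desiredSet := make(map[groupID]struct{}, len(desired))
	for _, id := range desired {
		desiredSet[id] = struct{}{}
	}

	// In a group now but no longer desired -> remove the environment from it.
	for id := range currentSet {
		if _, ok := desiredSet[id]; !ok {
			toRemove = append(toRemove, id)
		}
	}
	// Desired but not yet a member -> add the environment to it.
	for id := range desiredSet {
		if _, ok := currentSet[id]; !ok {
			toAdd = append(toAdd, id)
		}
	}

	return toRemove, toAdd
}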
is.NoError(err) diff --git a/api/http/handler/endpoints/utils_update_tags_test.go b/api/http/handler/endpoints/utils_update_tags_test.go index ee42e4e10..527f963a4 100644 --- a/api/http/handler/endpoints/utils_update_tags_test.go +++ b/api/http/handler/endpoints/utils_update_tags_test.go @@ -10,7 +10,6 @@ import ( ) func Test_updateTags(t *testing.T) { - createTags := func(store *datastore.Store, tagNames []string) ([]portainer.Tag, error) { tags := make([]portainer.Tag, len(tagNames)) for index, tagName := range tagNames { diff --git a/api/http/handler/file/handler.go b/api/http/handler/file/handler.go index 66f81b64a..9e57478c8 100644 --- a/api/http/handler/file/handler.go +++ b/api/http/handler/file/handler.go @@ -17,12 +17,12 @@ type Handler struct { } // NewHandler creates a handler to serve static files. -func NewHandler(assetPublicPath string, wasInstanceDisabled func() bool) *Handler { +func NewHandler(assetPublicPath string, csp bool, wasInstanceDisabled func() bool) *Handler { h := &Handler{ Handler: security.MWSecureHeaders( gzhttp.GzipHandler(http.FileServer(http.Dir(assetPublicPath))), featureflags.IsEnabled("hsts"), - featureflags.IsEnabled("csp"), + csp, ), wasInstanceDisabled: wasInstanceDisabled, } @@ -36,6 +36,7 @@ func isHTML(acceptContent []string) bool { return true } } + return false } @@ -43,11 +44,13 @@ func (handler *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if handler.wasInstanceDisabled() { if r.RequestURI == "/" || r.RequestURI == "/index.html" { http.Redirect(w, r, "/timeout.html", http.StatusTemporaryRedirect) + return } } else { if strings.HasPrefix(r.RequestURI, "/timeout.html") { http.Redirect(w, r, "/", http.StatusTemporaryRedirect) + return } } diff --git a/api/http/handler/gitops/git_repo_file_preview.go b/api/http/handler/gitops/git_repo_file_preview.go index 28e8fafec..43c08c870 100644 --- a/api/http/handler/gitops/git_repo_file_preview.go +++ b/api/http/handler/gitops/git_repo_file_preview.go @@ -9,8 +9,7 @@ import ( httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" "github.com/portainer/portainer/pkg/libhttp/response" - - "github.com/asaskevich/govalidator" + "github.com/portainer/portainer/pkg/validate" ) type fileResponse struct { @@ -18,10 +17,11 @@ type fileResponse struct { } type repositoryFilePreviewPayload struct { - Repository string `json:"repository" example:"https://github.com/openfaas/faas" validate:"required"` - Reference string `json:"reference" example:"refs/heads/master"` - Username string `json:"username" example:"myGitUsername"` - Password string `json:"password" example:"myGitPassword"` + Repository string `json:"repository" example:"https://github.com/openfaas/faas" validate:"required"` + Reference string `json:"reference" example:"refs/heads/master"` + Username string `json:"username" example:"myGitUsername"` + Password string `json:"password" example:"myGitPassword"` + AuthorizationType gittypes.GitCredentialAuthType `json:"authorizationType"` // Path to file whose content will be read TargetFile string `json:"targetFile" example:"docker-compose.yml"` // TLSSkipVerify skips SSL verification when cloning the Git repository @@ -29,7 +29,7 @@ type repositoryFilePreviewPayload struct { } func (payload *repositoryFilePreviewPayload) Validate(r *http.Request) error { - if len(payload.Repository) == 0 || !govalidator.IsURL(payload.Repository) { + if len(payload.Repository) == 0 || !validate.IsURL(payload.Repository) { return errors.New("invalid 
repository URL. Must correspond to a valid URL format") } @@ -69,7 +69,15 @@ func (handler *Handler) gitOperationRepoFilePreview(w http.ResponseWriter, r *ht return httperror.InternalServerError("Unable to create temporary folder", err) } - err = handler.gitService.CloneRepository(projectPath, payload.Repository, payload.Reference, payload.Username, payload.Password, payload.TLSSkipVerify) + err = handler.gitService.CloneRepository( + projectPath, + payload.Repository, + payload.Reference, + payload.Username, + payload.Password, + payload.AuthorizationType, + payload.TLSSkipVerify, + ) if err != nil { if errors.Is(err, gittypes.ErrAuthenticationFailure) { return httperror.BadRequest("Invalid git credential", err) diff --git a/api/http/handler/handler.go b/api/http/handler/handler.go index 4d7973170..1704eb316 100644 --- a/api/http/handler/handler.go +++ b/api/http/handler/handler.go @@ -11,7 +11,6 @@ import ( "github.com/portainer/portainer/api/http/handler/edgegroups" "github.com/portainer/portainer/api/http/handler/edgejobs" "github.com/portainer/portainer/api/http/handler/edgestacks" - "github.com/portainer/portainer/api/http/handler/edgetemplates" "github.com/portainer/portainer/api/http/handler/endpointedge" "github.com/portainer/portainer/api/http/handler/endpointgroups" "github.com/portainer/portainer/api/http/handler/endpointproxy" @@ -50,7 +49,6 @@ type Handler struct { EdgeGroupsHandler *edgegroups.Handler EdgeJobsHandler *edgejobs.Handler EdgeStacksHandler *edgestacks.Handler - EdgeTemplatesHandler *edgetemplates.Handler EndpointEdgeHandler *endpointedge.Handler EndpointGroupHandler *endpointgroups.Handler EndpointHandler *endpoints.Handler @@ -83,7 +81,7 @@ type Handler struct { } // @title PortainerCE API -// @version 2.26.0 +// @version 2.32.0 // @description.markdown api-description.md // @termsOfService @@ -190,8 +188,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.StripPrefix("/api", h.EdgeGroupsHandler).ServeHTTP(w, r) case strings.HasPrefix(r.URL.Path, "/api/edge_jobs"): http.StripPrefix("/api", h.EdgeJobsHandler).ServeHTTP(w, r) - case strings.HasPrefix(r.URL.Path, "/api/edge_templates"): - http.StripPrefix("/api", h.EdgeTemplatesHandler).ServeHTTP(w, r) case strings.HasPrefix(r.URL.Path, "/api/endpoint_groups"): http.StripPrefix("/api", h.EndpointGroupHandler).ServeHTTP(w, r) case strings.HasPrefix(r.URL.Path, "/api/kubernetes"): diff --git a/api/http/handler/helm/handler.go b/api/http/handler/helm/handler.go index 0108f7ef1..f7f14a1c1 100644 --- a/api/http/handler/helm/handler.go +++ b/api/http/handler/helm/handler.go @@ -1,6 +1,7 @@ package helm import ( + "fmt" "net/http" portainer "github.com/portainer/portainer/api" @@ -8,8 +9,8 @@ import ( "github.com/portainer/portainer/api/http/middlewares" "github.com/portainer/portainer/api/http/security" "github.com/portainer/portainer/api/kubernetes" - "github.com/portainer/portainer/pkg/libhelm" "github.com/portainer/portainer/pkg/libhelm/options" + libhelmtypes "github.com/portainer/portainer/pkg/libhelm/types" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/gorilla/mux" @@ -23,11 +24,11 @@ type Handler struct { jwtService portainer.JWTService kubeClusterAccessService kubernetes.KubeClusterAccessService kubernetesDeployer portainer.KubernetesDeployer - helmPackageManager libhelm.HelmPackageManager + helmPackageManager libhelmtypes.HelmPackageManager } // NewHandler creates a handler to manage endpoint group operations. 
-func NewHandler(bouncer security.BouncerService, dataStore dataservices.DataStore, jwtService portainer.JWTService, kubernetesDeployer portainer.KubernetesDeployer, helmPackageManager libhelm.HelmPackageManager, kubeClusterAccessService kubernetes.KubeClusterAccessService) *Handler { +func NewHandler(bouncer security.BouncerService, dataStore dataservices.DataStore, jwtService portainer.JWTService, kubernetesDeployer portainer.KubernetesDeployer, helmPackageManager libhelmtypes.HelmPackageManager, kubeClusterAccessService kubernetes.KubeClusterAccessService) *Handler { h := &Handler{ Router: mux.NewRouter(), requestBouncer: bouncer, @@ -53,17 +54,23 @@ func NewHandler(bouncer security.BouncerService, dataStore dataservices.DataStor h.Handle("/{id}/kubernetes/helm", httperror.LoggerHandler(h.helmInstall)).Methods(http.MethodPost) - // Deprecated - h.Handle("/{id}/kubernetes/helm/repositories", - httperror.LoggerHandler(h.userGetHelmRepos)).Methods(http.MethodGet) - h.Handle("/{id}/kubernetes/helm/repositories", - httperror.LoggerHandler(h.userCreateHelmRepo)).Methods(http.MethodPost) + // `helm get all [RELEASE_NAME]` + h.Handle("/{id}/kubernetes/helm/{release}", + httperror.LoggerHandler(h.helmGet)).Methods(http.MethodGet) + + // `helm history [RELEASE_NAME]` + h.Handle("/{id}/kubernetes/helm/{release}/history", + httperror.LoggerHandler(h.helmGetHistory)).Methods(http.MethodGet) + + // `helm rollback [RELEASE_NAME] [REVISION]` + h.Handle("/{id}/kubernetes/helm/{release}/rollback", + httperror.LoggerHandler(h.helmRollback)).Methods(http.MethodPost) return h } // NewTemplateHandler creates a template handler to manage environment(endpoint) group operations. -func NewTemplateHandler(bouncer security.BouncerService, helmPackageManager libhelm.HelmPackageManager) *Handler { +func NewTemplateHandler(bouncer security.BouncerService, helmPackageManager libhelmtypes.HelmPackageManager) *Handler { h := &Handler{ Router: mux.NewRouter(), helmPackageManager: helmPackageManager, @@ -84,7 +91,7 @@ func NewTemplateHandler(bouncer security.BouncerService, helmPackageManager libh // getHelmClusterAccess obtains the core k8s cluster access details from request. // The cluster access includes the cluster server url, the user's bearer token and the tls certificate. -// The cluster access is passed in as kube config CLI params to helm binary. +// The cluster access is passed in as kube config CLI params to helm. 
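The three release-scoped routes registered above line up with `helm get all`, `helm history` and `helm rollback`. As a rough client-side sketch, assuming the /api base path implied by the Swagger annotations and Portainer's X-API-Key header for authentication (neither of which is spelled out in this hunk):

package main

import (
	"fmt"
	"io"
	"net/http"
)

// getHelmRelease calls the new GET /api/endpoints/{id}/kubernetes/helm/{release}
// route. The host, environment ID, release name and API key are illustrative
// values; the history and rollback routes follow the same shape with
// "/history" and "/rollback" appended.
func getHelmRelease(host, apiKey, release string) (string, error) {
	url := fmt.Sprintf("%s/api/endpoints/1/kubernetes/helm/%s?namespace=default&showResources=true", host, release)

	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("X-API-Key", apiKey)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	return string(body), err
}

func main() {
	out, err := getHelmRelease("https://portainer.example.com", "ptr_xxxxx", "nginx-1")
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}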
func (handler *Handler) getHelmClusterAccess(r *http.Request) (*options.KubernetesClusterAccess, *httperror.HandlerError) { endpoint, err := middlewares.FetchEndpoint(r) if err != nil { @@ -113,6 +120,9 @@ func (handler *Handler) getHelmClusterAccess(r *http.Request) (*options.Kubernet kubeConfigInternal := handler.kubeClusterAccessService.GetClusterDetails(hostURL, endpoint.ID, true) return &options.KubernetesClusterAccess{ + ClusterName: fmt.Sprintf("%s-%s", "portainer-cluster", endpoint.Name), + ContextName: fmt.Sprintf("%s-%s", "portainer-ctx", endpoint.Name), + UserName: fmt.Sprintf("%s-%s", "portainer-sa-user", tokenData.Username), ClusterServerURL: kubeConfigInternal.ClusterServerURL, CertificateAuthorityFile: kubeConfigInternal.CertificateAuthorityFile, AuthToken: bearerToken, diff --git a/api/http/handler/helm/helm_delete_test.go b/api/http/handler/helm/helm_delete_test.go index 7d676fab9..3c90c1758 100644 --- a/api/http/handler/helm/helm_delete_test.go +++ b/api/http/handler/helm/helm_delete_test.go @@ -13,8 +13,8 @@ import ( helper "github.com/portainer/portainer/api/internal/testhelpers" "github.com/portainer/portainer/api/jwt" "github.com/portainer/portainer/api/kubernetes" - "github.com/portainer/portainer/pkg/libhelm/binary/test" "github.com/portainer/portainer/pkg/libhelm/options" + "github.com/portainer/portainer/pkg/libhelm/test" "github.com/stretchr/testify/assert" ) @@ -34,7 +34,7 @@ func Test_helmDelete(t *testing.T) { is.NoError(err, "Error initiating jwt service") kubernetesDeployer := exectest.NewKubernetesDeployer() - helmPackageManager := test.NewMockHelmBinaryPackageManager("") + helmPackageManager := test.NewMockHelmPackageManager() kubeClusterAccessService := kubernetes.NewKubeClusterAccessService("", "", "") h := NewHandler(helper.NewTestRequestBouncer(), store, jwtService, kubernetesDeployer, helmPackageManager, kubeClusterAccessService) @@ -42,7 +42,7 @@ func Test_helmDelete(t *testing.T) { // Install a single chart directly, to be deleted by the handler options := options.InstallOptions{Name: "nginx-1", Chart: "nginx", Namespace: "default"} - h.helmPackageManager.Install(options) + h.helmPackageManager.Upgrade(options) t.Run("helmDelete succeeds with admin user", func(t *testing.T) { req := httptest.NewRequest(http.MethodDelete, "/1/kubernetes/helm/"+options.Name, nil) diff --git a/api/http/handler/helm/helm_get.go b/api/http/handler/helm/helm_get.go new file mode 100644 index 000000000..bd8ea3e60 --- /dev/null +++ b/api/http/handler/helm/helm_get.go @@ -0,0 +1,67 @@ +package helm + +import ( + "net/http" + + "github.com/portainer/portainer/pkg/libhelm/options" + _ "github.com/portainer/portainer/pkg/libhelm/release" + httperror "github.com/portainer/portainer/pkg/libhttp/error" + "github.com/portainer/portainer/pkg/libhttp/request" + "github.com/portainer/portainer/pkg/libhttp/response" +) + +// @id HelmGet +// @summary Get a helm release +// @description Get details of a helm release by release name +// @description **Access policy**: authenticated +// @tags helm +// @security ApiKeyAuth || jwt +// @produce json +// @param id path int true "Environment(Endpoint) identifier" +// @param name path string true "Helm release name" +// @param namespace query string false "specify an optional namespace" +// @param showResources query boolean false "show resources of the release" +// @param revision query int false "specify an optional revision" +// @success 200 {object} release.Release "Success" +// @failure 400 "Invalid request payload, such as missing required 
fields or fields not meeting validation criteria." +// @failure 401 "Unauthorized access - the user is not authenticated or does not have the necessary permissions. Ensure that you have provided a valid API key or JWT token, and that you have the required permissions." +// @failure 403 "Permission denied - the user is authenticated but does not have the necessary permissions to access the requested resource or perform the specified operation. Check your user roles and permissions." +// @failure 404 "Unable to find an environment with the specified identifier." +// @failure 500 "Server error occurred while attempting to retrieve the release." +// @router /endpoints/{id}/kubernetes/helm/{name} [get] +func (handler *Handler) helmGet(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { + release, err := request.RetrieveRouteVariableValue(r, "release") + if err != nil { + return httperror.BadRequest("No release specified", err) + } + + clusterAccess, httperr := handler.getHelmClusterAccess(r) + if httperr != nil { + return httperr + } + + // build the get options + getOpts := options.GetOptions{ + KubernetesClusterAccess: clusterAccess, + Name: release, + } + namespace, _ := request.RetrieveQueryParameter(r, "namespace", true) + // optional namespace. The library defaults to "default" + if namespace != "" { + getOpts.Namespace = namespace + } + showResources, _ := request.RetrieveBooleanQueryParameter(r, "showResources", true) + getOpts.ShowResources = showResources + revision, _ := request.RetrieveNumericQueryParameter(r, "revision", true) + // optional revision. The library defaults to the latest revision if not specified + if revision > 0 { + getOpts.Revision = revision + } + + releases, err := handler.helmPackageManager.Get(getOpts) + if err != nil { + return httperror.InternalServerError("Helm returned an error", err) + } + + return response.JSON(w, releases) +} diff --git a/api/http/handler/helm/helm_get_test.go b/api/http/handler/helm/helm_get_test.go new file mode 100644 index 000000000..2dad6ef59 --- /dev/null +++ b/api/http/handler/helm/helm_get_test.go @@ -0,0 +1,66 @@ +package helm + +import ( + "io" + "net/http" + "net/http/httptest" + "testing" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/datastore" + "github.com/portainer/portainer/api/exec/exectest" + "github.com/portainer/portainer/api/http/security" + "github.com/portainer/portainer/api/internal/testhelpers" + helper "github.com/portainer/portainer/api/internal/testhelpers" + "github.com/portainer/portainer/api/jwt" + "github.com/portainer/portainer/api/kubernetes" + "github.com/portainer/portainer/pkg/libhelm/options" + "github.com/portainer/portainer/pkg/libhelm/release" + "github.com/portainer/portainer/pkg/libhelm/test" + + "github.com/segmentio/encoding/json" + "github.com/stretchr/testify/assert" +) + +func Test_helmGet(t *testing.T) { + is := assert.New(t) + + _, store := datastore.MustNewTestStore(t, true, true) + + err := store.Endpoint().Create(&portainer.Endpoint{ID: 1}) + is.NoError(err, "Error creating environment") + + err = store.User().Create(&portainer.User{Username: "admin", Role: portainer.AdministratorRole}) + is.NoError(err, "Error creating a user") + + jwtService, err := jwt.NewService("1h", store) + is.NoError(err, "Error initiating jwt service") + + kubernetesDeployer := exectest.NewKubernetesDeployer() + helmPackageManager := test.NewMockHelmPackageManager() + kubeClusterAccessService := kubernetes.NewKubeClusterAccessService("", "", "") + h := 
NewHandler(helper.NewTestRequestBouncer(), store, jwtService, kubernetesDeployer, helmPackageManager, kubeClusterAccessService) + + is.NotNil(h, "Handler should not fail") + + // Install a single chart, to be retrieved by the handler + options := options.InstallOptions{Name: "nginx-1", Chart: "nginx", Namespace: "default"} + h.helmPackageManager.Upgrade(options) + + t.Run("helmGet successfully retrieves helm release", func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/1/kubernetes/helm/"+options.Name+"?namespace="+options.Namespace, nil) + ctx := security.StoreTokenData(req, &portainer.TokenData{ID: 1, Username: "admin", Role: 1}) + req = req.WithContext(ctx) + testhelpers.AddTestSecurityCookie(req, "Bearer dummytoken") + + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + data := release.Release{} + body, err := io.ReadAll(rr.Body) + is.NoError(err, "ReadAll should not return error") + json.Unmarshal(body, &data) + is.Equal(http.StatusOK, rr.Code, "Status should be 200") + is.Equal("nginx-1", data.Name) + }) +} diff --git a/api/http/handler/helm/helm_history.go b/api/http/handler/helm/helm_history.go new file mode 100644 index 000000000..68169bcd4 --- /dev/null +++ b/api/http/handler/helm/helm_history.go @@ -0,0 +1,58 @@ +package helm + +import ( + "net/http" + + "github.com/portainer/portainer/pkg/libhelm/options" + _ "github.com/portainer/portainer/pkg/libhelm/release" + httperror "github.com/portainer/portainer/pkg/libhttp/error" + "github.com/portainer/portainer/pkg/libhttp/request" + "github.com/portainer/portainer/pkg/libhttp/response" +) + +// @id HelmGetHistory +// @summary Get a historical list of releases +// @description Get a historical list of releases by release name +// @description **Access policy**: authenticated +// @tags helm +// @security ApiKeyAuth || jwt +// @produce json +// @param id path int true "Environment(Endpoint) identifier" +// @param name path string true "Helm release name" +// @param namespace query string false "specify an optional namespace" +// @success 200 {array} release.Release "Success" +// @failure 400 "Invalid request payload, such as missing required fields or fields not meeting validation criteria." +// @failure 401 "Unauthorized access - the user is not authenticated or does not have the necessary permissions. Ensure that you have provided a valid API key or JWT token, and that you have the required permissions." +// @failure 403 "Permission denied - the user is authenticated but does not have the necessary permissions to access the requested resource or perform the specified operation. Check your user roles and permissions." +// @failure 404 "Unable to find an environment with the specified identifier." +// @failure 500 "Server error occurred while attempting to retrieve the historical list of releases." +// @router /endpoints/{id}/kubernetes/helm/{release}/history [get] +func (handler *Handler) helmGetHistory(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { + release, err := request.RetrieveRouteVariableValue(r, "release") + if err != nil { + return httperror.BadRequest("No release specified", err) + } + + clusterAccess, httperr := handler.getHelmClusterAccess(r) + if httperr != nil { + return httperr + } + + historyOptions := options.HistoryOptions{ + KubernetesClusterAccess: clusterAccess, + Name: release, + } + + // optional namespace.
The library defaults to "default" + namespace, _ := request.RetrieveQueryParameter(r, "namespace", true) + if namespace != "" { + historyOptions.Namespace = namespace + } + + releases, err := handler.helmPackageManager.GetHistory(historyOptions) + if err != nil { + return httperror.InternalServerError("Helm returned an error", err) + } + + return response.JSON(w, releases) +} diff --git a/api/http/handler/helm/helm_history_test.go b/api/http/handler/helm/helm_history_test.go new file mode 100644 index 000000000..10fadfb1f --- /dev/null +++ b/api/http/handler/helm/helm_history_test.go @@ -0,0 +1,67 @@ +package helm + +import ( + "io" + "net/http" + "net/http/httptest" + "testing" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/datastore" + "github.com/portainer/portainer/api/exec/exectest" + "github.com/portainer/portainer/api/http/security" + "github.com/portainer/portainer/api/internal/testhelpers" + helper "github.com/portainer/portainer/api/internal/testhelpers" + "github.com/portainer/portainer/api/jwt" + "github.com/portainer/portainer/api/kubernetes" + "github.com/portainer/portainer/pkg/libhelm/options" + "github.com/portainer/portainer/pkg/libhelm/release" + "github.com/portainer/portainer/pkg/libhelm/test" + + "github.com/segmentio/encoding/json" + "github.com/stretchr/testify/assert" +) + +func Test_helmGetHistory(t *testing.T) { + is := assert.New(t) + + _, store := datastore.MustNewTestStore(t, true, true) + + err := store.Endpoint().Create(&portainer.Endpoint{ID: 1}) + is.NoError(err, "Error creating environment") + + err = store.User().Create(&portainer.User{Username: "admin", Role: portainer.AdministratorRole}) + is.NoError(err, "Error creating a user") + + jwtService, err := jwt.NewService("1h", store) + is.NoError(err, "Error initiating jwt service") + + kubernetesDeployer := exectest.NewKubernetesDeployer() + helmPackageManager := test.NewMockHelmPackageManager() + kubeClusterAccessService := kubernetes.NewKubeClusterAccessService("", "", "") + h := NewHandler(helper.NewTestRequestBouncer(), store, jwtService, kubernetesDeployer, helmPackageManager, kubeClusterAccessService) + + is.NotNil(h, "Handler should not fail") + + // Install a single chart, to be retrieved by the handler + options := options.InstallOptions{Name: "nginx-1", Chart: "nginx", Namespace: "default"} + h.helmPackageManager.Upgrade(options) + + t.Run("helmGetHistory successfully retrieves helm release history", func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/1/kubernetes/helm/"+options.Name+"/history?namespace="+options.Namespace, nil) + ctx := security.StoreTokenData(req, &portainer.TokenData{ID: 1, Username: "admin", Role: 1}) + req = req.WithContext(ctx) + testhelpers.AddTestSecurityCookie(req, "Bearer dummytoken") + + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + data := []release.Release{} + body, err := io.ReadAll(rr.Body) + is.NoError(err, "ReadAll should not return error") + json.Unmarshal(body, &data) + is.Equal(http.StatusOK, rr.Code, "Status should be 200") + is.Equal(1, len(data)) + is.Equal("nginx-1", data[0].Name) + }) +} diff --git a/api/http/handler/helm/helm_install.go b/api/http/handler/helm/helm_install.go index dd1365f82..33b0d82cd 100644 --- a/api/http/handler/helm/helm_install.go +++ b/api/http/handler/helm/helm_install.go @@ -26,6 +26,8 @@ type installChartPayload struct { Chart string `json:"chart"` Repo string `json:"repo"` Values string `json:"values"` + Version string `json:"version"` + Atomic bool
`json:"atomic"` } var errChartNameInvalid = errors.New("invalid chart name. " + @@ -44,18 +46,24 @@ var errChartNameInvalid = errors.New("invalid chart name. " + // @produce json // @param id path int true "Environment(Endpoint) identifier" // @param payload body installChartPayload true "Chart details" +// @param dryRun query bool false "Dry run" // @success 201 {object} release.Release "Created" // @failure 401 "Unauthorized" // @failure 404 "Environment(Endpoint) or ServiceAccount not found" // @failure 500 "Server error" // @router /endpoints/{id}/kubernetes/helm [post] func (handler *Handler) helmInstall(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { + dryRun, err := request.RetrieveBooleanQueryParameter(r, "dryRun", true) + if err != nil { + return httperror.BadRequest("Invalid dryRun query parameter", err) + } + var payload installChartPayload if err := request.DecodeAndValidateJSONPayload(r, &payload); err != nil { return httperror.BadRequest("Invalid Helm install payload", err) } - release, err := handler.installChart(r, payload) + release, err := handler.installChart(r, payload, dryRun) if err != nil { return httperror.InternalServerError("Unable to install a chart", err) } @@ -92,22 +100,21 @@ func (p *installChartPayload) Validate(_ *http.Request) error { return nil } -func (handler *Handler) installChart(r *http.Request, p installChartPayload) (*release.Release, error) { +func (handler *Handler) installChart(r *http.Request, p installChartPayload, dryRun bool) (*release.Release, error) { clusterAccess, httperr := handler.getHelmClusterAccess(r) if httperr != nil { return nil, httperr.Err } installOpts := options.InstallOptions{ - Name: p.Name, - Chart: p.Chart, - Namespace: p.Namespace, - Repo: p.Repo, - KubernetesClusterAccess: &options.KubernetesClusterAccess{ - ClusterServerURL: clusterAccess.ClusterServerURL, - CertificateAuthorityFile: clusterAccess.CertificateAuthorityFile, - AuthToken: clusterAccess.AuthToken, - }, + Name: p.Name, + Chart: p.Chart, + Version: p.Version, + Namespace: p.Namespace, + Repo: p.Repo, + Atomic: p.Atomic, + DryRun: dryRun, + KubernetesClusterAccess: clusterAccess, } if p.Values != "" { @@ -129,18 +136,19 @@ func (handler *Handler) installChart(r *http.Request, p installChartPayload) (*r installOpts.ValuesFile = file.Name() } - release, err := handler.helmPackageManager.Install(installOpts) + release, err := handler.helmPackageManager.Upgrade(installOpts) if err != nil { return nil, err } - manifest, err := handler.applyPortainerLabelsToHelmAppManifest(r, installOpts, release.Manifest) - if err != nil { - return nil, err - } - - if err := handler.updateHelmAppManifest(r, manifest, installOpts.Namespace); err != nil { - return nil, err + if !installOpts.DryRun { + manifest, err := handler.applyPortainerLabelsToHelmAppManifest(r, installOpts, release.Manifest) + if err != nil { + return nil, err + } + if err := handler.updateHelmAppManifest(r, manifest, installOpts.Namespace); err != nil { + return nil, err + } } return release, nil @@ -196,7 +204,7 @@ func (handler *Handler) updateHelmAppManifest(r *http.Request, manifest []byte, g := new(errgroup.Group) for _, resource := range yamlResources { g.Go(func() error { - tmpfile, err := os.CreateTemp("", "helm-manifest-*") + tmpfile, err := os.CreateTemp("", "helm-manifest-*.yaml") if err != nil { return errors.Wrap(err, "failed to create a tmp helm manifest file") } diff --git a/api/http/handler/helm/helm_install_test.go b/api/http/handler/helm/helm_install_test.go index 
869b80554..ab2da85e6 100644 --- a/api/http/handler/helm/helm_install_test.go +++ b/api/http/handler/helm/helm_install_test.go @@ -15,9 +15,9 @@ import ( helper "github.com/portainer/portainer/api/internal/testhelpers" "github.com/portainer/portainer/api/jwt" "github.com/portainer/portainer/api/kubernetes" - "github.com/portainer/portainer/pkg/libhelm/binary/test" "github.com/portainer/portainer/pkg/libhelm/options" "github.com/portainer/portainer/pkg/libhelm/release" + "github.com/portainer/portainer/pkg/libhelm/test" "github.com/segmentio/encoding/json" "github.com/stretchr/testify/assert" @@ -38,7 +38,7 @@ func Test_helmInstall(t *testing.T) { is.NoError(err, "Error initiating jwt service") kubernetesDeployer := exectest.NewKubernetesDeployer() - helmPackageManager := test.NewMockHelmBinaryPackageManager("") + helmPackageManager := test.NewMockHelmPackageManager() kubeClusterAccessService := kubernetes.NewKubeClusterAccessService("", "", "") h := NewHandler(helper.NewTestRequestBouncer(), store, jwtService, kubernetesDeployer, helmPackageManager, kubeClusterAccessService) diff --git a/api/http/handler/helm/helm_list_test.go b/api/http/handler/helm/helm_list_test.go index 8ef51b324..9f69fe11d 100644 --- a/api/http/handler/helm/helm_list_test.go +++ b/api/http/handler/helm/helm_list_test.go @@ -14,9 +14,9 @@ import ( helper "github.com/portainer/portainer/api/internal/testhelpers" "github.com/portainer/portainer/api/jwt" "github.com/portainer/portainer/api/kubernetes" - "github.com/portainer/portainer/pkg/libhelm/binary/test" "github.com/portainer/portainer/pkg/libhelm/options" "github.com/portainer/portainer/pkg/libhelm/release" + "github.com/portainer/portainer/pkg/libhelm/test" "github.com/segmentio/encoding/json" "github.com/stretchr/testify/assert" @@ -37,13 +37,13 @@ func Test_helmList(t *testing.T) { is.NoError(err, "Error initialising jwt service") kubernetesDeployer := exectest.NewKubernetesDeployer() - helmPackageManager := test.NewMockHelmBinaryPackageManager("") + helmPackageManager := test.NewMockHelmPackageManager() kubeClusterAccessService := kubernetes.NewKubeClusterAccessService("", "", "") h := NewHandler(helper.NewTestRequestBouncer(), store, jwtService, kubernetesDeployer, helmPackageManager, kubeClusterAccessService) // Install a single chart. 
We expect to get these values back options := options.InstallOptions{Name: "nginx-1", Chart: "nginx", Namespace: "default"} - h.helmPackageManager.Install(options) + h.helmPackageManager.Upgrade(options) t.Run("helmList", func(t *testing.T) { req := httptest.NewRequest(http.MethodGet, "/1/kubernetes/helm", nil) diff --git a/api/http/handler/helm/helm_repo_search.go b/api/http/handler/helm/helm_repo_search.go index aab9c523d..c29423fa9 100644 --- a/api/http/handler/helm/helm_repo_search.go +++ b/api/http/handler/helm/helm_repo_search.go @@ -7,6 +7,7 @@ import ( "github.com/portainer/portainer/pkg/libhelm/options" httperror "github.com/portainer/portainer/pkg/libhttp/error" + "github.com/portainer/portainer/pkg/libhttp/request" "github.com/pkg/errors" ) @@ -17,6 +18,8 @@ import ( // @description **Access policy**: authenticated // @tags helm // @param repo query string true "Helm repository URL" +// @param chart query string false "Helm chart name" +// @param useCache query string false "If true will use cache to search" // @security ApiKeyAuth // @security jwt // @produce json @@ -32,13 +35,19 @@ func (handler *Handler) helmRepoSearch(w http.ResponseWriter, r *http.Request) * return httperror.BadRequest("Bad request", errors.New("missing `repo` query parameter")) } + chart, _ := request.RetrieveQueryParameter(r, "chart", false) + // If true will useCache to search, will always add to cache after + useCache, _ := request.RetrieveBooleanQueryParameter(r, "useCache", false) + _, err := url.ParseRequestURI(repo) if err != nil { return httperror.BadRequest("Bad request", errors.Wrap(err, fmt.Sprintf("provided URL %q is not valid", repo))) } searchOpts := options.SearchRepoOptions{ - Repo: repo, + Repo: repo, + Chart: chart, + UseCache: useCache, } result, err := handler.helmPackageManager.SearchRepo(searchOpts) diff --git a/api/http/handler/helm/helm_repo_search_test.go b/api/http/handler/helm/helm_repo_search_test.go index 5fde5e642..63dac43b5 100644 --- a/api/http/handler/helm/helm_repo_search_test.go +++ b/api/http/handler/helm/helm_repo_search_test.go @@ -8,14 +8,14 @@ import ( "testing" helper "github.com/portainer/portainer/api/internal/testhelpers" - "github.com/portainer/portainer/pkg/libhelm/binary/test" + "github.com/portainer/portainer/pkg/libhelm/test" "github.com/stretchr/testify/assert" ) func Test_helmRepoSearch(t *testing.T) { is := assert.New(t) - helmPackageManager := test.NewMockHelmBinaryPackageManager("") + helmPackageManager := test.NewMockHelmPackageManager() h := NewTemplateHandler(helper.NewTestRequestBouncer(), helmPackageManager) assert.NotNil(t, h, "Handler should not fail") diff --git a/api/http/handler/helm/helm_rollback.go b/api/http/handler/helm/helm_rollback.go new file mode 100644 index 000000000..d98f7a47a --- /dev/null +++ b/api/http/handler/helm/helm_rollback.go @@ -0,0 +1,105 @@ +package helm + +import ( + "net/http" + "time" + + "github.com/portainer/portainer/pkg/libhelm/options" + _ "github.com/portainer/portainer/pkg/libhelm/release" + httperror "github.com/portainer/portainer/pkg/libhttp/error" + "github.com/portainer/portainer/pkg/libhttp/request" + "github.com/portainer/portainer/pkg/libhttp/response" +) + +// @id HelmRollback +// @summary Rollback a helm release +// @description Rollback a helm release to a previous revision +// @description **Access policy**: authenticated +// @tags helm +// @security ApiKeyAuth || jwt +// @produce json +// @param id path int true "Environment(Endpoint) identifier" +// @param release path string true "Helm release 
name" +// @param namespace query string false "specify an optional namespace" +// @param revision query int false "specify the revision to rollback to (defaults to previous revision if not specified)" +// @param wait query boolean false "wait for resources to be ready (default: false)" +// @param waitForJobs query boolean false "wait for jobs to complete before marking the release as successful (default: false)" +// @param recreate query boolean false "performs pods restart for the resource if applicable (default: true)" +// @param force query boolean false "force resource update through delete/recreate if needed (default: false)" +// @param timeout query int false "time to wait for any individual Kubernetes operation in seconds (default: 300)" +// @success 200 {object} release.Release "Success" +// @failure 400 "Invalid request payload, such as missing required fields or fields not meeting validation criteria." +// @failure 401 "Unauthorized access - the user is not authenticated or does not have the necessary permissions. Ensure that you have provided a valid API key or JWT token, and that you have the required permissions." +// @failure 403 "Permission denied - the user is authenticated but does not have the necessary permissions to access the requested resource or perform the specified operation. Check your user roles and permissions." +// @failure 404 "Unable to find an environment with the specified identifier or release name." +// @failure 500 "Server error occurred while attempting to rollback the release." +// @router /endpoints/{id}/kubernetes/helm/{release}/rollback [post] +func (handler *Handler) helmRollback(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { + release, err := request.RetrieveRouteVariableValue(r, "release") + if err != nil { + return httperror.BadRequest("No release specified", err) + } + + clusterAccess, httperr := handler.getHelmClusterAccess(r) + if httperr != nil { + return httperr + } + + // build the rollback options + rollbackOpts := options.RollbackOptions{ + KubernetesClusterAccess: clusterAccess, + Name: release, + // Set default values + Recreate: true, // Default to recreate pods (restart) + Timeout: 5 * time.Minute, // Default timeout of 5 minutes + } + + namespace, _ := request.RetrieveQueryParameter(r, "namespace", true) + // optional namespace. The library defaults to "default" + if namespace != "" { + rollbackOpts.Namespace = namespace + } + + revision, _ := request.RetrieveNumericQueryParameter(r, "revision", true) + // optional revision. 
If not specified, it will rollback to the previous revision + if revision > 0 { + rollbackOpts.Version = revision + } + + // Default for wait is false, only set to true if explicitly requested + wait, err := request.RetrieveBooleanQueryParameter(r, "wait", true) + if err == nil { + rollbackOpts.Wait = wait + } + + // Default for waitForJobs is false, only set to true if explicitly requested + waitForJobs, err := request.RetrieveBooleanQueryParameter(r, "waitForJobs", true) + if err == nil { + rollbackOpts.WaitForJobs = waitForJobs + } + + // Default for recreate is true (set above), override if specified + recreate, err := request.RetrieveBooleanQueryParameter(r, "recreate", true) + if err == nil { + rollbackOpts.Recreate = recreate + } + + // Default for force is false, only set to true if explicitly requested + force, err := request.RetrieveBooleanQueryParameter(r, "force", true) + if err == nil { + rollbackOpts.Force = force + } + + timeout, _ := request.RetrieveNumericQueryParameter(r, "timeout", true) + // Override default timeout if specified + if timeout > 0 { + rollbackOpts.Timeout = time.Duration(timeout) * time.Second + } + + releaseInfo, err := handler.helmPackageManager.Rollback(rollbackOpts) + if err != nil { + return httperror.InternalServerError("Failed to rollback helm release", err) + } + + return response.JSON(w, releaseInfo) +} diff --git a/api/http/handler/helm/helm_show.go b/api/http/handler/helm/helm_show.go index 591c57922..f139827b8 100644 --- a/api/http/handler/helm/helm_show.go +++ b/api/http/handler/helm/helm_show.go @@ -20,6 +20,7 @@ import ( // @tags helm // @param repo query string true "Helm repository URL" // @param chart query string true "Chart name" +// @param version query string true "Chart version" // @param command path string true "chart/values/readme" // @security ApiKeyAuth // @security jwt @@ -45,6 +46,11 @@ func (handler *Handler) helmShow(w http.ResponseWriter, r *http.Request) *httper return httperror.BadRequest("Bad request", errors.New("missing `chart` query parameter")) } + version, err := request.RetrieveQueryParameter(r, "version", true) + if err != nil { + return httperror.BadRequest("Bad request", errors.Wrap(err, fmt.Sprintf("provided version %q is not valid", version))) + } + cmd, err := request.RetrieveRouteVariableValue(r, "command") if err != nil { cmd = "all" @@ -55,6 +61,7 @@ func (handler *Handler) helmShow(w http.ResponseWriter, r *http.Request) *httper OutputFormat: options.ShowOutputFormat(cmd), Chart: chart, Repo: repo, + Version: version, } result, err := handler.helmPackageManager.Show(showOptions) if err != nil { diff --git a/api/http/handler/helm/helm_show_test.go b/api/http/handler/helm/helm_show_test.go index dd3957c99..385843b65 100644 --- a/api/http/handler/helm/helm_show_test.go +++ b/api/http/handler/helm/helm_show_test.go @@ -9,14 +9,14 @@ import ( "testing" helper "github.com/portainer/portainer/api/internal/testhelpers" - "github.com/portainer/portainer/pkg/libhelm/binary/test" + "github.com/portainer/portainer/pkg/libhelm/test" "github.com/stretchr/testify/assert" ) func Test_helmShow(t *testing.T) { is := assert.New(t) - helmPackageManager := test.NewMockHelmBinaryPackageManager("") + helmPackageManager := test.NewMockHelmPackageManager() h := NewTemplateHandler(helper.NewTestRequestBouncer(), helmPackageManager) is.NotNil(h, "Handler should not fail") diff --git a/api/http/handler/helm/user_helm_repos.go b/api/http/handler/helm/user_helm_repos.go deleted file mode 100644 index 5197f88f9..000000000 --- 
a/api/http/handler/helm/user_helm_repos.go +++ /dev/null @@ -1,127 +0,0 @@ -package helm - -import ( - "net/http" - "strings" - - portainer "github.com/portainer/portainer/api" - "github.com/portainer/portainer/api/http/security" - "github.com/portainer/portainer/pkg/libhelm" - httperror "github.com/portainer/portainer/pkg/libhttp/error" - "github.com/portainer/portainer/pkg/libhttp/request" - "github.com/portainer/portainer/pkg/libhttp/response" - - "github.com/pkg/errors" -) - -type helmUserRepositoryResponse struct { - GlobalRepository string `json:"GlobalRepository"` - UserRepositories []portainer.HelmUserRepository `json:"UserRepositories"` -} - -type addHelmRepoUrlPayload struct { - URL string `json:"url"` -} - -func (p *addHelmRepoUrlPayload) Validate(_ *http.Request) error { - return libhelm.ValidateHelmRepositoryURL(p.URL, nil) -} - -// @id HelmUserRepositoryCreateDeprecated -// @summary Create a user helm repository -// @description Create a user helm repository. -// @description **Access policy**: authenticated -// @tags helm -// @security ApiKeyAuth -// @security jwt -// @accept json -// @produce json -// @param id path int true "Environment(Endpoint) identifier" -// @param payload body addHelmRepoUrlPayload true "Helm Repository" -// @success 200 {object} portainer.HelmUserRepository "Success" -// @failure 400 "Invalid request" -// @failure 403 "Permission denied" -// @failure 500 "Server error" -// @deprecated -// @router /endpoints/{id}/kubernetes/helm/repositories [post] -func (handler *Handler) userCreateHelmRepo(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { - tokenData, err := security.RetrieveTokenData(r) - if err != nil { - return httperror.InternalServerError("Unable to retrieve user authentication token", err) - } - userID := tokenData.ID - - p := new(addHelmRepoUrlPayload) - err = request.DecodeAndValidateJSONPayload(r, p) - if err != nil { - return httperror.BadRequest("Invalid Helm repository URL", err) - } - - // lowercase, remove trailing slash - p.URL = strings.TrimSuffix(strings.ToLower(p.URL), "/") - - records, err := handler.dataStore.HelmUserRepository().HelmUserRepositoryByUserID(userID) - if err != nil { - return httperror.InternalServerError("Unable to access the DataStore", err) - } - - // check if repo already exists - by doing case insensitive comparison - for _, record := range records { - if strings.EqualFold(record.URL, p.URL) { - errMsg := "Helm repo already registered for user" - return httperror.BadRequest(errMsg, errors.New(errMsg)) - } - } - - record := portainer.HelmUserRepository{ - UserID: userID, - URL: p.URL, - } - - err = handler.dataStore.HelmUserRepository().Create(&record) - if err != nil { - return httperror.InternalServerError("Unable to save a user Helm repository URL", err) - } - - return response.JSON(w, record) -} - -// @id HelmUserRepositoriesListDeprecated -// @summary List a users helm repositories -// @description Inspect a user helm repositories. 
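Tying the rollback handler above together: recreate defaults to true and the timeout to five minutes, and everything else is driven by query parameters. A minimal client-side sketch of a rollback call under the same assumptions as the earlier example (host, environment ID and the X-API-Key header are placeholders, not part of this diff):

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// rollbackHelmRelease exercises the new POST
// /api/endpoints/{id}/kubernetes/helm/{release}/rollback route. Only revision
// and wait are passed explicitly; recreate=true and the five minute timeout
// are the handler defaults.
func rollbackHelmRelease(host, apiKey, release string, revision int) error {
	query := url.Values{}
	query.Set("namespace", "default")
	query.Set("revision", fmt.Sprintf("%d", revision))
	query.Set("wait", "true")

	endpoint := fmt.Sprintf("%s/api/endpoints/1/kubernetes/helm/%s/rollback?%s", host, release, query.Encode())

	req, err := http.NewRequest(http.MethodPost, endpoint, nil)
	if err != nil {
		return err
	}
	req.Header.Set("X-API-Key", apiKey)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("rollback failed with status %d", resp.StatusCode)
	}

	return nil
}

func main() {
	if err := rollbackHelmRelease("https://portainer.example.com", "ptr_xxxxx", "nginx-1", 2); err != nil {
		panic(err)
	}
}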
-// @description **Access policy**: authenticated -// @tags helm -// @security ApiKeyAuth -// @security jwt -// @produce json -// @param id path int true "User identifier" -// @success 200 {object} helmUserRepositoryResponse "Success" -// @failure 400 "Invalid request" -// @failure 403 "Permission denied" -// @failure 500 "Server error" -// @deprecated -// @router /endpoints/{id}/kubernetes/helm/repositories [get] -func (handler *Handler) userGetHelmRepos(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { - tokenData, err := security.RetrieveTokenData(r) - if err != nil { - return httperror.InternalServerError("Unable to retrieve user authentication token", err) - } - userID := tokenData.ID - - settings, err := handler.dataStore.Settings().Settings() - if err != nil { - return httperror.InternalServerError("Unable to retrieve settings from the database", err) - } - - userRepos, err := handler.dataStore.HelmUserRepository().HelmUserRepositoryByUserID(userID) - if err != nil { - return httperror.InternalServerError("Unable to get user Helm repositories", err) - } - - resp := helmUserRepositoryResponse{ - GlobalRepository: settings.HelmRepositoryURL, - UserRepositories: userRepos, - } - - return response.JSON(w, resp) -} diff --git a/api/http/handler/kubernetes/application.go b/api/http/handler/kubernetes/application.go index 2da8d0fd4..cb67cd7af 100644 --- a/api/http/handler/kubernetes/application.go +++ b/api/http/handler/kubernetes/application.go @@ -69,7 +69,6 @@ func (handler *Handler) getApplicationsResources(w http.ResponseWriter, r *http. // @param id path int true "Environment(Endpoint) identifier" // @param namespace query string true "Namespace name" // @param nodeName query string true "Node name" -// @param withDependencies query boolean false "Include dependencies in the response" // @success 200 {array} models.K8sApplication "Success" // @failure 400 "Invalid request payload, such as missing required fields or fields not meeting validation criteria." // @failure 401 "Unauthorized access - the user is not authenticated or does not have the necessary permissions. Ensure that you have provided a valid API key or JWT token, and that you have the required permissions." @@ -117,12 +116,6 @@ func (handler *Handler) getAllKubernetesApplications(r *http.Request) ([]models. return nil, httperror.BadRequest("Unable to parse the namespace query parameter", err) } - withDependencies, err := request.RetrieveBooleanQueryParameter(r, "withDependencies", true) - if err != nil { - log.Error().Err(err).Str("context", "getAllKubernetesApplications").Msg("Unable to parse the withDependencies query parameter") - return nil, httperror.BadRequest("Unable to parse the withDependencies query parameter", err) - } - nodeName, err := request.RetrieveQueryParameter(r, "nodeName", true) if err != nil { log.Error().Err(err).Str("context", "getAllKubernetesApplications").Msg("Unable to parse the nodeName query parameter") @@ -135,7 +128,7 @@ func (handler *Handler) getAllKubernetesApplications(r *http.Request) ([]models. 
return nil, httperror.InternalServerError("Unable to get a Kubernetes client for the user", httpErr) } - applications, err := cli.GetApplications(namespace, nodeName, withDependencies) + applications, err := cli.GetApplications(namespace, nodeName) if err != nil { if k8serrors.IsUnauthorized(err) { log.Error().Err(err).Str("context", "getAllKubernetesApplications").Str("namespace", namespace).Str("nodeName", nodeName).Msg("Unable to get the list of applications") diff --git a/api/http/handler/kubernetes/client.go b/api/http/handler/kubernetes/client.go index a85f2cff9..a7f2485e3 100644 --- a/api/http/handler/kubernetes/client.go +++ b/api/http/handler/kubernetes/client.go @@ -2,8 +2,10 @@ package kubernetes import ( "net/http" + "strconv" "github.com/portainer/portainer/api/http/middlewares" + "github.com/portainer/portainer/api/http/security" "github.com/portainer/portainer/api/kubernetes/cli" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/rs/zerolog/log" @@ -25,13 +27,19 @@ func (handler *Handler) prepareKubeClient(r *http.Request) (*cli.KubeClient, *ht return nil, httperror.NotFound("Unable to find the Kubernetes endpoint associated to the request.", err) } - pcli, err := handler.KubernetesClientFactory.GetPrivilegedKubeClient(endpoint) + tokenData, err := security.RetrieveTokenData(r) + if err != nil { + log.Error().Err(err).Str("context", "prepareKubeClient").Msg("Unable to retrieve token data associated to the request.") + return nil, httperror.InternalServerError("Unable to retrieve token data associated to the request.", err) + } + + pcli, err := handler.KubernetesClientFactory.GetPrivilegedUserKubeClient(endpoint, strconv.Itoa(int(tokenData.ID))) if err != nil { log.Error().Err(err).Str("context", "prepareKubeClient").Msg("Unable to get a privileged Kubernetes client for the user.") return nil, httperror.InternalServerError("Unable to get a privileged Kubernetes client for the user.", err) } - pcli.IsKubeAdmin = cli.IsKubeAdmin - pcli.NonAdminNamespaces = cli.NonAdminNamespaces + pcli.SetIsKubeAdmin(cli.GetIsKubeAdmin()) + pcli.SetClientNonAdminNamespaces(cli.GetClientNonAdminNamespaces()) return pcli, nil } diff --git a/api/http/handler/kubernetes/cluster_role_bindings.go b/api/http/handler/kubernetes/cluster_role_bindings.go index a5050c947..83621a900 100644 --- a/api/http/handler/kubernetes/cluster_role_bindings.go +++ b/api/http/handler/kubernetes/cluster_role_bindings.go @@ -32,7 +32,7 @@ func (handler *Handler) getAllKubernetesClusterRoleBindings(w http.ResponseWrite return httperror.Forbidden("User is not authorized to fetch cluster role bindings from the Kubernetes cluster.", httpErr) } - if !cli.IsKubeAdmin { + if !cli.GetIsKubeAdmin() { log.Error().Str("context", "getAllKubernetesClusterRoleBindings").Msg("user is not authorized to fetch cluster role bindings from the Kubernetes cluster.") return httperror.Forbidden("User is not authorized to fetch cluster role bindings from the Kubernetes cluster.", nil) } diff --git a/api/http/handler/kubernetes/cluster_roles.go b/api/http/handler/kubernetes/cluster_roles.go index 3fd2ca8aa..6d5d028be 100644 --- a/api/http/handler/kubernetes/cluster_roles.go +++ b/api/http/handler/kubernetes/cluster_roles.go @@ -32,7 +32,7 @@ func (handler *Handler) getAllKubernetesClusterRoles(w http.ResponseWriter, r *h return httperror.Forbidden("User is not authorized to fetch cluster roles from the Kubernetes cluster.", httpErr) } - if !cli.IsKubeAdmin { + if !cli.GetIsKubeAdmin() { log.Error().Str("context", 
"getAllKubernetesClusterRoles").Msg("user is not authorized to fetch cluster roles from the Kubernetes cluster.") return httperror.Forbidden("User is not authorized to fetch cluster roles from the Kubernetes cluster.", nil) } diff --git a/api/http/handler/kubernetes/config.go b/api/http/handler/kubernetes/config.go index aad9c03a8..b9de1824b 100644 --- a/api/http/handler/kubernetes/config.go +++ b/api/http/handler/kubernetes/config.go @@ -1,9 +1,14 @@ package kubernetes import ( + "crypto/x509" + "encoding/pem" "errors" "fmt" "net/http" + "net/url" + "os" + "strings" portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/http/security" @@ -162,11 +167,48 @@ func (handler *Handler) buildConfig(r *http.Request, tokenData *portainer.TokenD func (handler *Handler) buildCluster(r *http.Request, endpoint portainer.Endpoint, isInternal bool) clientV1.NamedCluster { kubeConfigInternal := handler.kubeClusterAccessService.GetClusterDetails(r.Host, endpoint.ID, isInternal) + if isInternal { + return clientV1.NamedCluster{ + Name: buildClusterName(endpoint.Name), + Cluster: clientV1.Cluster{ + Server: kubeConfigInternal.ClusterServerURL, + InsecureSkipTLSVerify: true, + }, + } + } + + selfSignedCert := false + serverUrl, err := url.Parse(kubeConfigInternal.ClusterServerURL) + if err != nil { + log.Warn().Err(err).Msg("Failed to parse server URL") + } + + if strings.EqualFold(serverUrl.Scheme, "https") { + var certPem []byte + var err error + + if kubeConfigInternal.CertificateAuthorityData != "" { + certPem = []byte(kubeConfigInternal.CertificateAuthorityData) + } else if kubeConfigInternal.CertificateAuthorityFile != "" { + certPem, err = os.ReadFile(kubeConfigInternal.CertificateAuthorityFile) + if err != nil { + log.Warn().Err(err).Msg("Failed to open certificate file") + } + } + + if certPem != nil { + selfSignedCert, err = IsSelfSignedCertificate(certPem) + if err != nil { + log.Warn().Err(err).Msg("Failed to verify if certificate is self-signed") + } + } + } + return clientV1.NamedCluster{ Name: buildClusterName(endpoint.Name), Cluster: clientV1.Cluster{ Server: kubeConfigInternal.ClusterServerURL, - InsecureSkipTLSVerify: true, + InsecureSkipTLSVerify: selfSignedCert, }, } } @@ -215,3 +257,38 @@ func writeFileContent(w http.ResponseWriter, r *http.Request, endpoints []portai w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; %s.json", filenameBase)) return response.JSON(w, config) } + +func IsSelfSignedCertificate(certPem []byte) (bool, error) { + if certPem == nil { + return false, errors.New("certificate data is empty") + } + + if !strings.Contains(string(certPem), "BEGIN CERTIFICATE") { + certPem = []byte(fmt.Sprintf("-----BEGIN CERTIFICATE-----\n%s\n-----END CERTIFICATE-----", string(certPem))) + } + + block, _ := pem.Decode(certPem) + if block == nil { + return false, errors.New("failed to decode certificate") + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return false, err + } + + if cert.Issuer.String() != cert.Subject.String() { + return false, nil + } + + roots := x509.NewCertPool() + roots.AddCert(cert) + + opts := x509.VerifyOptions{ + Roots: roots, + CurrentTime: cert.NotBefore, + } + + _, err = cert.Verify(opts) + return err == nil, err +} diff --git a/api/http/handler/kubernetes/config_test.go b/api/http/handler/kubernetes/config_test.go new file mode 100644 index 000000000..46875a460 --- /dev/null +++ b/api/http/handler/kubernetes/config_test.go @@ -0,0 +1,186 @@ +package kubernetes + +import ( + "testing" + + 
"github.com/stretchr/testify/assert" +) + +func TestIsSelfSignedCertificate(t *testing.T) { + + tc := []struct { + name string + cert string + expected bool + }{ + { + name: "portainer self-signed", + cert: `-----BEGIN CERTIFICATE----- +MIIBUTCB+KADAgECAhBB7psNiJlJd/nRCCKUPVenMAoGCCqGSM49BAMCMAAwHhcN +MjUwMzEzMDQwODI0WhcNMzAwMzEzMDQwODI0WjAAMFkwEwYHKoZIzj0CAQYIKoZI +zj0DAQcDQgAESdGCaXq0r1GDxF89yKjjLeCIixiPDdXAg+lw4NqAWeJq2AOo+8IH +vcCq9bSlYlezK8RzTsbf9Z1m5jRqUEbSjqNUMFIwDgYDVR0PAQH/BAQDAgWgMBMG +A1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwHQYDVR0RAQH/BBMwEYIJ +bG9jYWxob3N0hwQAAAAAMAoGCCqGSM49BAMCA0gAMEUCIApLliukFaCZHbc/2pkH +0VDY+fBMb12jhmVpgKh1Cqg9AiEAwFrMQLUkzATUpiHuukdUg5VsUiMIkWTPLglz +E4+1dRc= +-----END CERTIFICATE----- +`, + expected: true, + }, + { + name: "portainer self-signed without header", + cert: `MIIBUzCB+aADAgECAhEAjsskPzuCS5BeHjXGwYqc2jAKBggqhkjOPQQDAjAAMB4XDTI1MDMxMzA0MzQyNloXDTMwMDMxMzA0MzQyNlowADBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABITD+dNDLYQbLYDE3UMlTzD61OYRSVkVZspdp1MvZITIG4VOxtfQUqcW3P7OHQdoi52GIQ/GM6iDgxwB1BOyi3mjVDBSMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMB0GA1UdEQEB/wQTMBGCCWxvY2FsaG9zdIcEAAAAADAKBggqhkjOPQQDAgNJADBGAiEA8SmyeYLhrnrNLAFcxZp0dk6nMN70XVAfqGnbK/s8NR8CIQDgQdqhfge8QvN2TsH4gg98a9VHDv+RlcOlJ80SS+G/Ww==`, + expected: true, + }, + { + name: "custom certificate generated by openssl", + cert: `-----BEGIN CERTIFICATE----- +MIIB9TCCAZugAwIBAgIULTkNYfYHiqfOiX7mKOIGxRefx/YwCgYIKoZIzj0EAwIw +SDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNp +c2NvMRQwEgYDVQQDEwtleGFtcGxlLm5ldDAeFw0yNTAyMjgwNjI3MDBaFw0zNTAy +MjYwNjI3MDBaMAAwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAT3WlLvbGw7wPkQ +3LuHFJEaNrDv3n359JMV1CkjQi3U37u0fJrjd+8o7TxPBYgt9HDD9vsURhy41DNo +g71F2AIto4GqMIGnMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcD +AQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU+nMxx/VCE9fzrlHI +FX9mF5SRPrkwHwYDVR0jBBgwFoAUOlUIToGwnBOqzZ1dBfOvdKbwNaAwKAYDVR0R +AQH/BB4wHIIaZWRnZS4xNzIuMTcuMjIxLjIwOC5uaXAuaW8wCgYIKoZIzj0EAwID +SAAwRQIgeYrkjY0z/ypMKXZbvbMi8qOK44qoISKkSErBUCBLuwoCIQDRaJA9r931 +utpXXnysVGecVXHHKOOl1YhWglmuPvcZhw== +-----END CERTIFICATE-----`, + expected: false, + }, + { + name: "google.com certificate", + cert: `-----BEGIN CERTIFICATE----- +MIIOITCCDQmgAwIBAgIQKS0IQxknY8USDjt3IYchljANBgkqhkiG9w0BAQsFADA7 +MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVR29vZ2xlIFRydXN0IFNlcnZpY2VzMQww +CgYDVQQDEwNXUjIwHhcNMjUwMjI2MTUzMjU1WhcNMjUwNTIxMTUzMjU0WjAXMRUw +EwYDVQQDDAwqLmdvb2dsZS5jb20wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARx +nMOmIG3BuO7my/BbF/rGPAMH/JbxBDufbYFQHV+6l5pF5sdT/Zov3X+qsR3IYFl7 +F2a0gAUmK1Bq7//zTb3uo4IMDjCCDAowDgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQM +MAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFN+aEjBz3PaUtelz +3g9rVTkGRgU0MB8GA1UdIwQYMBaAFN4bHu15FdQ+NyTDIbvsNDltQrIwMFgGCCsG +AQUFBwEBBEwwSjAhBggrBgEFBQcwAYYVaHR0cDovL28ucGtpLmdvb2cvd3IyMCUG +CCsGAQUFBzAChhlodHRwOi8vaS5wa2kuZ29vZy93cjIuY3J0MIIJ5AYDVR0RBIIJ +2zCCCdeCDCouZ29vZ2xlLmNvbYIWKi5hcHBlbmdpbmUuZ29vZ2xlLmNvbYIJKi5i +ZG4uZGV2ghUqLm9yaWdpbi10ZXN0LmJkbi5kZXaCEiouY2xvdWQuZ29vZ2xlLmNv +bYIYKi5jcm93ZHNvdXJjZS5nb29nbGUuY29tghgqLmRhdGFjb21wdXRlLmdvb2ds +ZS5jb22CCyouZ29vZ2xlLmNhggsqLmdvb2dsZS5jbIIOKi5nb29nbGUuY28uaW6C +DiouZ29vZ2xlLmNvLmpwgg4qLmdvb2dsZS5jby51a4IPKi5nb29nbGUuY29tLmFy +gg8qLmdvb2dsZS5jb20uYXWCDyouZ29vZ2xlLmNvbS5icoIPKi5nb29nbGUuY29t +LmNvgg8qLmdvb2dsZS5jb20ubXiCDyouZ29vZ2xlLmNvbS50coIPKi5nb29nbGUu +Y29tLnZuggsqLmdvb2dsZS5kZYILKi5nb29nbGUuZXOCCyouZ29vZ2xlLmZyggsq +Lmdvb2dsZS5odYILKi5nb29nbGUuaXSCCyouZ29vZ2xlLm5sggsqLmdvb2dsZS5w +bIILKi5nb29nbGUucHSCDyouZ29vZ2xlYXBpcy5jboIRKi5nb29nbGV2aWRlby5j 
+b22CDCouZ3N0YXRpYy5jboIQKi5nc3RhdGljLWNuLmNvbYIPZ29vZ2xlY25hcHBz +LmNughEqLmdvb2dsZWNuYXBwcy5jboIRZ29vZ2xlYXBwcy1jbi5jb22CEyouZ29v +Z2xlYXBwcy1jbi5jb22CDGdrZWNuYXBwcy5jboIOKi5na2VjbmFwcHMuY26CEmdv +b2dsZWRvd25sb2Fkcy5jboIUKi5nb29nbGVkb3dubG9hZHMuY26CEHJlY2FwdGNo +YS5uZXQuY26CEioucmVjYXB0Y2hhLm5ldC5jboIQcmVjYXB0Y2hhLWNuLm5ldIIS +Ki5yZWNhcHRjaGEtY24ubmV0ggt3aWRldmluZS5jboINKi53aWRldmluZS5jboIR +YW1wcHJvamVjdC5vcmcuY26CEyouYW1wcHJvamVjdC5vcmcuY26CEWFtcHByb2pl +Y3QubmV0LmNughMqLmFtcHByb2plY3QubmV0LmNughdnb29nbGUtYW5hbHl0aWNz +LWNuLmNvbYIZKi5nb29nbGUtYW5hbHl0aWNzLWNuLmNvbYIXZ29vZ2xlYWRzZXJ2 +aWNlcy1jbi5jb22CGSouZ29vZ2xlYWRzZXJ2aWNlcy1jbi5jb22CEWdvb2dsZXZh +ZHMtY24uY29tghMqLmdvb2dsZXZhZHMtY24uY29tghFnb29nbGVhcGlzLWNuLmNv +bYITKi5nb29nbGVhcGlzLWNuLmNvbYIVZ29vZ2xlb3B0aW1pemUtY24uY29tghcq +Lmdvb2dsZW9wdGltaXplLWNuLmNvbYISZG91YmxlY2xpY2stY24ubmV0ghQqLmRv +dWJsZWNsaWNrLWNuLm5ldIIYKi5mbHMuZG91YmxlY2xpY2stY24ubmV0ghYqLmcu +ZG91YmxlY2xpY2stY24ubmV0gg5kb3VibGVjbGljay5jboIQKi5kb3VibGVjbGlj +ay5jboIUKi5mbHMuZG91YmxlY2xpY2suY26CEiouZy5kb3VibGVjbGljay5jboIR +ZGFydHNlYXJjaC1jbi5uZXSCEyouZGFydHNlYXJjaC1jbi5uZXSCHWdvb2dsZXRy +YXZlbGFkc2VydmljZXMtY24uY29tgh8qLmdvb2dsZXRyYXZlbGFkc2VydmljZXMt +Y24uY29tghhnb29nbGV0YWdzZXJ2aWNlcy1jbi5jb22CGiouZ29vZ2xldGFnc2Vy +dmljZXMtY24uY29tghdnb29nbGV0YWdtYW5hZ2VyLWNuLmNvbYIZKi5nb29nbGV0 +YWdtYW5hZ2VyLWNuLmNvbYIYZ29vZ2xlc3luZGljYXRpb24tY24uY29tghoqLmdv +b2dsZXN5bmRpY2F0aW9uLWNuLmNvbYIkKi5zYWZlZnJhbWUuZ29vZ2xlc3luZGlj +YXRpb24tY24uY29tghZhcHAtbWVhc3VyZW1lbnQtY24uY29tghgqLmFwcC1tZWFz +dXJlbWVudC1jbi5jb22CC2d2dDEtY24uY29tgg0qLmd2dDEtY24uY29tggtndnQy +LWNuLmNvbYINKi5ndnQyLWNuLmNvbYILMm1kbi1jbi5uZXSCDSouMm1kbi1jbi5u +ZXSCFGdvb2dsZWZsaWdodHMtY24ubmV0ghYqLmdvb2dsZWZsaWdodHMtY24ubmV0 +ggxhZG1vYi1jbi5jb22CDiouYWRtb2ItY24uY29tghRnb29nbGVzYW5kYm94LWNu +LmNvbYIWKi5nb29nbGVzYW5kYm94LWNuLmNvbYIeKi5zYWZlbnVwLmdvb2dsZXNh +bmRib3gtY24uY29tgg0qLmdzdGF0aWMuY29tghQqLm1ldHJpYy5nc3RhdGljLmNv +bYIKKi5ndnQxLmNvbYIRKi5nY3BjZG4uZ3Z0MS5jb22CCiouZ3Z0Mi5jb22CDiou +Z2NwLmd2dDIuY29tghAqLnVybC5nb29nbGUuY29tghYqLnlvdXR1YmUtbm9jb29r +aWUuY29tggsqLnl0aW1nLmNvbYILYW5kcm9pZC5jb22CDSouYW5kcm9pZC5jb22C +EyouZmxhc2guYW5kcm9pZC5jb22CBGcuY26CBiouZy5jboIEZy5jb4IGKi5nLmNv +ggZnb28uZ2yCCnd3dy5nb28uZ2yCFGdvb2dsZS1hbmFseXRpY3MuY29tghYqLmdv +b2dsZS1hbmFseXRpY3MuY29tggpnb29nbGUuY29tghJnb29nbGVjb21tZXJjZS5j +b22CFCouZ29vZ2xlY29tbWVyY2UuY29tgghnZ3BodC5jboIKKi5nZ3BodC5jboIK +dXJjaGluLmNvbYIMKi51cmNoaW4uY29tggh5b3V0dS5iZYILeW91dHViZS5jb22C +DSoueW91dHViZS5jb22CEW11c2ljLnlvdXR1YmUuY29tghMqLm11c2ljLnlvdXR1 +YmUuY29tghR5b3V0dWJlZWR1Y2F0aW9uLmNvbYIWKi55b3V0dWJlZWR1Y2F0aW9u +LmNvbYIPeW91dHViZWtpZHMuY29tghEqLnlvdXR1YmVraWRzLmNvbYIFeXQuYmWC +ByoueXQuYmWCGmFuZHJvaWQuY2xpZW50cy5nb29nbGUuY29tghMqLmFuZHJvaWQu +Z29vZ2xlLmNughIqLmNocm9tZS5nb29nbGUuY26CFiouZGV2ZWxvcGVycy5nb29n +bGUuY26CFSouYWlzdHVkaW8uZ29vZ2xlLmNvbTATBgNVHSAEDDAKMAgGBmeBDAEC +ATA2BgNVHR8ELzAtMCugKaAnhiVodHRwOi8vYy5wa2kuZ29vZy93cjIvb0JGWVlh +aHpnVkkuY3JsMIIBBAYKKwYBBAHWeQIEAgSB9QSB8gDwAHcAzxFW7tUufK/zh1vZ +aS6b6RpxZ0qwF+ysAdJbd87MOwgAAAGVQxqxaQAABAMASDBGAiEAk6r74vfyJIaa +hYTWqNRsjl/RpCWq/wyzzMi21zgGmfkCIQCZafyS/fl0tiutICL9aOSnDBRfPYqd +CeNqKOy11EjvigB1AN6FgddQJHxrzcuvVjfF54HGTORu1hdjn480pybJ4r03AAAB +lUMasUkAAAQDAEYwRAIgYfG2iyRnmn8MI86RFDxOQW1/IOBAjQxNfIQ8toZlZkoC +IA1BHw7cqmlTP7Ks+ebX6hGfNlVsgTQS8iYyKL5/BSvTMA0GCSqGSIb3DQEBCwUA +A4IBAQAYSNtoW72rqhPfjV5Ug1ENbbimfqmqiJS4JdzaEFRpftzachTuvx8relaY ++7FAz5y4YULu9LGNjpBRYW8yW9pgfWyc53CCHSkDODguUOMCRo3hdglxZ2d5pJ/8 +TQY4zRBd8OHzOAx2kH6jLEj9I0nDie3vowSYm7FCBRLjzfForRNQWmzPu+5hS3De 
+QM0R2jWpmPcG3ffQ5qQwnAQnP9HCK9oEZ5cFqLvOQWfttj/rzKOz856iSEoRpf8S +wVFRu3Uv2TXQ6UYF2cDfiWCe6/mO35CIynC6FVkunze/Q/2rtaCDttLRYZcLllj8 +PSl7nmLhtqDlO7da/S34BFiyyRjN +-----END CERTIFICATE----- +`, + expected: false, + }, + { + name: "let's encrypt certificate", + cert: `-----BEGIN CERTIFICATE----- +MIIGMjCCBRqgAwIBAgISBVHH05rEMkaCuDQvABDjiam0MA0GCSqGSIb3DQEBCwUA +MDMxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQwwCgYDVQQD +EwNSMTAwHhcNMjUwMzEzMDIyMzE2WhcNMjUwNjExMDIyMzE1WjAkMSIwIAYDVQQD +Exlvei1kZW1vLnBvcnRhaW5lcmNsb3VkLmlvMIICIjANBgkqhkiG9w0BAQEFAAOC +Ag8AMIICCgKCAgEAwNCcr9azSaldEwgL54bQScuWBnmw3FMHgEATxDVp2MEawQkV +I3VScUcJWBnlHlb7TUanRC/c/vJGbzc+KDuCRTZ2/Ob2yQ9G5mZjGttBAnBSQPpV +arEEBFCClhVBn4LhLNmIsCjCy25+m0HY/dwWbKjTMT/KxpTa3L3mdmIFa7XNs6W2 +vEZGwYM+2JPMJ9DwemVrrrvRqd5vLWTZcWvWJQ7HMfw3PoELpeqyycmxDqd9PCMz +yMp8q3UwLDur3+KfDXGtGOoubxcOuJrpemOe8JeM5cEYEhvOy8D16zmWwWYDT19D +ElFfUbM0GGITpJ41Qie03DvmI0hDYDqTEZfKza967VsvD7K9bFgLHmHdv7gLNutB +FConpziNqslapWwQ5j7bKircxKjRQVkOiXH48m2IUzylqWgJPVMvHukRu0YVnvbt +Q53xNVZQEbjvZmIuz8jqo22Y/1Jr7Plnb1lUvvDznA58MHT0KA4LSZwk9tvMJJCw +vh7AoWB6/Jnl8QVnApOdCa6M/An128rBwgrCmp0wSvhMecTkWC8/gsah0Q5wKFL3 +ziBth728Qy8RlNghRUw88e/y4pdGHN8egjK1NpdgsvTFdRNQ8qwu0lx9pO3b6TNQ +qDG5pirXjS/DhPYvZtJRDK6SMTHJNm+0NGdWB8qpNssFrU6u2cRl0533LtECAwEA +AaOCAk0wggJJMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYI +KwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUiQi/3pZamfPxRGPI8DTZ +tej1494wHwYDVR0jBBgwFoAUu7zDR6XkvKnGw6RyDBCNojXhyOgwVwYIKwYBBQUH +AQEESzBJMCIGCCsGAQUFBzABhhZodHRwOi8vcjEwLm8ubGVuY3Iub3JnMCMGCCsG +AQUFBzAChhdodHRwOi8vcjEwLmkubGVuY3Iub3JnLzAkBgNVHREEHTAbghlvei1k +ZW1vLnBvcnRhaW5lcmNsb3VkLmlvMBMGA1UdIAQMMAowCAYGZ4EMAQIBMC4GA1Ud +HwQnMCUwI6AhoB+GHWh0dHA6Ly9yMTAuYy5sZW5jci5vcmcvNTMuY3JsMIIBBAYK +KwYBBAHWeQIEAgSB9QSB8gDwAHcAzPsPaoVxCWX+lZtTzumyfCLphVwNl422qX5U +wP5MDbAAAAGVjYW7/QAABAMASDBGAiEA8CjMOIj7wqQ60BX22A5pDkA23IxZPzwV +1MF5+VSgdqgCIQCZhry5AK2VyZX/cIODEl6eHBCUWS4vHB+J8RxeclKCpAB1AKLj +CuRF772tm3447Udnd1PXgluElNcrXhssxLlQpEfnAAABlY2Fu/QAAAQDAEYwRAIg +bwjJgZJew/1LoL9yzDD1P4Xkd8ezFucxfU3AzlV1XEYCIH5RPyW1HP9GSr+aAx+I +o3inVl1NagJFYiApAPvFmIEgMA0GCSqGSIb3DQEBCwUAA4IBAQATJWi1sJSBstO+ +hyH7DsrAtDhiQTOWzUZezBlgCn8hfmA3nX5uKsHyxPPPEQ/GFYOltRD/+34X9kFF +YNzUjJOP0bGk45I1JbspxRRvtbDpk0+dj2VE2toM8vLRDz3+DB4YB2lFofYlex++ +16xFzOIE+ZW41qBs3G8InsyHADsaFY2CQ9re/kZvenptU/ax1U2a21JJ3TT2DmXW +AHZYQ5/whVIowsebw1e28I12VhLl2BKn7v4MpCn3GUzBBQAEbJ6TIjHtFKWWnVfH +FisaUX6N4hMzGZVJOsbH4QVBGuNwUshHiD8MSpbans2w+T4bCe11XayerqxFhTao +w/pjiPVy +-----END CERTIFICATE----- +`, + expected: false, + }, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + actual, err := IsSelfSignedCertificate([]byte(tt.cert)) + assert.NoError(t, err) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/api/http/handler/kubernetes/configmaps.go b/api/http/handler/kubernetes/configmaps.go index 633afe92d..bd10ae15d 100644 --- a/api/http/handler/kubernetes/configmaps.go +++ b/api/http/handler/kubernetes/configmaps.go @@ -146,13 +146,11 @@ func (handler *Handler) getAllKubernetesConfigMaps(r *http.Request) ([]models.K8 } if isUsed { - configMapsWithApplications, err := cli.CombineConfigMapsWithApplications(configMaps) + err = cli.SetConfigMapsIsUsed(&configMaps) if err != nil { log.Error().Err(err).Str("context", "getAllKubernetesConfigMaps").Msg("Unable to combine configMaps with associated applications") return nil, httperror.InternalServerError("Unable to combine configMaps with associated applications", err) } - - return configMapsWithApplications, nil } return configMaps, nil diff 
--git a/api/http/handler/kubernetes/deprecated_routes.go b/api/http/handler/kubernetes/deprecated_routes.go index cc44d10e5..e471c2ecb 100644 --- a/api/http/handler/kubernetes/deprecated_routes.go +++ b/api/http/handler/kubernetes/deprecated_routes.go @@ -36,11 +36,14 @@ func deprecatedNamespaceParser(w http.ResponseWriter, r *http.Request) (string, // Restore the original body for further use bodyBytes, err := io.ReadAll(r.Body) + if err != nil { + return "", httperror.InternalServerError("Unable to read request body", err) + } + r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) payload := models.K8sNamespaceDetails{} - err = request.DecodeAndValidateJSONPayload(r, &payload) - if err != nil { + if err := request.DecodeAndValidateJSONPayload(r, &payload); err != nil { return "", httperror.BadRequest("Invalid request. Unable to parse namespace payload", err) } namespaceName := payload.Name diff --git a/api/http/handler/kubernetes/describe.go b/api/http/handler/kubernetes/describe.go new file mode 100644 index 000000000..ce8f17418 --- /dev/null +++ b/api/http/handler/kubernetes/describe.go @@ -0,0 +1,73 @@ +package kubernetes + +import ( + "net/http" + + httperror "github.com/portainer/portainer/pkg/libhttp/error" + "github.com/portainer/portainer/pkg/libhttp/request" + "github.com/portainer/portainer/pkg/libhttp/response" + "github.com/portainer/portainer/pkg/libkubectl" + "github.com/rs/zerolog/log" +) + +type describeResourceResponse struct { + Describe string `json:"describe"` +} + +// @id DescribeResource +// @summary Get a description of a kubernetes resource +// @description Get a description of a kubernetes resource. +// @description **Access policy**: Authenticated user. +// @tags kubernetes +// @security ApiKeyAuth || jwt +// @produce json +// @param id path int true "Environment identifier" +// @param name query string true "Resource name" +// @param kind query string true "Resource kind" +// @param namespace query string false "Namespace" +// @success 200 {object} describeResourceResponse "Success" +// @failure 400 "Invalid request payload, such as missing required fields or fields not meeting validation criteria." +// @failure 401 "Unauthorized access - the user is not authenticated or does not have the necessary permissions. Ensure that you have provided a valid API key or JWT token, and that you have the required permissions." +// @failure 403 "Permission denied - the user is authenticated but does not have the necessary permissions to access the requested resource or perform the specified operation. Check your user roles and permissions." +// @failure 404 "Unable to find an environment with the specified identifier." +// @failure 500 "Server error occurred while attempting to retrieve resource description" +// @router /kubernetes/{id}/describe [get] +func (handler *Handler) describeResource(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { + name, err := request.RetrieveQueryParameter(r, "name", false) + if err != nil { + log.Error().Err(err).Str("context", "describeResource").Msg("Invalid query parameter name") + return httperror.BadRequest("an error occurred during the describeResource operation, invalid query parameter name. Error: ", err) + } + + kind, err := request.RetrieveQueryParameter(r, "kind", false) + if err != nil { + log.Error().Err(err).Str("context", "describeResource").Msg("Invalid query parameter kind") + return httperror.BadRequest("an error occurred during the describeResource operation, invalid query parameter kind. 
Error: ", err) + } + + namespace, err := request.RetrieveQueryParameter(r, "namespace", true) + if err != nil { + log.Error().Err(err).Str("context", "describeResource").Msg("Invalid query parameter namespace") + return httperror.BadRequest("an error occurred during the describeResource operation, invalid query parameter namespace. Error: ", err) + } + + // fetches the token and the correct server URL for the endpoint, similar to getHelmClusterAccess + libKubectlAccess, err := handler.getLibKubectlAccess(r) + if err != nil { + return httperror.InternalServerError("an error occurred during the describeResource operation, failed to get libKubectlAccess. Error: ", err) + } + + client, err := libkubectl.NewClient(libKubectlAccess, namespace, "", true) + if err != nil { + log.Error().Err(err).Str("context", "describeResource").Msg("Failed to create kubernetes client") + return httperror.InternalServerError("an error occurred during the describeResource operation, failed to create kubernetes client. Error: ", err) + } + + out, err := client.Describe(namespace, name, kind) + if err != nil { + log.Error().Err(err).Str("context", "describeResource").Msg("Failed to describe kubernetes resource") + return httperror.InternalServerError("an error occurred during the describeResource operation, failed to describe kubernetes resource. Error: ", err) + } + + return response.JSON(w, describeResourceResponse{Describe: out}) +} diff --git a/api/http/handler/kubernetes/event.go b/api/http/handler/kubernetes/event.go new file mode 100644 index 000000000..25f024303 --- /dev/null +++ b/api/http/handler/kubernetes/event.go @@ -0,0 +1,102 @@ +package kubernetes + +import ( + "net/http" + + httperror "github.com/portainer/portainer/pkg/libhttp/error" + "github.com/portainer/portainer/pkg/libhttp/request" + "github.com/portainer/portainer/pkg/libhttp/response" + "github.com/rs/zerolog/log" + k8serrors "k8s.io/apimachinery/pkg/api/errors" +) + +// @id getKubernetesEventsForNamespace +// @summary Gets kubernetes events for namespace +// @description Get events by optional query param resourceId for a given namespace. +// @description **Access policy**: Authenticated user. +// @tags kubernetes +// @security ApiKeyAuth || jwt +// @produce json +// @param id path int true "Environment identifier" +// @param namespace path string true "The namespace name the events are associated to" +// @param resourceId query string false "The resource id of the involved kubernetes object" example:"e5b021b6-4bce-4c06-bd3b-6cca906797aa" +// @success 200 {object} []kubernetes.K8sEvent "Success" +// @failure 400 "Invalid request payload, such as missing required fields or fields not meeting validation criteria." +// @failure 401 "Unauthorized access - the user is not authenticated or does not have the necessary permissions. Ensure that you have provided a valid API key or JWT token, and that you have the required permissions." +// @failure 403 "Permission denied - the user is authenticated but does not have the necessary permissions to access the requested resource or perform the specified operation. Check your user roles and permissions." +// @failure 500 "Server error occurred while attempting to retrieve the events within the specified namespace." 
+// @router /kubernetes/{id}/namespaces/{namespace}/events [get] +func (handler *Handler) getKubernetesEventsForNamespace(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { + namespace, err := request.RetrieveRouteVariableValue(r, "namespace") + if err != nil { + log.Error().Err(err).Str("context", "getKubernetesEvents").Str("namespace", namespace).Msg("Unable to retrieve namespace identifier route variable") + return httperror.BadRequest("Unable to retrieve namespace identifier route variable", err) + } + + resourceId, err := request.RetrieveQueryParameter(r, "resourceId", true) + if err != nil { + log.Error().Err(err).Str("context", "getKubernetesEvents").Msg("Unable to retrieve resourceId query parameter") + return httperror.BadRequest("Unable to retrieve resourceId query parameter", err) + } + + cli, httpErr := handler.getProxyKubeClient(r) + if httpErr != nil { + log.Error().Err(httpErr).Str("context", "getKubernetesEvents").Str("resourceId", resourceId).Msg("Unable to get a Kubernetes client for the user") + return httperror.InternalServerError("Unable to get a Kubernetes client for the user", httpErr) + } + + events, err := cli.GetEvents(namespace, resourceId) + if err != nil { + if k8serrors.IsUnauthorized(err) || k8serrors.IsForbidden(err) { + log.Error().Err(err).Str("context", "getKubernetesEvents").Msg("Unauthorized access to the Kubernetes API") + return httperror.Forbidden("Unauthorized access to the Kubernetes API", err) + } + + log.Error().Err(err).Str("context", "getKubernetesEvents").Msg("Unable to retrieve events") + return httperror.InternalServerError("Unable to retrieve events", err) + } + + return response.JSON(w, events) +} + +// @id getAllKubernetesEvents +// @summary Gets kubernetes events +// @description Get events by query param resourceId +// @description **Access policy**: Authenticated user. +// @tags kubernetes +// @security ApiKeyAuth || jwt +// @produce json +// @param id path int true "Environment identifier" +// @param resourceId query string false "The resource id of the involved kubernetes object" example:"e5b021b6-4bce-4c06-bd3b-6cca906797aa" +// @success 200 {object} []kubernetes.K8sEvent "Success" +// @failure 400 "Invalid request payload, such as missing required fields or fields not meeting validation criteria." +// @failure 401 "Unauthorized access - the user is not authenticated or does not have the necessary permissions. Ensure that you have provided a valid API key or JWT token, and that you have the required permissions." +// @failure 403 "Permission denied - the user is authenticated but does not have the necessary permissions to access the requested resource or perform the specified operation. Check your user roles and permissions." +// @failure 500 "Server error occurred while attempting to retrieve the events." 
+// @router /kubernetes/{id}/events [get] +func (handler *Handler) getAllKubernetesEvents(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { + resourceId, err := request.RetrieveQueryParameter(r, "resourceId", true) + if err != nil { + log.Error().Err(err).Str("context", "getKubernetesEvents").Msg("Unable to retrieve resourceId query parameter") + return httperror.BadRequest("Unable to retrieve resourceId query parameter", err) + } + + cli, httpErr := handler.getProxyKubeClient(r) + if httpErr != nil { + log.Error().Err(httpErr).Str("context", "getKubernetesEvents").Str("resourceId", resourceId).Msg("Unable to get a Kubernetes client for the user") + return httperror.InternalServerError("Unable to get a Kubernetes client for the user", httpErr) + } + + events, err := cli.GetEvents("", resourceId) + if err != nil { + if k8serrors.IsUnauthorized(err) || k8serrors.IsForbidden(err) { + log.Error().Err(err).Str("context", "getKubernetesEvents").Msg("Unauthorized access to the Kubernetes API") + return httperror.Forbidden("Unauthorized access to the Kubernetes API", err) + } + + log.Error().Err(err).Str("context", "getKubernetesEvents").Msg("Unable to retrieve events") + return httperror.InternalServerError("Unable to retrieve events", err) + } + + return response.JSON(w, events) +} diff --git a/api/http/handler/kubernetes/event_test.go b/api/http/handler/kubernetes/event_test.go new file mode 100644 index 000000000..77f38c511 --- /dev/null +++ b/api/http/handler/kubernetes/event_test.go @@ -0,0 +1,60 @@ +package kubernetes + +import ( + "net/http" + "net/http/httptest" + "testing" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/datastore" + "github.com/portainer/portainer/api/http/security" + "github.com/portainer/portainer/api/internal/authorization" + "github.com/portainer/portainer/api/internal/testhelpers" + "github.com/portainer/portainer/api/jwt" + "github.com/portainer/portainer/api/kubernetes" + kubeClient "github.com/portainer/portainer/api/kubernetes/cli" + "github.com/stretchr/testify/assert" +) + +// Currently this test just tests the HTTP Handler is setup correctly, in the future we should move the ClientFactory to a mock in order +// test the logic in event.go +func TestGetKubernetesEvents(t *testing.T) { + is := assert.New(t) + + _, store := datastore.MustNewTestStore(t, true, true) + + err := store.Endpoint().Create(&portainer.Endpoint{ + ID: 1, + Type: portainer.AgentOnKubernetesEnvironment, + }, + ) + is.NoError(err, "error creating environment") + + err = store.User().Create(&portainer.User{Username: "admin", Role: portainer.AdministratorRole}) + is.NoError(err, "error creating a user") + + jwtService, err := jwt.NewService("1h", store) + is.NoError(err, "Error initiating jwt service") + + tk, _, _ := jwtService.GenerateToken(&portainer.TokenData{ID: 1, Username: "admin", Role: portainer.AdministratorRole}) + + kubeClusterAccessService := kubernetes.NewKubeClusterAccessService("", "", "") + + cli := testhelpers.NewKubernetesClient() + factory, _ := kubeClient.NewClientFactory(nil, nil, store, "", "", "") + + authorizationService := authorization.NewService(store) + handler := NewHandler(testhelpers.NewTestRequestBouncer(), authorizationService, store, jwtService, kubeClusterAccessService, + factory, cli) + is.NotNil(handler, "Handler should not fail") + + req := httptest.NewRequest(http.MethodGet, "/kubernetes/1/events?resourceId=8", nil) + ctx := security.StoreTokenData(req, &portainer.TokenData{ID: 1, Username: 
"admin", Role: 1}) + req = req.WithContext(ctx) + testhelpers.AddTestSecurityCookie(req, tk) + + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + is.Equal(http.StatusOK, rr.Code, "Status should be 200") +} diff --git a/api/http/handler/kubernetes/handler.go b/api/http/handler/kubernetes/handler.go index a47bd6ed9..a8a5898c8 100644 --- a/api/http/handler/kubernetes/handler.go +++ b/api/http/handler/kubernetes/handler.go @@ -15,6 +15,7 @@ import ( "github.com/portainer/portainer/api/kubernetes/cli" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" + "github.com/portainer/portainer/pkg/libkubectl" "github.com/rs/zerolog/log" "github.com/gorilla/mux" @@ -57,6 +58,7 @@ func NewHandler(bouncer security.BouncerService, authorizationService *authoriza endpointRouter.Handle("/configmaps/count", httperror.LoggerHandler(h.getAllKubernetesConfigMapsCount)).Methods(http.MethodGet) endpointRouter.Handle("/cron_jobs", httperror.LoggerHandler(h.getAllKubernetesCronJobs)).Methods(http.MethodGet) endpointRouter.Handle("/cron_jobs/delete", httperror.LoggerHandler(h.deleteKubernetesCronJobs)).Methods(http.MethodPost) + endpointRouter.Handle("/events", httperror.LoggerHandler(h.getAllKubernetesEvents)).Methods(http.MethodGet) endpointRouter.Handle("/jobs", httperror.LoggerHandler(h.getAllKubernetesJobs)).Methods(http.MethodGet) endpointRouter.Handle("/jobs/delete", httperror.LoggerHandler(h.deleteKubernetesJobs)).Methods(http.MethodPost) endpointRouter.Handle("/cluster_roles", httperror.LoggerHandler(h.getAllKubernetesClusterRoles)).Methods(http.MethodGet) @@ -102,12 +104,14 @@ func NewHandler(bouncer security.BouncerService, authorizationService *authoriza endpointRouter.Handle("/cluster_roles/delete", httperror.LoggerHandler(h.deleteClusterRoles)).Methods(http.MethodPost) endpointRouter.Handle("/cluster_role_bindings", httperror.LoggerHandler(h.getAllKubernetesClusterRoleBindings)).Methods(http.MethodGet) endpointRouter.Handle("/cluster_role_bindings/delete", httperror.LoggerHandler(h.deleteClusterRoleBindings)).Methods(http.MethodPost) + endpointRouter.Handle("/describe", httperror.LoggerHandler(h.describeResource)).Methods(http.MethodGet) // namespaces // in the future this piece of code might be in another package (or a few different packages - namespaces/namespace?) // to keep it simple, we've decided to leave it like this. namespaceRouter := endpointRouter.PathPrefix("/namespaces/{namespace}").Subrouter() namespaceRouter.Handle("/configmaps/{configmap}", httperror.LoggerHandler(h.getKubernetesConfigMap)).Methods(http.MethodGet) + namespaceRouter.Handle("/events", httperror.LoggerHandler(h.getKubernetesEventsForNamespace)).Methods(http.MethodGet) namespaceRouter.Handle("/system", bouncer.RestrictedAccess(httperror.LoggerHandler(h.namespacesToggleSystem))).Methods(http.MethodPut) namespaceRouter.Handle("/ingresscontrollers", httperror.LoggerHandler(h.getKubernetesIngressControllersByNamespace)).Methods(http.MethodGet) namespaceRouter.Handle("/ingresscontrollers", httperror.LoggerHandler(h.updateKubernetesIngressControllersByNamespace)).Methods(http.MethodPut) @@ -131,7 +135,7 @@ func NewHandler(bouncer security.BouncerService, authorizationService *authoriza // getProxyKubeClient gets a kubeclient for the user. It's generally what you want as it retrieves the kubeclient // from the Authorization token of the currently logged in user. The kubeclient that is not from the proxy is actually using // admin permissions. 
If you're unsure which one to use, use this. -func (h *Handler) getProxyKubeClient(r *http.Request) (*cli.KubeClient, *httperror.HandlerError) { +func (h *Handler) getProxyKubeClient(r *http.Request) (portainer.KubeClient, *httperror.HandlerError) { endpointID, err := request.RetrieveNumericRouteVariableValue(r, "id") if err != nil { return nil, httperror.BadRequest(fmt.Sprintf("an error occurred during the getProxyKubeClient operation, the environment identifier route variable is invalid for /api/kubernetes/%d. Error: ", endpointID), err) @@ -142,7 +146,7 @@ func (h *Handler) getProxyKubeClient(r *http.Request) (*cli.KubeClient, *httperr return nil, httperror.Forbidden(fmt.Sprintf("an error occurred during the getProxyKubeClient operation, permission denied to access the environment /api/kubernetes/%d. Error: ", endpointID), err) } - cli, ok := h.KubernetesClientFactory.GetProxyKubeClient(strconv.Itoa(endpointID), tokenData.Token) + cli, ok := h.KubernetesClientFactory.GetProxyKubeClient(strconv.Itoa(endpointID), strconv.Itoa(int(tokenData.ID))) if !ok { return nil, httperror.InternalServerError("an error occurred during the getProxyKubeClient operation,failed to get proxy KubeClient", nil) } @@ -175,7 +179,7 @@ func (handler *Handler) kubeClientMiddleware(next http.Handler) http.Handler { } // Check if we have a kubeclient against this auth token already, otherwise generate a new one - _, ok := handler.KubernetesClientFactory.GetProxyKubeClient(strconv.Itoa(endpointID), tokenData.Token) + _, ok := handler.KubernetesClientFactory.GetProxyKubeClient(strconv.Itoa(endpointID), strconv.Itoa(int(tokenData.ID))) if ok { next.ServeHTTP(w, r) return @@ -251,7 +255,7 @@ func (handler *Handler) kubeClientMiddleware(next http.Handler) http.Handler { return } serverURL.Scheme = "https" - serverURL.Host = "localhost" + handler.KubernetesClientFactory.AddrHTTPS + serverURL.Host = "localhost" + handler.KubernetesClientFactory.GetAddrHTTPS() config.Clusters[0].Cluster.Server = serverURL.String() yaml, err := cli.GenerateYAML(config) @@ -265,7 +269,40 @@ func (handler *Handler) kubeClientMiddleware(next http.Handler) http.Handler { return } - handler.KubernetesClientFactory.SetProxyKubeClient(strconv.Itoa(int(endpoint.ID)), tokenData.Token, kubeCli) + handler.KubernetesClientFactory.SetProxyKubeClient(strconv.Itoa(int(endpoint.ID)), strconv.Itoa(int(tokenData.ID)), kubeCli) next.ServeHTTP(w, r) }) } + +func (handler *Handler) getLibKubectlAccess(r *http.Request) (*libkubectl.ClientAccess, error) { + tokenData, err := security.RetrieveTokenData(r) + if err != nil { + return nil, httperror.InternalServerError("Unable to retrieve user authentication token", err) + } + + bearerToken, _, err := handler.JwtService.GenerateToken(tokenData) + if err != nil { + return nil, httperror.Unauthorized("Unauthorized", err) + } + + endpoint, err := middlewares.FetchEndpoint(r) + if err != nil { + return nil, httperror.InternalServerError("Unable to find the Kubernetes endpoint associated to the request.", err) + } + + sslSettings, err := handler.DataStore.SSLSettings().Settings() + if err != nil { + return nil, httperror.InternalServerError("Unable to retrieve settings from the database", err) + } + + hostURL := "localhost" + if !sslSettings.SelfSigned { + hostURL = r.Host + } + + kubeConfigInternal := handler.kubeClusterAccessService.GetClusterDetails(hostURL, endpoint.ID, true) + return &libkubectl.ClientAccess{ + Token: bearerToken, + ServerUrl: kubeConfigInternal.ClusterServerURL, + }, nil +} diff --git 
a/api/http/handler/kubernetes/namespaces.go b/api/http/handler/kubernetes/namespaces.go index 2efde3b85..75dae9e69 100644 --- a/api/http/handler/kubernetes/namespaces.go +++ b/api/http/handler/kubernetes/namespaces.go @@ -22,6 +22,7 @@ import ( // @produce json // @param id path int true "Environment identifier" // @param withResourceQuota query boolean true "When set to true, include the resource quota information as part of the Namespace information. Default is false" +// @param withUnhealthyEvents query boolean true "When set to true, include the unhealthy events information as part of the Namespace information. Default is false" // @success 200 {array} portainer.K8sNamespaceInfo "Success" // @failure 400 "Invalid request payload, such as missing required fields or fields not meeting validation criteria." // @failure 401 "Unauthorized access - the user is not authenticated or does not have the necessary permissions. Ensure that you have provided a valid API key or JWT token, and that you have the required permissions." @@ -36,6 +37,12 @@ func (handler *Handler) getKubernetesNamespaces(w http.ResponseWriter, r *http.R return httperror.BadRequest("an error occurred during the GetKubernetesNamespaces operation, invalid query parameter withResourceQuota. Error: ", err) } + withUnhealthyEvents, err := request.RetrieveBooleanQueryParameter(r, "withUnhealthyEvents", true) + if err != nil { + log.Error().Err(err).Str("context", "GetKubernetesNamespaces").Msg("Invalid query parameter withUnhealthyEvents") + return httperror.BadRequest("an error occurred during the GetKubernetesNamespaces operation, invalid query parameter withUnhealthyEvents. Error: ", err) + } + cli, httpErr := handler.prepareKubeClient(r) if httpErr != nil { log.Error().Err(httpErr).Str("context", "GetKubernetesNamespaces").Msg("Unable to get a Kubernetes client for the user") @@ -48,6 +55,14 @@ func (handler *Handler) getKubernetesNamespaces(w http.ResponseWriter, r *http.R return httperror.InternalServerError("an error occurred during the GetKubernetesNamespaces operation, unable to retrieve namespaces from the Kubernetes cluster. Error: ", err) } + if withUnhealthyEvents { + namespaces, err = cli.CombineNamespacesWithUnhealthyEvents(namespaces) + if err != nil { + log.Error().Err(err).Str("context", "GetKubernetesNamespaces").Msg("Unable to combine namespaces with unhealthy events") + return httperror.InternalServerError("an error occurred during the GetKubernetesNamespaces operation, unable to combine namespaces with unhealthy events. Error: ", err) + } + } + if withResourceQuota { return cli.CombineNamespacesWithResourceQuotas(namespaces, w) } diff --git a/api/http/handler/kubernetes/secrets.go b/api/http/handler/kubernetes/secrets.go index 1375e9e6b..782e6107e 100644 --- a/api/http/handler/kubernetes/secrets.go +++ b/api/http/handler/kubernetes/secrets.go @@ -130,13 +130,11 @@ func (handler *Handler) getAllKubernetesSecrets(r *http.Request) ([]models.K8sSe } if isUsed { - secretsWithApplications, err := cli.CombineSecretsWithApplications(secrets) + err = cli.SetSecretsIsUsed(&secrets) if err != nil { log.Error().Err(err).Str("context", "GetAllKubernetesSecrets").Msg("Unable to combine secrets with associated applications") return nil, httperror.InternalServerError("unable to combine secrets with associated applications. 
Error: ", err) } - - return secretsWithApplications, nil } return secrets, nil diff --git a/api/http/handler/motd/motd.go b/api/http/handler/motd/motd.go index dd2112c16..2865db245 100644 --- a/api/http/handler/motd/motd.go +++ b/api/http/handler/motd/motd.go @@ -7,7 +7,9 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/http/client" "github.com/portainer/portainer/pkg/libcrypto" + libclient "github.com/portainer/portainer/pkg/libhttp/client" "github.com/portainer/portainer/pkg/libhttp/response" + "github.com/rs/zerolog/log" "github.com/segmentio/encoding/json" ) @@ -37,6 +39,12 @@ type motdData struct { // @success 200 {object} motdResponse // @router /motd [get] func (handler *Handler) motd(w http.ResponseWriter, r *http.Request) { + if err := libclient.ExternalRequestDisabled(portainer.MessageOfTheDayURL); err != nil { + log.Debug().Err(err).Msg("External request disabled: MOTD") + response.JSON(w, &motdResponse{Message: ""}) + return + } + motd, err := client.Get(portainer.MessageOfTheDayURL, 0) if err != nil { response.JSON(w, &motdResponse{Message: ""}) @@ -52,7 +60,7 @@ func (handler *Handler) motd(w http.ResponseWriter, r *http.Request) { message := strings.Join(data.Message, "\n") - hash := libcrypto.HashFromBytes([]byte(message)) + hash := libcrypto.InsecureHashFromBytes([]byte(message)) resp := motdResponse{ Title: data.Title, Message: message, diff --git a/api/http/handler/registries/handler.go b/api/http/handler/registries/handler.go index dee14885e..026039833 100644 --- a/api/http/handler/registries/handler.go +++ b/api/http/handler/registries/handler.go @@ -5,10 +5,10 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" + httperrors "github.com/portainer/portainer/api/http/errors" "github.com/portainer/portainer/api/http/proxy" "github.com/portainer/portainer/api/http/security" - "github.com/portainer/portainer/api/internal/endpointutils" - "github.com/portainer/portainer/api/kubernetes" + "github.com/portainer/portainer/api/internal/registryutils/access" "github.com/portainer/portainer/api/kubernetes/cli" "github.com/portainer/portainer/api/pendingactions" httperror "github.com/portainer/portainer/pkg/libhttp/error" @@ -17,6 +17,7 @@ import ( "github.com/gorilla/mux" "github.com/pkg/errors" + "github.com/rs/zerolog/log" ) func hideFields(registry *portainer.Registry, hideAccesses bool) { @@ -56,17 +57,20 @@ func newHandler(bouncer security.BouncerService) *Handler { func (handler *Handler) initRouter(bouncer accessGuard) { adminRouter := handler.NewRoute().Subrouter() adminRouter.Use(bouncer.AdminAccess) - - authenticatedRouter := handler.NewRoute().Subrouter() - authenticatedRouter.Use(bouncer.AuthenticatedAccess) - adminRouter.Handle("/registries", httperror.LoggerHandler(handler.registryList)).Methods(http.MethodGet) adminRouter.Handle("/registries", httperror.LoggerHandler(handler.registryCreate)).Methods(http.MethodPost) adminRouter.Handle("/registries/{id}", httperror.LoggerHandler(handler.registryUpdate)).Methods(http.MethodPut) adminRouter.Handle("/registries/{id}/configure", httperror.LoggerHandler(handler.registryConfigure)).Methods(http.MethodPost) adminRouter.Handle("/registries/{id}", httperror.LoggerHandler(handler.registryDelete)).Methods(http.MethodDelete) - authenticatedRouter.Handle("/registries/{id}", httperror.LoggerHandler(handler.registryInspect)).Methods(http.MethodGet) + // Use registry-specific access bouncer for inspect and repositories endpoints + 
registryAccessRouter := handler.NewRoute().Subrouter() + registryAccessRouter.Use(bouncer.AuthenticatedAccess, handler.RegistryAccess) + registryAccessRouter.Handle("/registries/{id}", httperror.LoggerHandler(handler.registryInspect)).Methods(http.MethodGet) + + // Keep the gitlab proxy on the regular authenticated router as it doesn't require specific registry access + authenticatedRouter := handler.NewRoute().Subrouter() + authenticatedRouter.Use(bouncer.AuthenticatedAccess) authenticatedRouter.PathPrefix("/registries/proxies/gitlab").Handler(httperror.LoggerHandler(handler.proxyRequestsToGitlabAPIWithoutRegistry)) } @@ -88,9 +92,7 @@ func (handler *Handler) registriesHaveSameURLAndCredentials(r1, r2 *portainer.Re } // this function validates that -// // 1. user has the appropriate authorizations to perform the request -// // 2. user has a direct or indirect access to the registry func (handler *Handler) userHasRegistryAccess(r *http.Request, registry *portainer.Registry) (hasAccess bool, isAdmin bool, err error) { securityContext, err := security.RetrieveRestrictedRequestContext(r) @@ -98,11 +100,6 @@ func (handler *Handler) userHasRegistryAccess(r *http.Request, registry *portain return false, false, err } - user, err := handler.DataStore.User().Read(securityContext.UserID) - if err != nil { - return false, false, err - } - // Portainer admins always have access to everything if securityContext.IsAdmin { return true, true, nil @@ -128,47 +125,68 @@ func (handler *Handler) userHasRegistryAccess(r *http.Request, registry *portain return false, false, err } - memberships, err := handler.DataStore.TeamMembership().TeamMembershipsByUserID(user.ID) + // Use the enhanced registry access utility function that includes namespace validation + _, err = access.GetAccessibleRegistry( + handler.DataStore, + handler.K8sClientFactory, + securityContext.UserID, + endpointId, + registry.ID, + ) if err != nil { - return false, false, nil + return false, false, nil // No access } - // validate access for kubernetes namespaces (leverage registry.RegistryAccesses[endpointId].Namespaces) - if endpointutils.IsKubernetesEndpoint(endpoint) { - kcl, err := handler.K8sClientFactory.GetPrivilegedKubeClient(endpoint) - if err != nil { - return false, false, errors.Wrap(err, "unable to retrieve kubernetes client to validate registry access") - } - accessPolicies, err := kcl.GetNamespaceAccessPolicies() - if err != nil { - return false, false, errors.Wrap(err, "unable to retrieve environment's namespaces policies to validate registry access") - } - - authorizedNamespaces := registry.RegistryAccesses[endpointId].Namespaces - - for _, namespace := range authorizedNamespaces { - // when the default namespace is authorized to use a registry, all users have the ability to use it - // unless the default namespace is restricted: in this case continue to search for other potential accesses authorizations - if namespace == kubernetes.DefaultNamespace && !endpoint.Kubernetes.Configuration.RestrictDefaultNamespace { - return true, false, nil - } - - namespacePolicy := accessPolicies[namespace] - if security.AuthorizedAccess(user.ID, memberships, namespacePolicy.UserAccessPolicies, namespacePolicy.TeamAccessPolicies) { - return true, false, nil - } - } - return false, false, nil - } - - // validate access for docker environments - // leverage registry.RegistryAccesses[endpointId].UserAccessPolicies (direct access) - // and registry.RegistryAccesses[endpointId].TeamAccessPolicies (indirect access via his teams) - if 
security.AuthorizedRegistryAccess(registry, user, memberships, endpoint.ID) { - return true, false, nil - } - - // when user has no access via their role, direct grant or indirect grant - // then they don't have access to the registry - return false, false, nil + return true, false, nil +} + +// RegistryAccess defines a security check for registry-specific API endpoints. +// Authentication is required to access these endpoints. +// The user must have direct or indirect access to the specific registry being requested. +// This bouncer validates registry access using the userHasRegistryAccess logic. +func (handler *Handler) RegistryAccess(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // First ensure the user is authenticated + tokenData, err := security.RetrieveTokenData(r) + if err != nil { + httperror.WriteError(w, http.StatusUnauthorized, "Authentication required", httperrors.ErrUnauthorized) + return + } + + // Extract registry ID from the route + registryID, err := request.RetrieveNumericRouteVariableValue(r, "id") + if err != nil { + httperror.WriteError(w, http.StatusBadRequest, "Invalid registry identifier route variable", err) + return + } + + // Get the registry from the database + registry, err := handler.DataStore.Registry().Read(portainer.RegistryID(registryID)) + if handler.DataStore.IsErrObjectNotFound(err) { + httperror.WriteError(w, http.StatusNotFound, "Unable to find a registry with the specified identifier inside the database", err) + return + } else if err != nil { + httperror.WriteError(w, http.StatusInternalServerError, "Unable to find a registry with the specified identifier inside the database", err) + return + } + + // Check if user has access to this registry + hasAccess, _, err := handler.userHasRegistryAccess(r, registry) + if err != nil { + httperror.WriteError(w, http.StatusInternalServerError, "Unable to retrieve info from request context", err) + return + } + if !hasAccess { + log.Debug(). + Int("registry_id", registryID). + Str("registry_name", registry.Name). + Int("user_id", int(tokenData.ID)). + Str("context", "RegistryAccessBouncer"). 
+ Msg("User access denied to registry") + httperror.WriteError(w, http.StatusForbidden, "Access denied to resource", httperrors.ErrResourceAccessDenied) + return + } + + next.ServeHTTP(w, r) + }) } diff --git a/api/http/handler/registries/registry_access_test.go b/api/http/handler/registries/registry_access_test.go new file mode 100644 index 000000000..8231f4d66 --- /dev/null +++ b/api/http/handler/registries/registry_access_test.go @@ -0,0 +1,89 @@ +package registries + +import ( + "net/http" + "net/http/httptest" + "testing" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/datastore" + "github.com/portainer/portainer/api/http/security" + "github.com/portainer/portainer/api/internal/testhelpers" + + "github.com/gorilla/mux" + "github.com/stretchr/testify/assert" +) + +func Test_RegistryAccess_RequiresAuthentication(t *testing.T) { + _, store := datastore.MustNewTestStore(t, true, true) + registry := &portainer.Registry{ + ID: 1, + Name: "test-registry", + URL: "https://registry.test.com", + } + err := store.Registry().Create(registry) + assert.NoError(t, err) + handler := &Handler{ + DataStore: store, + } + req := httptest.NewRequest(http.MethodGet, "/registries/1", nil) + req = mux.SetURLVars(req, map[string]string{"id": "1"}) + rr := httptest.NewRecorder() + testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + bouncer := handler.RegistryAccess(testHandler) + bouncer.ServeHTTP(rr, req) + assert.Equal(t, http.StatusUnauthorized, rr.Code) +} + +func Test_RegistryAccess_InvalidRegistryID(t *testing.T) { + _, store := datastore.MustNewTestStore(t, true, true) + user := &portainer.User{ID: 1, Username: "test", Role: portainer.StandardUserRole} + err := store.User().Create(user) + assert.NoError(t, err) + + handler := &Handler{ + DataStore: store, + } + req := httptest.NewRequest(http.MethodGet, "/registries/invalid", nil) + req = mux.SetURLVars(req, map[string]string{"id": "invalid"}) + tokenData := &portainer.TokenData{ID: 1, Role: portainer.StandardUserRole} + req = req.WithContext(security.StoreTokenData(req, tokenData)) + + rr := httptest.NewRecorder() + + testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + bouncer := handler.RegistryAccess(testHandler) + bouncer.ServeHTTP(rr, req) + assert.Equal(t, http.StatusBadRequest, rr.Code) +} + +func Test_RegistryAccess_RegistryNotFound(t *testing.T) { + _, store := datastore.MustNewTestStore(t, true, true) + user := &portainer.User{ID: 1, Username: "test", Role: portainer.StandardUserRole} + err := store.User().Create(user) + assert.NoError(t, err) + + handler := &Handler{ + DataStore: store, + requestBouncer: testhelpers.NewTestRequestBouncer(), + } + req := httptest.NewRequest(http.MethodGet, "/registries/999", nil) + req = mux.SetURLVars(req, map[string]string{"id": "999"}) + tokenData := &portainer.TokenData{ID: 1, Role: portainer.StandardUserRole} + req = req.WithContext(security.StoreTokenData(req, tokenData)) + + rr := httptest.NewRecorder() + + testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + bouncer := handler.RegistryAccess(testHandler) + bouncer.ServeHTTP(rr, req) + assert.Equal(t, http.StatusNotFound, rr.Code) +} diff --git a/api/http/handler/registries/registry_inspect.go b/api/http/handler/registries/registry_inspect.go index a1f0bd9c5..f606a953e 100644 --- 
a/api/http/handler/registries/registry_inspect.go +++ b/api/http/handler/registries/registry_inspect.go @@ -4,10 +4,12 @@ import ( "net/http" portainer "github.com/portainer/portainer/api" - httperrors "github.com/portainer/portainer/api/http/errors" + "github.com/portainer/portainer/api/http/security" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" "github.com/portainer/portainer/pkg/libhttp/response" + + "github.com/rs/zerolog/log" ) // @id RegistryInspect @@ -31,6 +33,11 @@ func (handler *Handler) registryInspect(w http.ResponseWriter, r *http.Request) return httperror.BadRequest("Invalid registry identifier route variable", err) } + log.Debug(). + Int("registry_id", registryID). + Str("context", "RegistryInspectHandler"). + Msg("Starting registry inspection") + registry, err := handler.DataStore.Registry().Read(portainer.RegistryID(registryID)) if handler.DataStore.IsErrObjectNotFound(err) { return httperror.NotFound("Unable to find a registry with the specified identifier inside the database", err) @@ -38,14 +45,12 @@ func (handler *Handler) registryInspect(w http.ResponseWriter, r *http.Request) return httperror.InternalServerError("Unable to find a registry with the specified identifier inside the database", err) } - hasAccess, isAdmin, err := handler.userHasRegistryAccess(r, registry) + // Check if user is admin to determine if we should hide sensitive fields + securityContext, err := security.RetrieveRestrictedRequestContext(r) if err != nil { return httperror.InternalServerError("Unable to retrieve info from request context", err) } - if !hasAccess { - return httperror.Forbidden("Access denied to resource", httperrors.ErrResourceAccessDenied) - } - hideFields(registry, !isAdmin) + hideFields(registry, !securityContext.IsAdmin) return response.JSON(w, registry) } diff --git a/api/http/handler/settings/settings_update.go b/api/http/handler/settings/settings_update.go index 0b36dbc62..98da8da7d 100644 --- a/api/http/handler/settings/settings_update.go +++ b/api/http/handler/settings/settings_update.go @@ -14,8 +14,8 @@ import ( httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" "github.com/portainer/portainer/pkg/libhttp/response" + "github.com/portainer/portainer/pkg/validate" - "github.com/asaskevich/govalidator" "github.com/pkg/errors" "golang.org/x/oauth2" ) @@ -62,15 +62,15 @@ func (payload *settingsUpdatePayload) Validate(r *http.Request) error { return errors.New("Invalid authentication method value. Value must be one of: 1 (internal), 2 (LDAP/AD) or 3 (OAuth)") } - if payload.LogoURL != nil && *payload.LogoURL != "" && !govalidator.IsURL(*payload.LogoURL) { + if payload.LogoURL != nil && *payload.LogoURL != "" && !validate.IsURL(*payload.LogoURL) { return errors.New("Invalid logo URL. Must correspond to a valid URL format") } - if payload.TemplatesURL != nil && *payload.TemplatesURL != "" && !govalidator.IsURL(*payload.TemplatesURL) { + if payload.TemplatesURL != nil && *payload.TemplatesURL != "" && !validate.IsURL(*payload.TemplatesURL) { return errors.New("Invalid external templates URL. Must correspond to a valid URL format") } - if payload.HelmRepositoryURL != nil && *payload.HelmRepositoryURL != "" && !govalidator.IsURL(*payload.HelmRepositoryURL) { + if payload.HelmRepositoryURL != nil && *payload.HelmRepositoryURL != "" && !validate.IsURL(*payload.HelmRepositoryURL) { return errors.New("Invalid Helm repository URL. 
Must correspond to a valid URL format") } diff --git a/api/http/handler/stacks/create_compose_stack.go b/api/http/handler/stacks/create_compose_stack.go index dce39337a..fc5bed1ff 100644 --- a/api/http/handler/stacks/create_compose_stack.go +++ b/api/http/handler/stacks/create_compose_stack.go @@ -14,8 +14,8 @@ import ( "github.com/portainer/portainer/api/stacks/stackutils" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" + "github.com/portainer/portainer/pkg/validate" - "github.com/asaskevich/govalidator" "github.com/pkg/errors" "github.com/rs/zerolog/log" ) @@ -205,7 +205,7 @@ func (payload *composeStackFromGitRepositoryPayload) Validate(r *http.Request) e if len(payload.Name) == 0 { return errors.New("Invalid stack name") } - if len(payload.RepositoryURL) == 0 || !govalidator.IsURL(payload.RepositoryURL) { + if len(payload.RepositoryURL) == 0 || !validate.IsURL(payload.RepositoryURL) { return errors.New("Invalid repository URL. Must correspond to a valid URL format") } if payload.RepositoryAuthentication && len(payload.RepositoryPassword) == 0 { diff --git a/api/http/handler/stacks/create_kubernetes_stack.go b/api/http/handler/stacks/create_kubernetes_stack.go index 397ccfec2..f1a142e6b 100644 --- a/api/http/handler/stacks/create_kubernetes_stack.go +++ b/api/http/handler/stacks/create_kubernetes_stack.go @@ -15,8 +15,8 @@ import ( httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" "github.com/portainer/portainer/pkg/libhttp/response" + "github.com/portainer/portainer/pkg/validate" - "github.com/asaskevich/govalidator" "github.com/pkg/errors" ) @@ -96,7 +96,7 @@ func (payload *kubernetesStringDeploymentPayload) Validate(r *http.Request) erro } func (payload *kubernetesGitDeploymentPayload) Validate(r *http.Request) error { - if len(payload.RepositoryURL) == 0 || !govalidator.IsURL(payload.RepositoryURL) { + if len(payload.RepositoryURL) == 0 || !validate.IsURL(payload.RepositoryURL) { return errors.New("Invalid repository URL. Must correspond to a valid URL format") } @@ -112,7 +112,7 @@ func (payload *kubernetesGitDeploymentPayload) Validate(r *http.Request) error { } func (payload *kubernetesManifestURLDeploymentPayload) Validate(r *http.Request) error { - if len(payload.ManifestURL) == 0 || !govalidator.IsURL(payload.ManifestURL) { + if len(payload.ManifestURL) == 0 || !validate.IsURL(payload.ManifestURL) { return errors.New("Invalid manifest URL") } diff --git a/api/http/handler/stacks/create_swarm_stack.go b/api/http/handler/stacks/create_swarm_stack.go index 4603b6d6b..e10d23f2f 100644 --- a/api/http/handler/stacks/create_swarm_stack.go +++ b/api/http/handler/stacks/create_swarm_stack.go @@ -11,8 +11,8 @@ import ( "github.com/portainer/portainer/api/stacks/stackutils" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" + valid "github.com/portainer/portainer/pkg/validate" - "github.com/asaskevich/govalidator" "github.com/pkg/errors" ) @@ -142,7 +142,7 @@ func (payload *swarmStackFromGitRepositoryPayload) Validate(r *http.Request) err if len(payload.SwarmID) == 0 { return errors.New("Invalid Swarm ID") } - if len(payload.RepositoryURL) == 0 || !govalidator.IsURL(payload.RepositoryURL) { + if len(payload.RepositoryURL) == 0 || !valid.IsURL(payload.RepositoryURL) { return errors.New("Invalid repository URL. 
Must correspond to a valid URL format") } if payload.RepositoryAuthentication && len(payload.RepositoryPassword) == 0 { diff --git a/api/http/handler/stacks/handler.go b/api/http/handler/stacks/handler.go index 7e5cce040..2ad40a182 100644 --- a/api/http/handler/stacks/handler.go +++ b/api/http/handler/stacks/handler.go @@ -11,7 +11,6 @@ import ( "github.com/portainer/portainer/api/dataservices" dockerclient "github.com/portainer/portainer/api/docker/client" "github.com/portainer/portainer/api/docker/consts" - "github.com/portainer/portainer/api/http/middlewares" "github.com/portainer/portainer/api/http/security" "github.com/portainer/portainer/api/internal/authorization" "github.com/portainer/portainer/api/internal/endpointutils" @@ -62,8 +61,6 @@ func NewHandler(bouncer security.BouncerService) *Handler { h.Handle("/stacks/create/{type}/{method}", bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.stackCreate))).Methods(http.MethodPost) - h.Handle("/stacks", - bouncer.AuthenticatedAccess(middlewares.Deprecated(h, deprecatedStackCreateUrlParser))).Methods(http.MethodPost) // Deprecated h.Handle("/stacks", bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.stackList))).Methods(http.MethodGet) h.Handle("/stacks/{id}", diff --git a/api/http/handler/stacks/stack_create.go b/api/http/handler/stacks/stack_create.go index f45592d09..cb297f9d7 100644 --- a/api/http/handler/stacks/stack_create.go +++ b/api/http/handler/stacks/stack_create.go @@ -1,7 +1,6 @@ package stacks import ( - "fmt" "net/http" portainer "github.com/portainer/portainer/api" @@ -141,53 +140,3 @@ func (handler *Handler) decorateStackResponse(w http.ResponseWriter, stack *port return response.JSON(w, stack) } - -func getStackTypeFromQueryParameter(r *http.Request) (string, error) { - stackType, err := request.RetrieveNumericQueryParameter(r, "type", false) - if err != nil { - return "", err - } - - switch stackType { - case 1: - return "swarm", nil - case 2: - return "standalone", nil - case 3: - return "kubernetes", nil - } - - return "", errors.New(request.ErrInvalidQueryParameter) -} - -// @id StackCreate -// @summary Deploy a new stack -// @description Deploy a new stack into a Docker environment(endpoint) specified via the environment(endpoint) identifier. -// @description **Access policy**: authenticated -// @tags stacks -// @security ApiKeyAuth -// @security jwt -// @accept json,multipart/form-data -// @produce json -// @param type query int true "Stack deployment type. Possible values: 1 (Swarm stack), 2 (Compose stack) or 3 (Kubernetes stack)." Enums(1,2,3) -// @param method query string true "Stack deployment method. Possible values: file, string, repository or url." Enums(string, file, repository, url) -// @param endpointId query int true "Identifier of the environment(endpoint) that will be used to deploy the stack" -// @param body body object true "for body documentation see the relevant /stacks/create/{type}/{method} endpoint" -// @success 200 {object} portainer.Stack -// @failure 400 "Invalid request" -// @failure 500 "Server error" -// @deprecated -// @router /stacks [post] -func deprecatedStackCreateUrlParser(w http.ResponseWriter, r *http.Request) (string, *httperror.HandlerError) { - method, err := request.RetrieveQueryParameter(r, "method", false) - if err != nil { - return "", httperror.BadRequest("Invalid query parameter: method. 
Valid values are: file or string", err) - } - - stackType, err := getStackTypeFromQueryParameter(r) - if err != nil { - return "", httperror.BadRequest("Invalid query parameter: type", err) - } - - return fmt.Sprintf("/stacks/create/%s/%s", stackType, method), nil -} diff --git a/api/http/handler/stacks/stack_update_git.go b/api/http/handler/stacks/stack_update_git.go index 8d0687694..2bdf2b71f 100644 --- a/api/http/handler/stacks/stack_update_git.go +++ b/api/http/handler/stacks/stack_update_git.go @@ -19,14 +19,15 @@ import ( ) type stackGitUpdatePayload struct { - AutoUpdate *portainer.AutoUpdateSettings - Env []portainer.Pair - Prune bool - RepositoryReferenceName string - RepositoryAuthentication bool - RepositoryUsername string - RepositoryPassword string - TLSSkipVerify bool + AutoUpdate *portainer.AutoUpdateSettings + Env []portainer.Pair + Prune bool + RepositoryReferenceName string + RepositoryAuthentication bool + RepositoryUsername string + RepositoryPassword string + RepositoryAuthorizationType gittypes.GitCredentialAuthType + TLSSkipVerify bool } func (payload *stackGitUpdatePayload) Validate(r *http.Request) error { @@ -151,11 +152,19 @@ func (handler *Handler) stackUpdateGit(w http.ResponseWriter, r *http.Request) * } stack.GitConfig.Authentication = &gittypes.GitAuthentication{ - Username: payload.RepositoryUsername, - Password: password, + Username: payload.RepositoryUsername, + Password: password, + AuthorizationType: payload.RepositoryAuthorizationType, } - if _, err := handler.GitService.LatestCommitID(stack.GitConfig.URL, stack.GitConfig.ReferenceName, stack.GitConfig.Authentication.Username, stack.GitConfig.Authentication.Password, stack.GitConfig.TLSSkipVerify); err != nil { + if _, err := handler.GitService.LatestCommitID( + stack.GitConfig.URL, + stack.GitConfig.ReferenceName, + stack.GitConfig.Authentication.Username, + stack.GitConfig.Authentication.Password, + stack.GitConfig.Authentication.AuthorizationType, + stack.GitConfig.TLSSkipVerify, + ); err != nil { return httperror.InternalServerError("Unable to fetch git repository", err) } } else { diff --git a/api/http/handler/stacks/stack_update_git_redeploy.go b/api/http/handler/stacks/stack_update_git_redeploy.go index e65e1e70c..c595808aa 100644 --- a/api/http/handler/stacks/stack_update_git_redeploy.go +++ b/api/http/handler/stacks/stack_update_git_redeploy.go @@ -6,6 +6,7 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/git" + gittypes "github.com/portainer/portainer/api/git/types" httperrors "github.com/portainer/portainer/api/http/errors" "github.com/portainer/portainer/api/http/security" k "github.com/portainer/portainer/api/kubernetes" @@ -19,12 +20,13 @@ import ( ) type stackGitRedployPayload struct { - RepositoryReferenceName string - RepositoryAuthentication bool - RepositoryUsername string - RepositoryPassword string - Env []portainer.Pair - Prune bool + RepositoryReferenceName string + RepositoryAuthentication bool + RepositoryUsername string + RepositoryPassword string + RepositoryAuthorizationType gittypes.GitCredentialAuthType + Env []portainer.Pair + Prune bool // Force a pulling to current image with the original tag though the image is already the latest PullImage bool `example:"false"` @@ -135,13 +137,16 @@ func (handler *Handler) stackGitRedeploy(w http.ResponseWriter, r *http.Request) repositoryUsername := "" repositoryPassword := "" + repositoryAuthType := gittypes.GitCredentialAuthType_Basic if payload.RepositoryAuthentication { 
repositoryPassword = payload.RepositoryPassword + repositoryAuthType = payload.RepositoryAuthorizationType // When the existing stack is using the custom username/password and the password is not updated, // the stack should keep using the saved username/password if repositoryPassword == "" && stack.GitConfig != nil && stack.GitConfig.Authentication != nil { repositoryPassword = stack.GitConfig.Authentication.Password + repositoryAuthType = stack.GitConfig.Authentication.AuthorizationType } repositoryUsername = payload.RepositoryUsername } @@ -152,6 +157,7 @@ func (handler *Handler) stackGitRedeploy(w http.ResponseWriter, r *http.Request) ReferenceName: stack.GitConfig.ReferenceName, Username: repositoryUsername, Password: repositoryPassword, + AuthType: repositoryAuthType, TLSSkipVerify: stack.GitConfig.TLSSkipVerify, } @@ -166,7 +172,7 @@ func (handler *Handler) stackGitRedeploy(w http.ResponseWriter, r *http.Request) return err } - newHash, err := handler.GitService.LatestCommitID(stack.GitConfig.URL, stack.GitConfig.ReferenceName, repositoryUsername, repositoryPassword, stack.GitConfig.TLSSkipVerify) + newHash, err := handler.GitService.LatestCommitID(stack.GitConfig.URL, stack.GitConfig.ReferenceName, repositoryUsername, repositoryPassword, repositoryAuthType, stack.GitConfig.TLSSkipVerify) if err != nil { return httperror.InternalServerError("Unable get latest commit id", errors.WithMessagef(err, "failed to fetch latest commit id of the stack %v", stack.ID)) } diff --git a/api/http/handler/stacks/update_kubernetes_stack.go b/api/http/handler/stacks/update_kubernetes_stack.go index 95195bb10..42ecbaa04 100644 --- a/api/http/handler/stacks/update_kubernetes_stack.go +++ b/api/http/handler/stacks/update_kubernetes_stack.go @@ -27,12 +27,13 @@ type kubernetesFileStackUpdatePayload struct { } type kubernetesGitStackUpdatePayload struct { - RepositoryReferenceName string - RepositoryAuthentication bool - RepositoryUsername string - RepositoryPassword string - AutoUpdate *portainer.AutoUpdateSettings - TLSSkipVerify bool + RepositoryReferenceName string + RepositoryAuthentication bool + RepositoryUsername string + RepositoryPassword string + RepositoryAuthorizationType gittypes.GitCredentialAuthType + AutoUpdate *portainer.AutoUpdateSettings + TLSSkipVerify bool } func (payload *kubernetesFileStackUpdatePayload) Validate(r *http.Request) error { @@ -76,11 +77,19 @@ func (handler *Handler) updateKubernetesStack(r *http.Request, stack *portainer. 
} stack.GitConfig.Authentication = &gittypes.GitAuthentication{ - Username: payload.RepositoryUsername, - Password: password, + Username: payload.RepositoryUsername, + Password: password, + AuthorizationType: payload.RepositoryAuthorizationType, } - if _, err := handler.GitService.LatestCommitID(stack.GitConfig.URL, stack.GitConfig.ReferenceName, stack.GitConfig.Authentication.Username, stack.GitConfig.Authentication.Password, stack.GitConfig.TLSSkipVerify); err != nil { + if _, err := handler.GitService.LatestCommitID( + stack.GitConfig.URL, + stack.GitConfig.ReferenceName, + stack.GitConfig.Authentication.Username, + stack.GitConfig.Authentication.Password, + stack.GitConfig.Authentication.AuthorizationType, + stack.GitConfig.TLSSkipVerify, + ); err != nil { return httperror.InternalServerError("Unable to fetch git repository", err) } } diff --git a/api/http/handler/system/handler.go b/api/http/handler/system/handler.go index d2e3f5485..4cab43332 100644 --- a/api/http/handler/system/handler.go +++ b/api/http/handler/system/handler.go @@ -59,10 +59,6 @@ func NewHandler(bouncer security.BouncerService, // Deprecated /status endpoint, will be removed in the future. h.Handle("/status", bouncer.PublicAccess(httperror.LoggerHandler(h.statusInspectDeprecated))).Methods(http.MethodGet) - h.Handle("/status/version", - bouncer.AuthenticatedAccess(http.HandlerFunc(h.versionDeprecated))).Methods(http.MethodGet) - h.Handle("/status/nodes", - bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.statusNodesCountDeprecated))).Methods(http.MethodGet) return h } diff --git a/api/http/handler/system/nodes_count.go b/api/http/handler/system/nodes_count.go index 0b9971619..7d150d911 100644 --- a/api/http/handler/system/nodes_count.go +++ b/api/http/handler/system/nodes_count.go @@ -8,8 +8,6 @@ import ( "github.com/portainer/portainer/api/internal/snapshot" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/response" - - "github.com/rs/zerolog/log" ) type nodesCountResponse struct { @@ -35,7 +33,7 @@ func (handler *Handler) systemNodesCount(w http.ResponseWriter, r *http.Request) var nodes int for _, endpoint := range endpoints { - if err := snapshot.FillSnapshotData(handler.dataStore, &endpoint); err != nil { + if err := snapshot.FillSnapshotData(handler.dataStore, &endpoint, false); err != nil { return httperror.InternalServerError("Unable to add snapshot data", err) } @@ -44,21 +42,3 @@ func (handler *Handler) systemNodesCount(w http.ResponseWriter, r *http.Request) return response.JSON(w, &nodesCountResponse{Nodes: nodes}) } - -// @id statusNodesCount -// @summary Retrieve the count of nodes -// @deprecated -// @description Deprecated: use the `/system/nodes` endpoint instead. 
-// @description **Access policy**: authenticated -// @security ApiKeyAuth -// @security jwt -// @tags status -// @produce json -// @success 200 {object} nodesCountResponse "Success" -// @failure 500 "Server error" -// @router /status/nodes [get] -func (handler *Handler) statusNodesCountDeprecated(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { - log.Warn().Msg("The /status/nodes endpoint is deprecated, please use the /system/nodes endpoint instead") - - return handler.systemNodesCount(w, r) -} diff --git a/api/http/handler/system/system_info.go b/api/http/handler/system/system_info.go index ad5f944ef..64a915313 100644 --- a/api/http/handler/system/system_info.go +++ b/api/http/handler/system/system_info.go @@ -3,6 +3,7 @@ package system import ( "net/http" + "github.com/pkg/errors" "github.com/portainer/portainer/api/internal/endpointutils" plf "github.com/portainer/portainer/api/platform" httperror "github.com/portainer/portainer/pkg/libhttp/error" @@ -46,7 +47,12 @@ func (handler *Handler) systemInfo(w http.ResponseWriter, r *http.Request) *http platform, err := handler.platformService.GetPlatform() if err != nil { - return httperror.InternalServerError("Failed to get platform", err) + if !errors.Is(err, plf.ErrNoLocalEnvironment) { + return httperror.InternalServerError("Failed to get platform", err) + } + // If no local environment is detected, we assume the platform is Docker + // UI will stop showing the upgrade banner + platform = plf.PlatformDocker } return response.JSON(w, &systemInfoResponse{ diff --git a/api/http/handler/system/system_upgrade.go b/api/http/handler/system/system_upgrade.go index f881b6233..8eb41413f 100644 --- a/api/http/handler/system/system_upgrade.go +++ b/api/http/handler/system/system_upgrade.go @@ -4,6 +4,7 @@ import ( "net/http" "regexp" + ceplf "github.com/portainer/portainer/api/platform" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" "github.com/portainer/portainer/pkg/libhttp/response" @@ -45,6 +46,9 @@ func (handler *Handler) systemUpgrade(w http.ResponseWriter, r *http.Request) *h environment, err := handler.platformService.GetLocalEnvironment() if err != nil { + if errors.Is(err, ceplf.ErrNoLocalEnvironment) { + return httperror.NotFound("The system upgrade feature is disabled because no local environment was detected.", err) + } return httperror.InternalServerError("Failed to get local environment", err) } @@ -53,8 +57,7 @@ func (handler *Handler) systemUpgrade(w http.ResponseWriter, r *http.Request) *h return httperror.InternalServerError("Failed to get platform", err) } - err = handler.upgradeService.Upgrade(platform, environment, payload.License) - if err != nil { + if err := handler.upgradeService.Upgrade(platform, environment, payload.License); err != nil { return httperror.InternalServerError("Failed to upgrade Portainer", err) } diff --git a/api/http/handler/system/version.go b/api/http/handler/system/version.go index 50ad2f6af..52af5879c 100644 --- a/api/http/handler/system/version.go +++ b/api/http/handler/system/version.go @@ -7,6 +7,7 @@ import ( "github.com/portainer/portainer/api/http/client" "github.com/portainer/portainer/api/http/security" "github.com/portainer/portainer/pkg/build" + libclient "github.com/portainer/portainer/pkg/libhttp/client" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/response" @@ -69,10 +70,14 @@ func (handler *Handler) version(w http.ResponseWriter, r *http.Request) 
*httperr } func GetLatestVersion() string { + if err := libclient.ExternalRequestDisabled(portainer.VersionCheckURL); err != nil { + log.Debug().Err(err).Msg("External request disabled: Version check") + return "" + } + motd, err := client.Get(portainer.VersionCheckURL, 5) if err != nil { log.Debug().Err(err).Msg("couldn't fetch latest Portainer release version") - return "" } @@ -106,21 +111,3 @@ func HasNewerVersion(currentVersion, latestVersion string) bool { return currentVersionSemver.LessThan(*latestVersionSemver) } - -// @id Version -// @summary Check for portainer updates -// @deprecated -// @description Deprecated: use the `/system/version` endpoint instead. -// @description Check if portainer has an update available -// @description **Access policy**: authenticated -// @security ApiKeyAuth -// @security jwt -// @tags status -// @produce json -// @success 200 {object} versionResponse "Success" -// @router /status/version [get] -func (handler *Handler) versionDeprecated(w http.ResponseWriter, r *http.Request) { - log.Warn().Msg("The /status/version endpoint is deprecated, please use the /system/version endpoint instead") - - handler.version(w, r) -} diff --git a/api/http/handler/tags/tag_delete.go b/api/http/handler/tags/tag_delete.go index ad8ef1347..f8f1b7786 100644 --- a/api/http/handler/tags/tag_delete.go +++ b/api/http/handler/tags/tag_delete.go @@ -8,6 +8,7 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/internal/edge" + "github.com/portainer/portainer/api/internal/endpointutils" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" "github.com/portainer/portainer/pkg/libhttp/response" @@ -58,6 +59,9 @@ func deleteTag(tx dataservices.DataStoreTx, tagID portainer.TagID) error { for endpointID := range tag.Endpoints { endpoint, err := tx.Endpoint().Endpoint(endpointID) + if tx.IsErrObjectNotFound(err) { + continue + } if err != nil { return httperror.InternalServerError("Unable to retrieve environment from the database", err) } @@ -103,15 +107,10 @@ func deleteTag(tx dataservices.DataStoreTx, tagID portainer.TagID) error { return httperror.InternalServerError("Unable to retrieve edge stacks from the database", err) } - for _, endpoint := range endpoints { - if (tag.Endpoints[endpoint.ID] || tag.EndpointGroups[endpoint.GroupID]) && (endpoint.Type == portainer.EdgeAgentOnDockerEnvironment || endpoint.Type == portainer.EdgeAgentOnKubernetesEnvironment) { - err = updateEndpointRelations(tx, endpoint, edgeGroups, edgeStacks) - if err != nil { - return httperror.InternalServerError("Unable to update environment relations in the database", err) - } - } + edgeJobs, err := tx.EdgeJob().ReadAll() + if err != nil { + return httperror.InternalServerError("Unable to retrieve edge job configurations from the database", err) } - for _, edgeGroup := range edgeGroups { edgeGroup.TagIDs = slices.DeleteFunc(edgeGroup.TagIDs, func(t portainer.TagID) bool { return t == tagID @@ -123,6 +122,16 @@ func deleteTag(tx dataservices.DataStoreTx, tagID portainer.TagID) error { } } + for _, endpoint := range endpoints { + if (!tag.Endpoints[endpoint.ID] && !tag.EndpointGroups[endpoint.GroupID]) || !endpointutils.IsEdgeEndpoint(&endpoint) { + continue + } + + if err := updateEndpointRelations(tx, endpoint, edgeGroups, edgeStacks, edgeJobs); err != nil { + return httperror.InternalServerError("Unable to update environment relations in the database", err) 
+ } + } + err = tx.Tag().Delete(tagID) if err != nil { return httperror.InternalServerError("Unable to remove the tag from the database", err) @@ -131,7 +140,7 @@ func deleteTag(tx dataservices.DataStoreTx, tagID portainer.TagID) error { return nil } -func updateEndpointRelations(tx dataservices.DataStoreTx, endpoint portainer.Endpoint, edgeGroups []portainer.EdgeGroup, edgeStacks []portainer.EdgeStack) error { +func updateEndpointRelations(tx dataservices.DataStoreTx, endpoint portainer.Endpoint, edgeGroups []portainer.EdgeGroup, edgeStacks []portainer.EdgeStack, edgeJobs []portainer.EdgeJob) error { endpointRelation, err := tx.EndpointRelation().EndpointRelation(endpoint.ID) if err != nil { return err @@ -147,7 +156,28 @@ func updateEndpointRelations(tx dataservices.DataStoreTx, endpoint portainer.End for _, edgeStackID := range endpointStacks { stacksSet[edgeStackID] = true } + endpointRelation.EdgeStacks = stacksSet - return tx.EndpointRelation().UpdateEndpointRelation(endpoint.ID, endpointRelation) + if err := tx.EndpointRelation().UpdateEndpointRelation(endpoint.ID, endpointRelation); err != nil { + return err + } + + for _, edgeJob := range edgeJobs { + endpoints, err := edge.GetEndpointsFromEdgeGroups(edgeJob.EdgeGroups, tx) + if err != nil { + return err + } + if slices.Contains(endpoints, endpoint.ID) { + continue + } + + delete(edgeJob.GroupLogsCollection, endpoint.ID) + + if err := tx.EdgeJob().Update(edgeJob.ID, &edgeJob); err != nil { + return err + } + } + + return nil } diff --git a/api/http/handler/tags/tag_delete_test.go b/api/http/handler/tags/tag_delete_test.go index cabf20963..c933610c5 100644 --- a/api/http/handler/tags/tag_delete_test.go +++ b/api/http/handler/tags/tag_delete_test.go @@ -8,23 +8,20 @@ import ( "testing" portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/dataservices" + portainerDsErrors "github.com/portainer/portainer/api/dataservices/errors" "github.com/portainer/portainer/api/datastore" "github.com/portainer/portainer/api/internal/testhelpers" + "github.com/portainer/portainer/api/roar" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestTagDeleteEdgeGroupsConcurrently(t *testing.T) { const tagsCount = 100 - _, store := datastore.MustNewTestStore(t, true, false) - - user := &portainer.User{ID: 2, Username: "admin", Role: portainer.AdministratorRole} - if err := store.User().Create(user); err != nil { - t.Fatal("could not create admin user:", err) - } - - handler := NewHandler(testhelpers.NewTestRequestBouncer()) - handler.DataStore = store - + handler, store := setUpHandler(t) // Create all the tags and add them to the same edge group var tagIDs []portainer.TagID @@ -84,3 +81,128 @@ func TestTagDeleteEdgeGroupsConcurrently(t *testing.T) { t.Fatal("the edge group is not consistent") } } + +func TestHandler_tagDelete(t *testing.T) { + t.Run("should delete tag and update related endpoints and edge groups", func(t *testing.T) { + handler, store := setUpHandler(t) + + tag := &portainer.Tag{ + ID: 1, + Name: "tag-1", + Endpoints: make(map[portainer.EndpointID]bool), + EndpointGroups: make(map[portainer.EndpointGroupID]bool), + } + require.NoError(t, store.Tag().Create(tag)) + + endpointGroup := &portainer.EndpointGroup{ + ID: 2, + Name: "endpoint-group-1", + TagIDs: []portainer.TagID{tag.ID}, + } + require.NoError(t, store.EndpointGroup().Create(endpointGroup)) + + endpoint1 := &portainer.Endpoint{ + ID: 1, + Name: "endpoint-1", + GroupID: endpointGroup.ID, + } + require.NoError(t, 
store.Endpoint().Create(endpoint1)) + + endpoint2 := &portainer.Endpoint{ + ID: 2, + Name: "endpoint-2", + TagIDs: []portainer.TagID{tag.ID}, + } + require.NoError(t, store.Endpoint().Create(endpoint2)) + + tag.Endpoints[endpoint2.ID] = true + tag.EndpointGroups[endpointGroup.ID] = true + require.NoError(t, store.Tag().Update(tag.ID, tag)) + + dynamicEdgeGroup := &portainer.EdgeGroup{ + ID: 1, + Name: "edgegroup-1", + TagIDs: []portainer.TagID{tag.ID}, + Dynamic: true, + } + require.NoError(t, store.EdgeGroup().Create(dynamicEdgeGroup)) + + staticEdgeGroup := &portainer.EdgeGroup{ + ID: 2, + Name: "edgegroup-2", + EndpointIDs: roar.FromSlice([]portainer.EndpointID{endpoint2.ID}), + } + require.NoError(t, store.EdgeGroup().Create(staticEdgeGroup)) + + req, err := http.NewRequest(http.MethodDelete, "/tags/"+strconv.Itoa(int(tag.ID)), nil) + if err != nil { + t.Fail() + + return + } + + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + require.Equal(t, http.StatusNoContent, rec.Code) + + // Check that the tag is deleted + _, err = store.Tag().Read(tag.ID) + require.ErrorIs(t, err, portainerDsErrors.ErrObjectNotFound) + + // Check that the endpoints are updated + endpoint1, err = store.Endpoint().Endpoint(endpoint1.ID) + require.NoError(t, err) + assert.Len(t, endpoint1.TagIDs, 0, "endpoint-1 should not have any tags") + assert.Equal(t, endpoint1.GroupID, endpointGroup.ID, "endpoint-1 should still belong to the endpoint group") + + endpoint2, err = store.Endpoint().Endpoint(endpoint2.ID) + require.NoError(t, err) + assert.Len(t, endpoint2.TagIDs, 0, "endpoint-2 should not have any tags") + + // Check that the dynamic edge group is updated + dynamicEdgeGroup, err = store.EdgeGroup().Read(dynamicEdgeGroup.ID) + require.NoError(t, err) + assert.Len(t, dynamicEdgeGroup.TagIDs, 0, "dynamic edge group should not have any tags") + assert.Equal(t, 0, dynamicEdgeGroup.EndpointIDs.Len(), "dynamic edge group should not have any endpoints") + + // Check that the static edge group is not updated + staticEdgeGroup, err = store.EdgeGroup().Read(staticEdgeGroup.ID) + require.NoError(t, err) + assert.Len(t, staticEdgeGroup.TagIDs, 0, "static edge group should not have any tags") + assert.Equal(t, 1, staticEdgeGroup.EndpointIDs.Len(), "static edge group should have one endpoint") + assert.True(t, staticEdgeGroup.EndpointIDs.Contains(endpoint2.ID), "static edge group should have the endpoint-2") + }) + + // Test the tx.IsErrObjectNotFound logic when endpoint is not found during cleanup + t.Run("should continue gracefully when endpoint not found during cleanup", func(t *testing.T) { + _, store := setUpHandler(t) + // Create a tag with a reference to a non-existent endpoint + tag := &portainer.Tag{ + ID: 1, + Name: "test-tag", + Endpoints: map[portainer.EndpointID]bool{999: true}, // Non-existent endpoint + EndpointGroups: make(map[portainer.EndpointGroupID]bool), + } + + err := store.Tag().Create(tag) + require.NoError(t, err) + + err = deleteTag(store, 1) + require.NoError(t, err) + }) +} + +func setUpHandler(t *testing.T) (*Handler, dataservices.DataStore) { + _, store := datastore.MustNewTestStore(t, true, false) + + user := &portainer.User{ID: 2, Username: "admin", Role: portainer.AdministratorRole} + if err := store.User().Create(user); err != nil { + t.Fatal("could not create admin user:", err) + } + + handler := NewHandler(testhelpers.NewTestRequestBouncer()) + handler.DataStore = store + + return handler, store +} diff --git a/api/http/handler/teammemberships/teammembership_create.go 
b/api/http/handler/teammemberships/teammembership_create.go index f94d58395..d9acb9b44 100644 --- a/api/http/handler/teammemberships/teammembership_create.go +++ b/api/http/handler/teammemberships/teammembership_create.go @@ -45,7 +45,6 @@ func (payload *teamMembershipCreatePayload) Validate(r *http.Request) error { // @produce json // @param body body teamMembershipCreatePayload true "Team membership details" // @success 200 {object} portainer.TeamMembership "Success" -// @success 204 "Success" // @failure 400 "Invalid request" // @failure 403 "Permission denied to manage memberships" // @failure 409 "Team membership already registered" diff --git a/api/http/handler/teams/team_inspect.go b/api/http/handler/teams/team_inspect.go index ee92b7bc2..857636590 100644 --- a/api/http/handler/teams/team_inspect.go +++ b/api/http/handler/teams/team_inspect.go @@ -21,7 +21,6 @@ import ( // @produce json // @param id path int true "Team identifier" // @success 200 {object} portainer.Team "Success" -// @success 204 "Success" // @failure 400 "Invalid request" // @failure 403 "Permission denied" // @failure 404 "Team not found" diff --git a/api/http/handler/teams/team_update.go b/api/http/handler/teams/team_update.go index 3ad4a3151..5731f294c 100644 --- a/api/http/handler/teams/team_update.go +++ b/api/http/handler/teams/team_update.go @@ -30,7 +30,6 @@ func (payload *teamUpdatePayload) Validate(r *http.Request) error { // @param id path int true "Team identifier" // @param body body teamUpdatePayload true "Team details" // @success 200 {object} portainer.Team "Success" -// @success 204 "Success" // @failure 400 "Invalid request" // @failure 403 "Permission denied" // @failure 404 "Team not found" diff --git a/api/http/handler/templates/handler.go b/api/http/handler/templates/handler.go index e604e8abe..1e7b7f05d 100644 --- a/api/http/handler/templates/handler.go +++ b/api/http/handler/templates/handler.go @@ -29,7 +29,5 @@ func NewHandler(bouncer security.BouncerService) *Handler { bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.templateList))).Methods(http.MethodGet) h.Handle("/templates/{id}/file", bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.templateFile))).Methods(http.MethodPost) - h.Handle("/templates/file", - bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.templateFileOld))).Methods(http.MethodPost) return h } diff --git a/api/http/handler/templates/template_file.go b/api/http/handler/templates/template_file.go index b834eeed9..f9ec0135c 100644 --- a/api/http/handler/templates/template_file.go +++ b/api/http/handler/templates/template_file.go @@ -5,6 +5,7 @@ import ( "slices" portainer "github.com/portainer/portainer/api" + gittypes "github.com/portainer/portainer/api/git/types" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" "github.com/portainer/portainer/pkg/libhttp/response" @@ -71,7 +72,15 @@ func (handler *Handler) templateFile(w http.ResponseWriter, r *http.Request) *ht defer handler.cleanUp(projectPath) - if err := handler.GitService.CloneRepository(projectPath, template.Repository.URL, "", "", "", false); err != nil { + if err := handler.GitService.CloneRepository( + projectPath, + template.Repository.URL, + "", + "", + "", + gittypes.GitCredentialAuthType_Basic, + false, + ); err != nil { return httperror.InternalServerError("Unable to clone git repository", err) } diff --git a/api/http/handler/templates/template_file_old.go b/api/http/handler/templates/template_file_old.go deleted file mode 100644 
index 91ac038c5..000000000 --- a/api/http/handler/templates/template_file_old.go +++ /dev/null @@ -1,93 +0,0 @@ -package templates - -import ( - "errors" - "net/http" - - httperror "github.com/portainer/portainer/pkg/libhttp/error" - "github.com/portainer/portainer/pkg/libhttp/request" - "github.com/portainer/portainer/pkg/libhttp/response" - "github.com/rs/zerolog/log" -) - -type filePayload struct { - // URL of a git repository where the file is stored - RepositoryURL string `example:"https://github.com/portainer/portainer-compose" validate:"required"` - // Path to the file inside the git repository - ComposeFilePathInRepository string `example:"./subfolder/docker-compose.yml" validate:"required"` -} - -func (payload *filePayload) Validate(r *http.Request) error { - if len(payload.RepositoryURL) == 0 { - return errors.New("Invalid repository url") - } - - if len(payload.ComposeFilePathInRepository) == 0 { - return errors.New("Invalid file path") - } - - return nil -} - -func (handler *Handler) ifRequestedTemplateExists(payload *filePayload) *httperror.HandlerError { - response, httpErr := handler.fetchTemplates() - if httpErr != nil { - return httpErr - } - - for _, t := range response.Templates { - if t.Repository.URL == payload.RepositoryURL && t.Repository.StackFile == payload.ComposeFilePathInRepository { - return nil - } - } - return httperror.InternalServerError("Invalid template", errors.New("requested template does not exist")) -} - -// @id TemplateFileOld -// @summary Get a template's file -// @deprecated -// @description Get a template's file -// @description **Access policy**: authenticated -// @tags templates -// @security ApiKeyAuth -// @security jwt -// @accept json -// @produce json -// @param body body filePayload true "File details" -// @success 200 {object} fileResponse "Success" -// @failure 400 "Invalid request" -// @failure 500 "Server error" -// @router /templates/file [post] -func (handler *Handler) templateFileOld(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { - log.Warn().Msg("This api is deprecated. 
Please use /templates/{id}/file instead") - - var payload filePayload - err := request.DecodeAndValidateJSONPayload(r, &payload) - if err != nil { - return httperror.BadRequest("Invalid request payload", err) - } - - if err := handler.ifRequestedTemplateExists(&payload); err != nil { - return err - } - - projectPath, err := handler.FileService.GetTemporaryPath() - if err != nil { - return httperror.InternalServerError("Unable to create temporary folder", err) - } - - defer handler.cleanUp(projectPath) - - err = handler.GitService.CloneRepository(projectPath, payload.RepositoryURL, "", "", "", false) - if err != nil { - return httperror.InternalServerError("Unable to clone git repository", err) - } - - fileContent, err := handler.FileService.GetFileContent(projectPath, payload.ComposeFilePathInRepository) - if err != nil { - return httperror.InternalServerError("Failed loading file content", err) - } - - return response.JSON(w, fileResponse{FileContent: string(fileContent)}) - -} diff --git a/api/http/handler/templates/utils_fetch_templates.go b/api/http/handler/templates/utils_fetch_templates.go index 6feb9edeb..fc5c97125 100644 --- a/api/http/handler/templates/utils_fetch_templates.go +++ b/api/http/handler/templates/utils_fetch_templates.go @@ -4,7 +4,9 @@ import ( "net/http" portainer "github.com/portainer/portainer/api" + libclient "github.com/portainer/portainer/pkg/libhttp/client" httperror "github.com/portainer/portainer/pkg/libhttp/error" + "github.com/rs/zerolog/log" "github.com/segmentio/encoding/json" ) @@ -24,18 +26,27 @@ func (handler *Handler) fetchTemplates() (*listResponse, *httperror.HandlerError templatesURL = portainer.DefaultTemplatesURL } + var body *listResponse + if err := libclient.ExternalRequestDisabled(templatesURL); err != nil { + if templatesURL == portainer.DefaultTemplatesURL { + log.Debug().Err(err).Msg("External request disabled: Default templates") + return body, nil + } + } + resp, err := http.Get(templatesURL) if err != nil { return nil, httperror.InternalServerError("Unable to retrieve templates via the network", err) } defer resp.Body.Close() - var body *listResponse - err = json.NewDecoder(resp.Body).Decode(&body) - if err != nil { + if err := json.NewDecoder(resp.Body).Decode(&body); err != nil { return nil, httperror.InternalServerError("Unable to parse template file", err) } + for i := range body.Templates { + body.Templates[i].ID = portainer.TemplateID(i + 1) + } return body, nil } diff --git a/api/http/handler/users/handler.go b/api/http/handler/users/handler.go index c0abba8c4..54be6a24e 100644 --- a/api/http/handler/users/handler.go +++ b/api/http/handler/users/handler.go @@ -52,12 +52,12 @@ func NewHandler(bouncer security.BouncerService, rateLimiter *security.RateLimit teamLeaderRouter := h.NewRoute().Subrouter() teamLeaderRouter.Use(bouncer.TeamLeaderAccess) - restrictedRouter := h.NewRoute().Subrouter() - restrictedRouter.Use(bouncer.RestrictedAccess) - authenticatedRouter := h.NewRoute().Subrouter() authenticatedRouter.Use(bouncer.AuthenticatedAccess) + restrictedRouter := h.NewRoute().Subrouter() + restrictedRouter.Use(bouncer.RestrictedAccess) + publicRouter := h.NewRoute().Subrouter() publicRouter.Use(bouncer.PublicAccess) @@ -65,7 +65,6 @@ func NewHandler(bouncer security.BouncerService, rateLimiter *security.RateLimit restrictedRouter.Handle("/users", httperror.LoggerHandler(h.userList)).Methods(http.MethodGet) authenticatedRouter.Handle("/users/me", httperror.LoggerHandler(h.userInspectMe)).Methods(http.MethodGet) - 
restrictedRouter.Handle("/users/me", httperror.LoggerHandler(h.userInspectMe)).Methods(http.MethodGet) restrictedRouter.Handle("/users/{id}", httperror.LoggerHandler(h.userInspect)).Methods(http.MethodGet) authenticatedRouter.Handle("/users/{id}", httperror.LoggerHandler(h.userUpdate)).Methods(http.MethodPut) adminRouter.Handle("/users/{id}", httperror.LoggerHandler(h.userDelete)).Methods(http.MethodDelete) diff --git a/api/http/handler/users/user_create_access_token.go b/api/http/handler/users/user_create_access_token.go index a2e63cf5c..aa10f6fe0 100644 --- a/api/http/handler/users/user_create_access_token.go +++ b/api/http/handler/users/user_create_access_token.go @@ -11,8 +11,7 @@ import ( httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" "github.com/portainer/portainer/pkg/libhttp/response" - - "github.com/asaskevich/govalidator" + "github.com/portainer/portainer/pkg/validate" ) type userAccessTokenCreatePayload struct { @@ -24,10 +23,10 @@ func (payload *userAccessTokenCreatePayload) Validate(r *http.Request) error { if len(payload.Description) == 0 { return errors.New("invalid description: cannot be empty") } - if govalidator.HasWhitespaceOnly(payload.Description) { + if validate.HasWhitespaceOnly(payload.Description) { return errors.New("invalid description: cannot contain only whitespaces") } - if govalidator.MinStringLength(payload.Description, "128") { + if validate.MinStringLength(payload.Description, 128) { return errors.New("invalid description: cannot be longer than 128 characters") } return nil @@ -50,7 +49,7 @@ type accessTokenResponse struct { // @produce json // @param id path int true "User identifier" // @param body body userAccessTokenCreatePayload true "details" -// @success 200 {object} accessTokenResponse "Created" +// @success 200 {object} accessTokenResponse "Success" // @failure 400 "Invalid request" // @failure 401 "Unauthorized" // @failure 403 "Permission denied" @@ -115,7 +114,7 @@ func (handler *Handler) userCreateAccessToken(w http.ResponseWriter, r *http.Req return httperror.InternalServerError("Internal Server Error", err) } - return response.JSONWithStatus(w, accessTokenResponse{rawAPIKey, *apiKey}, http.StatusCreated) + return response.JSONWithStatus(w, accessTokenResponse{rawAPIKey, *apiKey}, http.StatusOK) } func (handler *Handler) usesInternalAuthentication(userid portainer.UserID) (bool, error) { diff --git a/api/http/handler/users/user_create_access_token_test.go b/api/http/handler/users/user_create_access_token_test.go index 5199366a2..b7e49d96b 100644 --- a/api/http/handler/users/user_create_access_token_test.go +++ b/api/http/handler/users/user_create_access_token_test.go @@ -60,7 +60,7 @@ func Test_userCreateAccessToken(t *testing.T) { rr := httptest.NewRecorder() h.ServeHTTP(rr, req) - is.Equal(http.StatusCreated, rr.Code) + is.Equal(http.StatusOK, rr.Code) body, err := io.ReadAll(rr.Body) is.NoError(err, "ReadAll should not return error") diff --git a/api/http/handler/webhooks/webhook_create.go b/api/http/handler/webhooks/webhook_create.go index b69e93db3..d7edde333 100644 --- a/api/http/handler/webhooks/webhook_create.go +++ b/api/http/handler/webhooks/webhook_create.go @@ -80,7 +80,7 @@ func (handler *Handler) webhookCreate(w http.ResponseWriter, r *http.Request) *h return httperror.InternalServerError("Unable to retrieve user authentication token", err) } - _, err = access.GetAccessibleRegistry(handler.DataStore, tokenData.ID, endpointID, payload.RegistryID) + _, err = 
access.GetAccessibleRegistry(handler.DataStore, nil, tokenData.ID, endpointID, payload.RegistryID) if err != nil { return httperror.Forbidden("Permission deny to access registry", err) } diff --git a/api/http/handler/webhooks/webhook_update.go b/api/http/handler/webhooks/webhook_update.go index 7a026fcd7..94133c49a 100644 --- a/api/http/handler/webhooks/webhook_update.go +++ b/api/http/handler/webhooks/webhook_update.go @@ -69,7 +69,7 @@ func (handler *Handler) webhookUpdate(w http.ResponseWriter, r *http.Request) *h return httperror.InternalServerError("Unable to retrieve user authentication token", err) } - _, err = access.GetAccessibleRegistry(handler.DataStore, tokenData.ID, webhook.EndpointID, payload.RegistryID) + _, err = access.GetAccessibleRegistry(handler.DataStore, nil, tokenData.ID, webhook.EndpointID, payload.RegistryID) if err != nil { return httperror.Forbidden("Permission deny to access registry", err) } diff --git a/api/http/handler/websocket/attach.go b/api/http/handler/websocket/attach.go index d0cb7746f..96e228418 100644 --- a/api/http/handler/websocket/attach.go +++ b/api/http/handler/websocket/attach.go @@ -9,8 +9,8 @@ import ( "github.com/portainer/portainer/api/ws" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" + "github.com/portainer/portainer/pkg/validate" - "github.com/asaskevich/govalidator" "github.com/gorilla/websocket" ) @@ -38,7 +38,7 @@ func (handler *Handler) websocketAttach(w http.ResponseWriter, r *http.Request) if err != nil { return httperror.BadRequest("Invalid query parameter: id", err) } - if !govalidator.IsHexadecimal(attachID) { + if !validate.IsHexadecimal(attachID) { return httperror.BadRequest("Invalid query parameter: id (must be hexadecimal identifier)", err) } diff --git a/api/http/handler/websocket/exec.go b/api/http/handler/websocket/exec.go index ab04b0702..aef5861c8 100644 --- a/api/http/handler/websocket/exec.go +++ b/api/http/handler/websocket/exec.go @@ -8,8 +8,8 @@ import ( "github.com/portainer/portainer/api/ws" httperror "github.com/portainer/portainer/pkg/libhttp/error" "github.com/portainer/portainer/pkg/libhttp/request" + "github.com/portainer/portainer/pkg/validate" - "github.com/asaskevich/govalidator" "github.com/gorilla/websocket" "github.com/segmentio/encoding/json" ) @@ -42,7 +42,7 @@ func (handler *Handler) websocketExec(w http.ResponseWriter, r *http.Request) *h if err != nil { return httperror.BadRequest("Invalid query parameter: id", err) } - if !govalidator.IsHexadecimal(execID) { + if !validate.IsHexadecimal(execID) { return httperror.BadRequest("Invalid query parameter: id (must be hexadecimal identifier)", err) } diff --git a/api/http/handler/websocket/initdial.go b/api/http/handler/websocket/initdial.go index e9160ccf1..1be0e496f 100644 --- a/api/http/handler/websocket/initdial.go +++ b/api/http/handler/websocket/initdial.go @@ -21,16 +21,14 @@ func initDial(endpoint *portainer.Endpoint) (net.Conn, error) { host = url.Path } - if endpoint.TLSConfig.TLS { - tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig.TLSCACertPath, endpoint.TLSConfig.TLSCertPath, endpoint.TLSConfig.TLSKeyPath, endpoint.TLSConfig.TLSSkipVerify) - if err != nil { - return nil, err - } - - return tls.Dial(url.Scheme, host, tlsConfig) + if !endpoint.TLSConfig.TLS { + return createDial(url.Scheme, host) } - con, err := createDial(url.Scheme, host) + tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig) + if err != nil { + return nil, 
err + } - return con, err + return tls.Dial(url.Scheme, host, tlsConfig) } diff --git a/api/http/handler/websocket/initdial_test.go b/api/http/handler/websocket/initdial_test.go new file mode 100644 index 000000000..3179389f0 --- /dev/null +++ b/api/http/handler/websocket/initdial_test.go @@ -0,0 +1,64 @@ +package websocket + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" + + portainer "github.com/portainer/portainer/api" + + "github.com/stretchr/testify/require" +) + +func TestInitDial(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer srv.Close() + + tlsSrv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer tlsSrv.Close() + + f := func(srvURL string) { + u, err := url.Parse(srvURL) + require.NoError(t, err) + + isTLS := u.Scheme == "https" + + u.Scheme = "tcp" + + endpoint := &portainer.Endpoint{ + URL: u.String(), + TLSConfig: portainer.TLSConfiguration{ + TLS: isTLS, + TLSSkipVerify: true, + }, + } + + // Valid configuration + conn, err := initDial(endpoint) + require.NoError(t, err) + require.NotNil(t, conn) + + err = conn.Close() + require.NoError(t, err) + + if !isTLS { + return + } + + // Invalid TLS configuration + endpoint.TLSConfig.TLSCertPath = "/invalid/path/client.crt" + endpoint.TLSConfig.TLSKeyPath = "/invalid/path/client.key" + + conn, err = initDial(endpoint) + require.Error(t, err) + require.Nil(t, conn) + } + + f(srv.URL) + f(tlsSrv.URL) +} diff --git a/api/http/handler/websocket/proxy.go b/api/http/handler/websocket/proxy.go index c8ee8b82b..9233031ea 100644 --- a/api/http/handler/websocket/proxy.go +++ b/api/http/handler/websocket/proxy.go @@ -73,10 +73,7 @@ func (handler *Handler) doProxyWebsocketRequest( proxy.Dialer = &proxyDialer if enableTLS { - tlsConfig := crypto.CreateTLSConfiguration() - tlsConfig.InsecureSkipVerify = params.endpoint.TLSConfig.TLSSkipVerify - - proxyDialer.TLSClientConfig = tlsConfig + proxyDialer.TLSClientConfig = crypto.CreateTLSConfiguration(params.endpoint.TLSConfig.TLSSkipVerify) } signature, err := handler.SignatureService.CreateSignature(portainer.PortainerAgentSignatureMessage) diff --git a/api/http/middlewares/endpoint.go b/api/http/middlewares/endpoint.go index c88731dd3..0050e4300 100644 --- a/api/http/middlewares/endpoint.go +++ b/api/http/middlewares/endpoint.go @@ -25,12 +25,12 @@ type key int const contextEndpoint key = 0 func WithEndpoint(endpointService dataservices.EndpointService, endpointIDParam string) mux.MiddlewareFunc { + if endpointIDParam == "" { + endpointIDParam = "id" + } + return func(next http.Handler) http.Handler { return http.HandlerFunc(func(rw http.ResponseWriter, request *http.Request) { - if endpointIDParam == "" { - endpointIDParam = "id" - } - endpointID, err := requesthelpers.RetrieveNumericRouteVariableValue(request, endpointIDParam) if err != nil { httperror.WriteError(rw, http.StatusBadRequest, "Invalid environment identifier route variable", err) @@ -51,7 +51,6 @@ func WithEndpoint(endpointService dataservices.EndpointService, endpointIDParam ctx := context.WithValue(request.Context(), contextEndpoint, endpoint) next.ServeHTTP(rw, request.WithContext(ctx)) - }) } } diff --git a/api/http/middlewares/panic_logger.go b/api/http/middlewares/panic_logger.go new file mode 100644 index 000000000..6f3b2076f --- /dev/null +++ b/api/http/middlewares/panic_logger.go @@ -0,0 +1,25 @@ +package middlewares + 
+import ( + "net/http" + "runtime/debug" + + "github.com/rs/zerolog/log" +) + +func WithPanicLogger(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + defer func() { + if err := recover(); err != nil { + log.Error(). + Any("panic", err). + Str("method", req.Method). + Str("url", req.URL.String()). + Str("stack", string(debug.Stack())). + Msg("Panic in request handler") + } + }() + + next.ServeHTTP(w, req) + }) +} diff --git a/api/http/middlewares/plaintext_http_request.go b/api/http/middlewares/plaintext_http_request.go new file mode 100644 index 000000000..e746fd819 --- /dev/null +++ b/api/http/middlewares/plaintext_http_request.go @@ -0,0 +1,76 @@ +package middlewares + +import ( + "net/http" + "slices" + "strings" + + "github.com/gorilla/csrf" +) + +var ( + // Idempotent (safe) methods as defined by RFC7231 section 4.2.2. + safeMethods = []string{"GET", "HEAD", "OPTIONS", "TRACE"} +) + +type plainTextHTTPRequestHandler struct { + next http.Handler +} + +// parseForwardedHeaderProto parses the Forwarded header and extracts the protocol. +// The Forwarded header format supports: +// - Single proxy: Forwarded: by=;for=;host=;proto= +// - Multiple proxies: Forwarded: for=192.0.2.43, for=198.51.100.17 +// We take the first (leftmost) entry as it represents the original client +func parseForwardedHeaderProto(forwarded string) string { + if forwarded == "" { + return "" + } + + // Parse the first part (leftmost proxy, closest to original client) + firstPart, _, _ := strings.Cut(forwarded, ",") + firstPart = strings.TrimSpace(firstPart) + + // Split by semicolon to get key-value pairs within this proxy entry + // Format: key=value;key=value;key=value + pairs := strings.Split(firstPart, ";") + for _, pair := range pairs { + // Split by equals sign to separate key and value + key, value, found := strings.Cut(pair, "=") + if !found { + continue + } + + if strings.EqualFold(strings.TrimSpace(key), "proto") { + return strings.Trim(strings.TrimSpace(value), `"'`) + } + } + + return "" +} + +// isHTTPSRequest checks if the original request was made over HTTPS +// by examining both X-Forwarded-Proto and Forwarded headers +func isHTTPSRequest(r *http.Request) bool { + return strings.EqualFold(r.Header.Get("X-Forwarded-Proto"), "https") || + strings.EqualFold(parseForwardedHeaderProto(r.Header.Get("Forwarded")), "https") +} + +func (h *plainTextHTTPRequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if slices.Contains(safeMethods, r.Method) { + h.next.ServeHTTP(w, r) + return + } + + req := r + // If original request was HTTPS (via proxy), keep CSRF checks. 
+ if !isHTTPSRequest(r) { + req = csrf.PlaintextHTTPRequest(r) + } + + h.next.ServeHTTP(w, req) +} + +func PlaintextHTTPRequest(next http.Handler) http.Handler { + return &plainTextHTTPRequestHandler{next: next} +} diff --git a/api/http/middlewares/plaintext_http_request_test.go b/api/http/middlewares/plaintext_http_request_test.go new file mode 100644 index 000000000..33912be80 --- /dev/null +++ b/api/http/middlewares/plaintext_http_request_test.go @@ -0,0 +1,173 @@ +package middlewares + +import ( + "testing" +) + +var tests = []struct { + name string + forwarded string + expected string +}{ + { + name: "empty header", + forwarded: "", + expected: "", + }, + { + name: "single proxy with proto=https", + forwarded: "proto=https", + expected: "https", + }, + { + name: "single proxy with proto=http", + forwarded: "proto=http", + expected: "http", + }, + { + name: "single proxy with multiple directives", + forwarded: "for=192.0.2.60;proto=https;by=203.0.113.43", + expected: "https", + }, + { + name: "single proxy with proto in middle", + forwarded: "for=192.0.2.60;proto=https;host=example.com", + expected: "https", + }, + { + name: "single proxy with proto at end", + forwarded: "for=192.0.2.60;host=example.com;proto=https", + expected: "https", + }, + { + name: "multiple proxies - takes first", + forwarded: "proto=https, proto=http", + expected: "https", + }, + { + name: "multiple proxies with complex format", + forwarded: "for=192.0.2.43;proto=https, for=198.51.100.17;proto=http", + expected: "https", + }, + { + name: "multiple proxies with for directive only", + forwarded: "for=192.0.2.43, for=198.51.100.17", + expected: "", + }, + { + name: "multiple proxies with proto only in second", + forwarded: "for=192.0.2.43, proto=https", + expected: "", + }, + { + name: "multiple proxies with proto only in first", + forwarded: "proto=https, for=198.51.100.17", + expected: "https", + }, + { + name: "quoted protocol value", + forwarded: "proto=\"https\"", + expected: "https", + }, + { + name: "single quoted protocol value", + forwarded: "proto='https'", + expected: "https", + }, + { + name: "mixed case protocol", + forwarded: "proto=HTTPS", + expected: "HTTPS", + }, + { + name: "no proto directive", + forwarded: "for=192.0.2.60;by=203.0.113.43", + expected: "", + }, + { + name: "empty proto value", + forwarded: "proto=", + expected: "", + }, + { + name: "whitespace around values", + forwarded: " proto = https ", + expected: "https", + }, + { + name: "whitespace around semicolons", + forwarded: "for=192.0.2.60 ; proto=https ; by=203.0.113.43", + expected: "https", + }, + { + name: "whitespace around commas", + forwarded: "proto=https , proto=http", + expected: "https", + }, + { + name: "IPv6 address in for directive", + forwarded: "for=\"[2001:db8:cafe::17]:4711\";proto=https", + expected: "https", + }, + { + name: "complex multiple proxies with IPv6", + forwarded: "for=192.0.2.43;proto=https, for=\"[2001:db8:cafe::17]\";proto=http", + expected: "https", + }, + { + name: "obfuscated identifiers", + forwarded: "for=_mdn;proto=https", + expected: "https", + }, + { + name: "unknown identifier", + forwarded: "for=unknown;proto=https", + expected: "https", + }, + { + name: "malformed key-value pair", + forwarded: "proto", + expected: "", + }, + { + name: "malformed key-value pair with equals", + forwarded: "proto=", + expected: "", + }, + { + name: "multiple equals signs", + forwarded: "proto=https=extra", + expected: "https=extra", + }, + { + name: "mixed case directive name", + forwarded: "PROTO=https", 
+ expected: "https", + }, + { + name: "mixed case directive name with spaces", + forwarded: " Proto = https ", + expected: "https", + }, +} + +func TestParseForwardedHeaderProto(t *testing.T) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := parseForwardedHeaderProto(tt.forwarded) + if result != tt.expected { + t.Errorf("parseForwardedHeader(%q) = %q, want %q", tt.forwarded, result, tt.expected) + } + }) + } +} + +func FuzzParseForwardedHeaderProto(f *testing.F) { + for _, t := range tests { + f.Add(t.forwarded) + } + + f.Fuzz(func(t *testing.T, forwarded string) { + parseForwardedHeaderProto(forwarded) + }) +} diff --git a/api/http/middlewares/withitem.go b/api/http/middlewares/withitem.go index 34e6a9ba2..fce95e86b 100644 --- a/api/http/middlewares/withitem.go +++ b/api/http/middlewares/withitem.go @@ -45,7 +45,7 @@ func WithItem[TId ~int, TObject any](getter ItemGetter[TId, TObject], idParam st } } -func FetchItem[T any](request *http.Request, contextKey string) (*T, error) { +func FetchItem[T any](request *http.Request, contextKey ItemContextKey) (*T, error) { contextData := request.Context().Value(contextKey) if contextData == nil { return nil, errors.New("unable to find item in request context") diff --git a/api/http/models/kubernetes/application.go b/api/http/models/kubernetes/application.go index fcb49b23d..4759d9214 100644 --- a/api/http/models/kubernetes/application.go +++ b/api/http/models/kubernetes/application.go @@ -38,14 +38,30 @@ type K8sApplication struct { Labels map[string]string `json:"Labels,omitempty"` Resource K8sApplicationResource `json:"Resource,omitempty"` HorizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler `json:"HorizontalPodAutoscaler,omitempty"` + CustomResourceMetadata CustomResourceMetadata `json:"CustomResourceMetadata,omitempty"` } type Metadata struct { Labels map[string]string `json:"labels"` } +type CustomResourceMetadata struct { + Kind string `json:"kind"` + APIVersion string `json:"apiVersion"` + Plural string `json:"plural"` +} + type Pod struct { - Status string `json:"Status"` + Name string `json:"Name"` + ContainerName string `json:"ContainerName"` + Image string `json:"Image"` + ImagePullPolicy string `json:"ImagePullPolicy"` + Status string `json:"Status"` + NodeName string `json:"NodeName"` + PodIP string `json:"PodIP"` + UID string `json:"Uid"` + Resource K8sApplicationResource `json:"Resource,omitempty"` + CreationDate time.Time `json:"CreationDate"` } type Configuration struct { @@ -72,8 +88,8 @@ type TLSInfo struct { // Existing types type K8sApplicationResource struct { - CPURequest float64 `json:"CpuRequest"` - CPULimit float64 `json:"CpuLimit"` - MemoryRequest int64 `json:"MemoryRequest"` - MemoryLimit int64 `json:"MemoryLimit"` + CPURequest float64 `json:"CpuRequest,omitempty"` + CPULimit float64 `json:"CpuLimit,omitempty"` + MemoryRequest int64 `json:"MemoryRequest,omitempty"` + MemoryLimit int64 `json:"MemoryLimit,omitempty"` } diff --git a/api/http/models/kubernetes/event.go b/api/http/models/kubernetes/event.go new file mode 100644 index 000000000..be447b554 --- /dev/null +++ b/api/http/models/kubernetes/event.go @@ -0,0 +1,25 @@ +package kubernetes + +import "time" + +type K8sEvent struct { + Type string `json:"type"` + Name string `json:"name"` + Reason string `json:"reason"` + Message string `json:"message"` + Namespace string `json:"namespace"` + EventTime time.Time `json:"eventTime"` + Kind string `json:"kind,omitempty"` + Count int32 `json:"count"` + FirstTimestamp *time.Time 
`json:"firstTimestamp,omitempty"` + LastTimestamp *time.Time `json:"lastTimestamp,omitempty"` + UID string `json:"uid"` + InvolvedObjectKind K8sEventInvolvedObject `json:"involvedObject"` +} + +type K8sEventInvolvedObject struct { + Kind string `json:"kind,omitempty"` + UID string `json:"uid"` + Name string `json:"name"` + Namespace string `json:"namespace"` +} diff --git a/api/http/models/kubernetes/services.go b/api/http/models/kubernetes/services.go index 04ac27bf3..bcbddf3ae 100644 --- a/api/http/models/kubernetes/services.go +++ b/api/http/models/kubernetes/services.go @@ -35,8 +35,8 @@ type ( } K8sServiceIngress struct { - IP string `json:"IP"` - Host string `json:"Host"` + IP string `json:"IP"` + Hostname string `json:"Hostname"` } // K8sServiceDeleteRequests is a mapping of namespace names to a slice of diff --git a/api/http/proxy/factory/agent.go b/api/http/proxy/factory/agent.go index bd08efd6c..ca74d2228 100644 --- a/api/http/proxy/factory/agent.go +++ b/api/http/proxy/factory/agent.go @@ -43,7 +43,7 @@ func (factory *ProxyFactory) NewAgentProxy(endpoint *portainer.Endpoint) (*Proxy httpTransport := &http.Transport{} if endpoint.TLSConfig.TLS || endpoint.TLSConfig.TLSSkipVerify { - config, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig.TLSCACertPath, endpoint.TLSConfig.TLSCertPath, endpoint.TLSConfig.TLSKeyPath, endpoint.TLSConfig.TLSSkipVerify) + config, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig) if err != nil { return nil, errors.WithMessage(err, "failed generating tls configuration") } @@ -52,7 +52,7 @@ func (factory *ProxyFactory) NewAgentProxy(endpoint *portainer.Endpoint) (*Proxy endpointURL.Scheme = "https" } - proxy := newSingleHostReverseProxyWithHostHeader(endpointURL) + proxy := NewSingleHostReverseProxyWithHostHeader(endpointURL) proxy.Transport = agent.NewTransport(factory.signatureService, httpTransport) @@ -63,8 +63,7 @@ func (factory *ProxyFactory) NewAgentProxy(endpoint *portainer.Endpoint) (*Proxy Port: 0, } - err = proxyServer.start() - if err != nil { + if err := proxyServer.start(); err != nil { return nil, errors.Wrap(err, "failed starting proxy server") } diff --git a/api/http/proxy/factory/azure.go b/api/http/proxy/factory/azure.go index eba2fbd6a..e210dec2a 100644 --- a/api/http/proxy/factory/azure.go +++ b/api/http/proxy/factory/azure.go @@ -15,7 +15,7 @@ func newAzureProxy(endpoint *portainer.Endpoint, dataStore dataservices.DataStor return nil, err } - proxy := newSingleHostReverseProxyWithHostHeader(remoteURL) + proxy := NewSingleHostReverseProxyWithHostHeader(remoteURL) proxy.Transport = azure.NewTransport(&endpoint.AzureCredentials, dataStore, endpoint) return proxy, nil } diff --git a/api/http/proxy/factory/docker.go b/api/http/proxy/factory/docker.go index e558bebb9..d832b3678 100644 --- a/api/http/proxy/factory/docker.go +++ b/api/http/proxy/factory/docker.go @@ -50,7 +50,7 @@ func (factory *ProxyFactory) newDockerHTTPProxy(endpoint *portainer.Endpoint) (h httpTransport := &http.Transport{} if endpoint.TLSConfig.TLS || endpoint.TLSConfig.TLSSkipVerify { - config, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig.TLSCACertPath, endpoint.TLSConfig.TLSCertPath, endpoint.TLSConfig.TLSKeyPath, endpoint.TLSConfig.TLSSkipVerify) + config, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig) if err != nil { return nil, err } @@ -72,7 +72,7 @@ func (factory *ProxyFactory) newDockerHTTPProxy(endpoint *portainer.Endpoint) (h return nil, err } - proxy := 
newSingleHostReverseProxyWithHostHeader(endpointURL) + proxy := NewSingleHostReverseProxyWithHostHeader(endpointURL) proxy.Transport = dockerTransport return proxy, nil } diff --git a/api/http/proxy/factory/docker/access_control.go b/api/http/proxy/factory/docker/access_control.go index e945d38da..ac25a7b7a 100644 --- a/api/http/proxy/factory/docker/access_control.go +++ b/api/http/proxy/factory/docker/access_control.go @@ -35,7 +35,7 @@ type ( func getUniqueElements(items string) []string { xs := strings.Split(items, ",") xs = slicesx.Map(xs, strings.TrimSpace) - xs = slicesx.Filter(xs, func(x string) bool { return len(x) > 0 }) + xs = slicesx.FilterInPlace(xs, func(x string) bool { return len(x) > 0 }) return slicesx.Unique(xs) } diff --git a/api/http/proxy/factory/docker/networks.go b/api/http/proxy/factory/docker/networks.go index 95c96df81..cd94478d2 100644 --- a/api/http/proxy/factory/docker/networks.go +++ b/api/http/proxy/factory/docker/networks.go @@ -6,7 +6,7 @@ import ( portainer "github.com/portainer/portainer/api" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" @@ -20,7 +20,7 @@ const ( ) func getInheritedResourceControlFromNetworkLabels(dockerClient *client.Client, endpointID portainer.EndpointID, networkID string, resourceControls []portainer.ResourceControl) (*portainer.ResourceControl, error) { - network, err := dockerClient.NetworkInspect(context.Background(), networkID, types.NetworkInspectOptions{}) + network, err := dockerClient.NetworkInspect(context.Background(), networkID, network.InspectOptions{}) if err != nil { return nil, err } diff --git a/api/http/proxy/factory/docker/registry.go b/api/http/proxy/factory/docker/registry.go index ecf7935f1..7036853c7 100644 --- a/api/http/proxy/factory/docker/registry.go +++ b/api/http/proxy/factory/docker/registry.go @@ -55,12 +55,13 @@ func createRegistryAuthenticationHeader( return } - if err = registryutils.EnsureRegTokenValid(dataStore, matchingRegistry); err != nil { + if err = registryutils.PrepareRegistryCredentials(dataStore, matchingRegistry); err != nil { return } authenticationHeader.Serveraddress = matchingRegistry.URL - authenticationHeader.Username, authenticationHeader.Password, err = registryutils.GetRegEffectiveCredential(matchingRegistry) + authenticationHeader.Username = matchingRegistry.Username + authenticationHeader.Password = matchingRegistry.Password return } diff --git a/api/http/proxy/factory/docker/transport.go b/api/http/proxy/factory/docker/transport.go index 49f1cd501..dae72ecc1 100644 --- a/api/http/proxy/factory/docker/transport.go +++ b/api/http/proxy/factory/docker/transport.go @@ -15,6 +15,7 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" + gittypes "github.com/portainer/portainer/api/git/types" "github.com/portainer/portainer/api/http/proxy/factory/utils" "github.com/portainer/portainer/api/http/security" "github.com/portainer/portainer/api/internal/authorization" @@ -418,7 +419,14 @@ func (transport *Transport) updateDefaultGitBranch(request *http.Request) error } repositoryURL := remote[:len(remote)-4] - latestCommitID, err := transport.gitService.LatestCommitID(repositoryURL, "", "", "", false) + latestCommitID, err := transport.gitService.LatestCommitID( + repositoryURL, + "", + "", + "", + gittypes.GitCredentialAuthType_Basic, + false, + ) if err != nil { return err } diff --git a/api/http/proxy/factory/docker/volumes.go 
b/api/http/proxy/factory/docker/volumes.go index aae0a4602..858a940d8 100644 --- a/api/http/proxy/factory/docker/volumes.go +++ b/api/http/proxy/factory/docker/volumes.go @@ -224,7 +224,7 @@ func (transport *Transport) getDockerID() (string, error) { if transport.snapshotService != nil { endpoint := portainer.Endpoint{ID: transport.endpoint.ID} - if err := transport.snapshotService.FillSnapshotData(&endpoint); err == nil && len(endpoint.Snapshots) > 0 { + if err := transport.snapshotService.FillSnapshotData(&endpoint, true); err == nil && len(endpoint.Snapshots) > 0 { if dockerID, err := snapshot.FetchDockerID(endpoint.Snapshots[0]); err == nil { transport.dockerID = dockerID return dockerID, nil diff --git a/api/http/proxy/factory/factory.go b/api/http/proxy/factory/factory.go index 28d05dec5..b45629630 100644 --- a/api/http/proxy/factory/factory.go +++ b/api/http/proxy/factory/factory.go @@ -24,11 +24,12 @@ type ( kubernetesTokenCacheManager *kubernetes.TokenCacheManager gitService portainer.GitService snapshotService portainer.SnapshotService + jwtService portainer.JWTService } ) // NewProxyFactory returns a pointer to a new instance of a ProxyFactory -func NewProxyFactory(dataStore dataservices.DataStore, signatureService portainer.DigitalSignatureService, tunnelService portainer.ReverseTunnelService, clientFactory *dockerclient.ClientFactory, kubernetesClientFactory *cli.ClientFactory, kubernetesTokenCacheManager *kubernetes.TokenCacheManager, gitService portainer.GitService, snapshotService portainer.SnapshotService) *ProxyFactory { +func NewProxyFactory(dataStore dataservices.DataStore, signatureService portainer.DigitalSignatureService, tunnelService portainer.ReverseTunnelService, clientFactory *dockerclient.ClientFactory, kubernetesClientFactory *cli.ClientFactory, kubernetesTokenCacheManager *kubernetes.TokenCacheManager, gitService portainer.GitService, snapshotService portainer.SnapshotService, jwtService portainer.JWTService) *ProxyFactory { return &ProxyFactory{ dataStore: dataStore, signatureService: signatureService, @@ -38,6 +39,7 @@ func NewProxyFactory(dataStore dataservices.DataStore, signatureService portaine kubernetesTokenCacheManager: kubernetesTokenCacheManager, gitService: gitService, snapshotService: snapshotService, + jwtService: jwtService, } } diff --git a/api/http/proxy/factory/github/client.go b/api/http/proxy/factory/github/client.go new file mode 100644 index 000000000..74dcfb994 --- /dev/null +++ b/api/http/proxy/factory/github/client.go @@ -0,0 +1,108 @@ +package github + +import ( + "context" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/segmentio/encoding/json" + "oras.land/oras-go/v2/registry/remote/retry" +) + +const GitHubAPIHost = "https://api.github.com" + +// Package represents a GitHub container package +type Package struct { + Name string `json:"name"` + Owner struct { + Login string `json:"login"` + } `json:"owner"` +} + +// Client represents a GitHub API client +type Client struct { + httpClient *http.Client + baseURL string +} + +// NewClient creates a new GitHub API client +func NewClient(token string) *Client { + return &Client{ + httpClient: NewHTTPClient(token), + baseURL: GitHubAPIHost, + } +} + +// GetContainerPackages fetches container packages for the configured namespace +// It's a small http client wrapper instead of using the github client because listing repositories is the only known operation that isn't directly supported by oras +func (c *Client) GetContainerPackages(ctx context.Context, useOrganisation 
bool, organisationName string) ([]string, error) { + // Determine the namespace (user or organisation) for the request + namespace := "user" + if useOrganisation { + namespace = "orgs/" + organisationName + } + + // Build the full URL for listing container packages + url := fmt.Sprintf("%s/%s/packages?package_type=container", c.baseURL, namespace) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to execute request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("GitHub API returned status %d: %s", resp.StatusCode, resp.Status) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response body: %w", err) + } + + var packages []Package + if err := json.Unmarshal(body, &packages); err != nil { + return nil, fmt.Errorf("failed to parse response: %w", err) + } + + // Extract repository names in the form "owner/name" + repositories := make([]string, len(packages)) + for i, pkg := range packages { + repositories[i] = fmt.Sprintf("%s/%s", strings.ToLower(pkg.Owner.Login), strings.ToLower(pkg.Name)) + } + + return repositories, nil +} + +// NewHTTPClient creates a new HTTP client configured for GitHub API requests +func NewHTTPClient(token string) *http.Client { + return &http.Client{ + Transport: &tokenTransport{ + token: token, + transport: retry.NewTransport(&http.Transport{}), // Use ORAS retry transport for consistent rate limiting and error handling + }, + Timeout: 1 * time.Minute, + } +} + +// tokenTransport automatically adds the Bearer token header to requests +type tokenTransport struct { + token string + transport http.RoundTripper +} + +func (t *tokenTransport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.token != "" { + req.Header.Set("Authorization", "Bearer "+t.token) + req.Header.Set("Accept", "application/vnd.github+json") + } + return t.transport.RoundTrip(req) +} diff --git a/api/http/proxy/factory/gitlab.go b/api/http/proxy/factory/gitlab.go index cbe28411c..7c1ae96e3 100644 --- a/api/http/proxy/factory/gitlab.go +++ b/api/http/proxy/factory/gitlab.go @@ -13,7 +13,7 @@ func newGitlabProxy(uri string) (http.Handler, error) { return nil, err } - proxy := newSingleHostReverseProxyWithHostHeader(url) + proxy := NewSingleHostReverseProxyWithHostHeader(url) proxy.Transport = gitlab.NewTransport() return proxy, nil } diff --git a/api/http/proxy/factory/gitlab/client.go b/api/http/proxy/factory/gitlab/client.go new file mode 100644 index 000000000..13d07e18b --- /dev/null +++ b/api/http/proxy/factory/gitlab/client.go @@ -0,0 +1,130 @@ +package gitlab + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/segmentio/encoding/json" + "oras.land/oras-go/v2/registry/remote/retry" +) + +// Repository represents a GitLab registry repository +type Repository struct { + ID int `json:"id"` + Name string `json:"name"` + Path string `json:"path"` + ProjectID int `json:"project_id"` + Location string `json:"location"` + CreatedAt string `json:"created_at"` + Status string `json:"status"` +} + +// Client represents a GitLab API client +type Client struct { + httpClient *http.Client + baseURL string +} + +// NewClient creates a new GitLab API client +// it currently is an http client because only GetRegistryRepositoryNames is needed (oras supports 
other commands). +// if we need to support other commands, consider using the gitlab client library. +func NewClient(baseURL, token string) *Client { + return &Client{ + httpClient: NewHTTPClient(token), + baseURL: baseURL, + } +} + +// GetRegistryRepositoryNames fetches registry repository names for a given project. +// It's a small http client wrapper instead of using the gitlab client library because listing repositories is the only known operation that isn't directly supported by oras +func (c *Client) GetRegistryRepositoryNames(ctx context.Context, projectID int) ([]string, error) { + url := fmt.Sprintf("%s/api/v4/projects/%d/registry/repositories", c.baseURL, projectID) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to execute request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("GitLab API returned status %d: %s", resp.StatusCode, resp.Status) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response body: %w", err) + } + + var repositories []Repository + if err := json.Unmarshal(body, &repositories); err != nil { + return nil, fmt.Errorf("failed to parse response: %w", err) + } + + // Extract repository names + names := make([]string, len(repositories)) + for i, repo := range repositories { + // the full path is required for further repo operations + names[i] = repo.Path + } + + return names, nil +} + +type Transport struct { + httpTransport *http.Transport +} + +// NewTransport returns a pointer to a new instance of Transport that implements the HTTP Transport +// interface for proxying requests to the Gitlab API. 
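A hypothetical call site for the new GitLab client shown above; the base URL, token, and project ID are placeholders, and the import path is assumed from the file's location in this repository:

package main

import (
	"context"
	"fmt"

	"github.com/portainer/portainer/api/http/proxy/factory/gitlab"
)

func main() {
	// Placeholder credentials and project ID; a real caller would take these
	// from the registry configuration.
	c := gitlab.NewClient("https://gitlab.com", "glpat-example-token")

	names, err := c.GetRegistryRepositoryNames(context.Background(), 12345)
	if err != nil {
		fmt.Println("listing registry repositories failed:", err)
		return
	}
	fmt.Println(names)
}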
+func NewTransport() *Transport { + return &Transport{ + httpTransport: &http.Transport{}, + } +} + +// RoundTrip is the implementation of the http.RoundTripper interface +func (transport *Transport) RoundTrip(request *http.Request) (*http.Response, error) { + token := request.Header.Get("Private-Token") + if token == "" { + return nil, errors.New("no gitlab token provided") + } + + r, err := http.NewRequest(request.Method, request.URL.String(), request.Body) + if err != nil { + return nil, err + } + + r.Header.Set("Private-Token", token) + return transport.httpTransport.RoundTrip(r) +} + +// NewHTTPClient creates a new HTTP client configured for GitLab API requests +func NewHTTPClient(token string) *http.Client { + return &http.Client{ + Transport: &tokenTransport{ + token: token, + transport: retry.NewTransport(&http.Transport{}), // Use ORAS retry transport for consistent rate limiting and error handling + }, + Timeout: 1 * time.Minute, + } +} + +// tokenTransport automatically adds the Private-Token header to requests +type tokenTransport struct { + token string + transport http.RoundTripper +} + +func (t *tokenTransport) RoundTrip(req *http.Request) (*http.Response, error) { + req.Header.Set("Private-Token", t.token) + return t.transport.RoundTrip(req) +} diff --git a/api/http/proxy/factory/gitlab/transport.go b/api/http/proxy/factory/gitlab/transport.go deleted file mode 100644 index 7e1804c45..000000000 --- a/api/http/proxy/factory/gitlab/transport.go +++ /dev/null @@ -1,34 +0,0 @@ -package gitlab - -import ( - "errors" - "net/http" -) - -type Transport struct { - httpTransport *http.Transport -} - -// NewTransport returns a pointer to a new instance of Transport that implements the HTTP Transport -// interface for proxying requests to the Gitlab API. 
-func NewTransport() *Transport { - return &Transport{ - httpTransport: &http.Transport{}, - } -} - -// RoundTrip is the implementation of the http.RoundTripper interface -func (transport *Transport) RoundTrip(request *http.Request) (*http.Response, error) { - token := request.Header.Get("Private-Token") - if token == "" { - return nil, errors.New("no gitlab token provided") - } - - r, err := http.NewRequest(request.Method, request.URL.String(), request.Body) - if err != nil { - return nil, err - } - - r.Header.Set("Private-Token", token) - return transport.httpTransport.RoundTrip(r) -} diff --git a/api/http/proxy/factory/kubernetes.go b/api/http/proxy/factory/kubernetes.go index 0a74fa3f2..43ed7d5b3 100644 --- a/api/http/proxy/factory/kubernetes.go +++ b/api/http/proxy/factory/kubernetes.go @@ -7,7 +7,6 @@ import ( "github.com/portainer/portainer/api/http/proxy/factory/kubernetes" portainer "github.com/portainer/portainer/api" - "github.com/portainer/portainer/api/crypto" ) func (factory *ProxyFactory) newKubernetesProxy(endpoint *portainer.Endpoint) (http.Handler, error) { @@ -38,12 +37,12 @@ func (factory *ProxyFactory) newKubernetesLocalProxy(endpoint *portainer.Endpoin return nil, err } - transport, err := kubernetes.NewLocalTransport(tokenManager, endpoint, factory.kubernetesClientFactory, factory.dataStore) + transport, err := kubernetes.NewLocalTransport(tokenManager, endpoint, factory.kubernetesClientFactory, factory.dataStore, factory.jwtService) if err != nil { return nil, err } - proxy := newSingleHostReverseProxyWithHostHeader(remoteURL) + proxy := NewSingleHostReverseProxyWithHostHeader(remoteURL) proxy.Transport = transport return proxy, nil @@ -73,8 +72,8 @@ func (factory *ProxyFactory) newKubernetesEdgeHTTPProxy(endpoint *portainer.Endp } endpointURL.Scheme = "http" - proxy := newSingleHostReverseProxyWithHostHeader(endpointURL) - proxy.Transport = kubernetes.NewEdgeTransport(factory.dataStore, factory.signatureService, factory.reverseTunnelService, endpoint, tokenManager, factory.kubernetesClientFactory) + proxy := NewSingleHostReverseProxyWithHostHeader(endpointURL) + proxy.Transport = kubernetes.NewEdgeTransport(factory.dataStore, factory.signatureService, factory.reverseTunnelService, endpoint, tokenManager, factory.kubernetesClientFactory, factory.jwtService) return proxy, nil } @@ -93,19 +92,19 @@ func (factory *ProxyFactory) newKubernetesAgentHTTPSProxy(endpoint *portainer.En return nil, err } - tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig.TLSCACertPath, endpoint.TLSConfig.TLSCertPath, endpoint.TLSConfig.TLSKeyPath, endpoint.TLSConfig.TLSSkipVerify) - if err != nil { - return nil, err - } - tokenCache := factory.kubernetesTokenCacheManager.GetOrCreateTokenCache(endpoint.ID) tokenManager, err := kubernetes.NewTokenManager(kubecli, factory.dataStore, tokenCache, false) if err != nil { return nil, err } - proxy := newSingleHostReverseProxyWithHostHeader(remoteURL) - proxy.Transport = kubernetes.NewAgentTransport(factory.signatureService, tlsConfig, tokenManager, endpoint, factory.kubernetesClientFactory, factory.dataStore) + transport, err := kubernetes.NewAgentTransport(factory.signatureService, tokenManager, endpoint, factory.kubernetesClientFactory, factory.dataStore, factory.jwtService) + if err != nil { + return nil, err + } + + proxy := NewSingleHostReverseProxyWithHostHeader(remoteURL) + proxy.Transport = transport return proxy, nil } diff --git a/api/http/proxy/factory/kubernetes/agent_transport.go 
b/api/http/proxy/factory/kubernetes/agent_transport.go index b6ab548ae..4a62e2367 100644 --- a/api/http/proxy/factory/kubernetes/agent_transport.go +++ b/api/http/proxy/factory/kubernetes/agent_transport.go @@ -1,11 +1,11 @@ package kubernetes import ( - "crypto/tls" "net/http" "strings" portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/crypto" "github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/kubernetes/cli" ) @@ -16,7 +16,12 @@ type agentTransport struct { } // NewAgentTransport returns a new transport that can be used to send signed requests to a Portainer agent -func NewAgentTransport(signatureService portainer.DigitalSignatureService, tlsConfig *tls.Config, tokenManager *tokenManager, endpoint *portainer.Endpoint, k8sClientFactory *cli.ClientFactory, dataStore dataservices.DataStore) *agentTransport { +func NewAgentTransport(signatureService portainer.DigitalSignatureService, tokenManager *tokenManager, endpoint *portainer.Endpoint, k8sClientFactory *cli.ClientFactory, dataStore dataservices.DataStore, jwtService portainer.JWTService) (*agentTransport, error) { + tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig) + if err != nil { + return nil, err + } + transport := &agentTransport{ baseTransport: newBaseTransport( &http.Transport{ @@ -26,11 +31,12 @@ func NewAgentTransport(signatureService portainer.DigitalSignatureService, tlsCo endpoint, k8sClientFactory, dataStore, + jwtService, ), signatureService: signatureService, } - return transport + return transport, nil } // RoundTrip is the implementation of the the http.RoundTripper interface diff --git a/api/http/proxy/factory/kubernetes/edge_transport.go b/api/http/proxy/factory/kubernetes/edge_transport.go index 4eed6934a..73946114e 100644 --- a/api/http/proxy/factory/kubernetes/edge_transport.go +++ b/api/http/proxy/factory/kubernetes/edge_transport.go @@ -16,7 +16,7 @@ type edgeTransport struct { } // NewAgentTransport returns a new transport that can be used to send signed requests to a Portainer Edge agent -func NewEdgeTransport(dataStore dataservices.DataStore, signatureService portainer.DigitalSignatureService, reverseTunnelService portainer.ReverseTunnelService, endpoint *portainer.Endpoint, tokenManager *tokenManager, k8sClientFactory *cli.ClientFactory) *edgeTransport { +func NewEdgeTransport(dataStore dataservices.DataStore, signatureService portainer.DigitalSignatureService, reverseTunnelService portainer.ReverseTunnelService, endpoint *portainer.Endpoint, tokenManager *tokenManager, k8sClientFactory *cli.ClientFactory, jwtService portainer.JWTService) *edgeTransport { transport := &edgeTransport{ reverseTunnelService: reverseTunnelService, signatureService: signatureService, @@ -26,6 +26,7 @@ func NewEdgeTransport(dataStore dataservices.DataStore, signatureService portain endpoint, k8sClientFactory, dataStore, + jwtService, ), } diff --git a/api/http/proxy/factory/kubernetes/local_transport.go b/api/http/proxy/factory/kubernetes/local_transport.go index 4ae4082d9..bc832f35c 100644 --- a/api/http/proxy/factory/kubernetes/local_transport.go +++ b/api/http/proxy/factory/kubernetes/local_transport.go @@ -14,8 +14,8 @@ type localTransport struct { } // NewLocalTransport returns a new transport that can be used to send requests to the local Kubernetes API -func NewLocalTransport(tokenManager *tokenManager, endpoint *portainer.Endpoint, k8sClientFactory *cli.ClientFactory, dataStore dataservices.DataStore) (*localTransport, error) { - 
config, err := crypto.CreateTLSConfigurationFromBytes(nil, nil, nil, true, true) +func NewLocalTransport(tokenManager *tokenManager, endpoint *portainer.Endpoint, k8sClientFactory *cli.ClientFactory, dataStore dataservices.DataStore, jwtService portainer.JWTService) (*localTransport, error) { + config, err := crypto.CreateTLSConfigurationFromBytes(true, nil, nil, nil, true, true) if err != nil { return nil, err } @@ -29,6 +29,7 @@ func NewLocalTransport(tokenManager *tokenManager, endpoint *portainer.Endpoint, endpoint, k8sClientFactory, dataStore, + jwtService, ), } diff --git a/api/http/proxy/factory/kubernetes/local_transport_test.go b/api/http/proxy/factory/kubernetes/local_transport_test.go new file mode 100644 index 000000000..f346bba50 --- /dev/null +++ b/api/http/proxy/factory/kubernetes/local_transport_test.go @@ -0,0 +1,13 @@ +package kubernetes + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewLocalTransport(t *testing.T) { + transport, err := NewLocalTransport(nil, nil, nil, nil, nil) + require.NoError(t, err) + require.True(t, transport.baseTransport.httpTransport.TLSClientConfig.InsecureSkipVerify) //nolint:forbidigo +} diff --git a/api/http/proxy/factory/kubernetes/pods.go b/api/http/proxy/factory/kubernetes/pods.go index 6c36e079a..a2e5f1860 100644 --- a/api/http/proxy/factory/kubernetes/pods.go +++ b/api/http/proxy/factory/kubernetes/pods.go @@ -2,12 +2,18 @@ package kubernetes import ( "net/http" + "strings" ) -func (transport *baseTransport) proxyPodsRequest(request *http.Request, namespace, requestPath string) (*http.Response, error) { +func (transport *baseTransport) proxyPodsRequest(request *http.Request, namespace string) (*http.Response, error) { if request.Method == http.MethodDelete { transport.refreshRegistry(request, namespace) } + if request.Method == http.MethodPost && strings.Contains(request.URL.Path, "/exec") { + if err := transport.addTokenForExec(request); err != nil { + return nil, err + } + } return transport.executeKubernetesRequest(request) } diff --git a/api/http/proxy/factory/kubernetes/transport.go b/api/http/proxy/factory/kubernetes/transport.go index 76e9daa68..b4d06bcce 100644 --- a/api/http/proxy/factory/kubernetes/transport.go +++ b/api/http/proxy/factory/kubernetes/transport.go @@ -26,15 +26,17 @@ type baseTransport struct { endpoint *portainer.Endpoint k8sClientFactory *cli.ClientFactory dataStore dataservices.DataStore + jwtService portainer.JWTService } -func newBaseTransport(httpTransport *http.Transport, tokenManager *tokenManager, endpoint *portainer.Endpoint, k8sClientFactory *cli.ClientFactory, dataStore dataservices.DataStore) *baseTransport { +func newBaseTransport(httpTransport *http.Transport, tokenManager *tokenManager, endpoint *portainer.Endpoint, k8sClientFactory *cli.ClientFactory, dataStore dataservices.DataStore, jwtService portainer.JWTService) *baseTransport { return &baseTransport{ httpTransport: httpTransport, tokenManager: tokenManager, endpoint: endpoint, k8sClientFactory: k8sClientFactory, dataStore: dataStore, + jwtService: jwtService, } } @@ -58,6 +60,7 @@ func (transport *baseTransport) proxyKubernetesRequest(request *http.Request) (* switch { case strings.EqualFold(requestPath, "/namespaces/portainer/configmaps/portainer-config") && (request.Method == "PUT" || request.Method == "POST"): + transport.k8sClientFactory.ClearClientCache() defer transport.tokenManager.UpdateUserServiceAccountsForEndpoint(portainer.EndpointID(endpointID)) return 
transport.executeKubernetesRequest(request) case strings.EqualFold(requestPath, "/namespaces"): @@ -81,7 +84,7 @@ func (transport *baseTransport) proxyNamespacedRequest(request *http.Request, fu switch { case strings.HasPrefix(requestPath, "pods"): - return transport.proxyPodsRequest(request, namespace, requestPath) + return transport.proxyPodsRequest(request, namespace) case strings.HasPrefix(requestPath, "deployments"): return transport.proxyDeploymentsRequest(request, namespace, requestPath) case requestPath == "" && request.Method == "DELETE": @@ -91,6 +94,23 @@ func (transport *baseTransport) proxyNamespacedRequest(request *http.Request, fu } } +// addTokenForExec injects a kubeconfig token into the request header +// this is only used with kubeconfig for kubectl exec requests +func (transport *baseTransport) addTokenForExec(request *http.Request) error { + tokenData, err := security.RetrieveTokenData(request) + if err != nil { + return err + } + + token, err := transport.jwtService.GenerateTokenForKubeconfig(tokenData) + if err != nil { + return err + } + + request.Header.Set("Authorization", "Bearer "+token) + return nil +} + func (transport *baseTransport) executeKubernetesRequest(request *http.Request) (*http.Response, error) { resp, err := transport.httpTransport.RoundTrip(request) diff --git a/api/http/proxy/factory/kubernetes/transport_test.go b/api/http/proxy/factory/kubernetes/transport_test.go new file mode 100644 index 000000000..713714d93 --- /dev/null +++ b/api/http/proxy/factory/kubernetes/transport_test.go @@ -0,0 +1,359 @@ +package kubernetes + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/datastore" + "github.com/portainer/portainer/api/http/security" + "github.com/portainer/portainer/api/jwt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// MockJWTService implements portainer.JWTService for testing +type MockJWTService struct { + generateTokenFunc func(data *portainer.TokenData) (string, error) +} + +func (m *MockJWTService) GenerateToken(data *portainer.TokenData) (string, time.Time, error) { + if m.generateTokenFunc != nil { + token, err := m.generateTokenFunc(data) + return token, time.Now().Add(24 * time.Hour), err + } + return "mock-token", time.Now().Add(24 * time.Hour), nil +} + +func (m *MockJWTService) GenerateTokenForKubeconfig(data *portainer.TokenData) (string, error) { + if m.generateTokenFunc != nil { + return m.generateTokenFunc(data) + } + return "mock-kubeconfig-token", nil +} + +func (m *MockJWTService) ParseAndVerifyToken(token string) (*portainer.TokenData, string, time.Time, error) { + return &portainer.TokenData{ID: 1, Username: "mock", Role: portainer.AdministratorRole}, "mock-id", time.Now().Add(24 * time.Hour), nil +} + +func (m *MockJWTService) SetUserSessionDuration(userSessionDuration time.Duration) { + // Mock implementation - not used in tests +} + +func TestBaseTransport_AddTokenForExec(t *testing.T) { + // Setup test store and JWT service + _, store := datastore.MustNewTestStore(t, true, false) + + // Create test users + adminUser := &portainer.User{ + ID: 1, + Username: "admin", + Role: portainer.AdministratorRole, + } + err := store.User().Create(adminUser) + require.NoError(t, err) + + standardUser := &portainer.User{ + ID: 2, + Username: "standard", + Role: portainer.StandardUserRole, + } + err = store.User().Create(standardUser) + require.NoError(t, err) + + // Create JWT 
service + jwtService, err := jwt.NewService("24h", store) + require.NoError(t, err) + + // Create base transport + transport := &baseTransport{ + jwtService: jwtService, + } + + tests := []struct { + name string + tokenData *portainer.TokenData + setupRequest func(*http.Request) *http.Request + expectError bool + errorMsg string + expectPanic bool + verifyResponse func(*testing.T, *http.Request, *portainer.TokenData) + }{ + { + name: "admin user - successful token generation", + tokenData: &portainer.TokenData{ + ID: adminUser.ID, + Username: adminUser.Username, + Role: adminUser.Role, + }, + setupRequest: func(req *http.Request) *http.Request { + return req.WithContext(security.StoreTokenData(req, &portainer.TokenData{ + ID: adminUser.ID, + Username: adminUser.Username, + Role: adminUser.Role, + })) + }, + expectError: false, + verifyResponse: func(t *testing.T, req *http.Request, tokenData *portainer.TokenData) { + authHeader := req.Header.Get("Authorization") + assert.NotEmpty(t, authHeader) + assert.True(t, strings.HasPrefix(authHeader, "Bearer ")) + + token := authHeader[7:] // Remove "Bearer " prefix + parsedTokenData, _, _, err := jwtService.ParseAndVerifyToken(token) + assert.NoError(t, err) + assert.Equal(t, tokenData.ID, parsedTokenData.ID) + assert.Equal(t, tokenData.Username, parsedTokenData.Username) + assert.Equal(t, tokenData.Role, parsedTokenData.Role) + }, + }, + { + name: "standard user - successful token generation", + tokenData: &portainer.TokenData{ + ID: standardUser.ID, + Username: standardUser.Username, + Role: standardUser.Role, + }, + setupRequest: func(req *http.Request) *http.Request { + return req.WithContext(security.StoreTokenData(req, &portainer.TokenData{ + ID: standardUser.ID, + Username: standardUser.Username, + Role: standardUser.Role, + })) + }, + expectError: false, + verifyResponse: func(t *testing.T, req *http.Request, tokenData *portainer.TokenData) { + authHeader := req.Header.Get("Authorization") + assert.NotEmpty(t, authHeader) + assert.True(t, strings.HasPrefix(authHeader, "Bearer ")) + + token := authHeader[7:] // Remove "Bearer " prefix + parsedTokenData, _, _, err := jwtService.ParseAndVerifyToken(token) + assert.NoError(t, err) + assert.Equal(t, tokenData.ID, parsedTokenData.ID) + assert.Equal(t, tokenData.Username, parsedTokenData.Username) + assert.Equal(t, tokenData.Role, parsedTokenData.Role) + }, + }, + { + name: "request without token data in context", + tokenData: nil, + setupRequest: func(req *http.Request) *http.Request { + return req // Don't add token data to context + }, + expectError: true, + errorMsg: "Unable to find JWT data in request context", + }, + { + name: "request with nil token data", + tokenData: nil, + setupRequest: func(req *http.Request) *http.Request { + return req.WithContext(security.StoreTokenData(req, nil)) + }, + expectPanic: true, + }, + { + name: "JWT service failure", + tokenData: &portainer.TokenData{ + ID: 1, + Username: "test", + Role: portainer.AdministratorRole, + }, + setupRequest: func(req *http.Request) *http.Request { + return req.WithContext(security.StoreTokenData(req, &portainer.TokenData{ + ID: 1, + Username: "test", + Role: portainer.AdministratorRole, + })) + }, + expectPanic: true, + }, + { + name: "verify authorization header format", + tokenData: &portainer.TokenData{ + ID: adminUser.ID, + Username: adminUser.Username, + Role: adminUser.Role, + }, + setupRequest: func(req *http.Request) *http.Request { + return req.WithContext(security.StoreTokenData(req, &portainer.TokenData{ + ID: 
adminUser.ID, + Username: adminUser.Username, + Role: adminUser.Role, + })) + }, + expectError: false, + verifyResponse: func(t *testing.T, req *http.Request, tokenData *portainer.TokenData) { + authHeader := req.Header.Get("Authorization") + assert.NotEmpty(t, authHeader) + assert.True(t, strings.HasPrefix(authHeader, "Bearer ")) + + token := authHeader[7:] // Remove "Bearer " prefix + assert.NotEmpty(t, token) + assert.Greater(t, len(token), 0, "Token should not be empty") + }, + }, + { + name: "verify header is overwritten on subsequent calls", + tokenData: &portainer.TokenData{ + ID: adminUser.ID, + Username: adminUser.Username, + Role: adminUser.Role, + }, + setupRequest: func(req *http.Request) *http.Request { + req = req.WithContext(security.StoreTokenData(req, &portainer.TokenData{ + ID: adminUser.ID, + Username: adminUser.Username, + Role: adminUser.Role, + })) + // Set an existing Authorization header + req.Header.Set("Authorization", "Bearer old-token") + return req + }, + expectError: false, + verifyResponse: func(t *testing.T, req *http.Request, tokenData *portainer.TokenData) { + authHeader := req.Header.Get("Authorization") + assert.NotEqual(t, "Bearer old-token", authHeader) + assert.True(t, strings.HasPrefix(authHeader, "Bearer ")) + + token := authHeader[7:] // Remove "Bearer " prefix + parsedTokenData, _, _, err := jwtService.ParseAndVerifyToken(token) + assert.NoError(t, err) + assert.Equal(t, tokenData.ID, parsedTokenData.ID) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create request + request := httptest.NewRequest("GET", "/", nil) + request = tt.setupRequest(request) + + // Determine which transport to use based on test case + var testTransport *baseTransport + if tt.name == "JWT service failure" { + testTransport = &baseTransport{ + jwtService: nil, + } + } else { + testTransport = transport + } + + // Call the function + if tt.expectPanic { + assert.Panics(t, func() { + _ = testTransport.addTokenForExec(request) + }) + return + } + + err := testTransport.addTokenForExec(request) + + // Check results + if tt.expectError { + assert.Error(t, err) + if tt.errorMsg != "" { + assert.Contains(t, err.Error(), tt.errorMsg) + } + } else { + assert.NoError(t, err) + if tt.verifyResponse != nil { + tt.verifyResponse(t, request, tt.tokenData) + } + } + }) + } +} + +func TestBaseTransport_AddTokenForExec_Integration(t *testing.T) { + // Create a test HTTP server to capture requests + var capturedRequest *http.Request + var capturedHeaders http.Header + + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + capturedRequest = r + capturedHeaders = r.Header.Clone() + w.WriteHeader(http.StatusOK) + w.Write([]byte("success")) + })) + defer testServer.Close() + + // Create mock JWT service + mockJWTService := &MockJWTService{ + generateTokenFunc: func(data *portainer.TokenData) (string, error) { + return "mock-token-" + data.Username, nil + }, + } + + // Create base transport + transport := &baseTransport{ + httpTransport: &http.Transport{}, + jwtService: mockJWTService, + } + + tests := []struct { + name string + tokenData *portainer.TokenData + requestPath string + expectedToken string + }{ + { + name: "admin user exec request", + tokenData: &portainer.TokenData{ + ID: 1, + Username: "admin", + Role: portainer.AdministratorRole, + }, + requestPath: "/api/endpoints/1/kubernetes/api/v1/namespaces/default/pods/test-pod/exec", + expectedToken: "mock-token-admin", + }, + { + name: "standard user 
exec request", + tokenData: &portainer.TokenData{ + ID: 2, + Username: "standard", + Role: portainer.StandardUserRole, + }, + requestPath: "/api/endpoints/1/kubernetes/api/v1/namespaces/default/pods/test-pod/exec", + expectedToken: "mock-token-standard", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Reset captured data + capturedRequest = nil + capturedHeaders = nil + + // Create request to the test server + request, err := http.NewRequest("POST", testServer.URL+tt.requestPath, strings.NewReader("")) + require.NoError(t, err) + + // Add token data to request context + request = request.WithContext(security.StoreTokenData(request, tt.tokenData)) + + // Call proxyPodsRequest which triggers addTokenForExec for POST /exec requests + resp, err := transport.proxyPodsRequest(request, "default") + require.NoError(t, err) + defer resp.Body.Close() + + // Verify the response + assert.Equal(t, http.StatusOK, resp.StatusCode) + + // Verify the request was captured + assert.NotNil(t, capturedRequest) + assert.Equal(t, "POST", capturedRequest.Method) + assert.Equal(t, tt.requestPath, capturedRequest.URL.Path) + + // Verify the authorization header was set correctly + capturedAuthHeader := capturedHeaders.Get("Authorization") + assert.NotEmpty(t, capturedAuthHeader) + assert.True(t, strings.HasPrefix(capturedAuthHeader, "Bearer ")) + assert.Equal(t, "Bearer "+tt.expectedToken, capturedAuthHeader) + }) + } +} diff --git a/api/http/proxy/factory/reverse_proxy.go b/api/http/proxy/factory/reverse_proxy.go index 93d22f94d..a1bb3fa28 100644 --- a/api/http/proxy/factory/reverse_proxy.go +++ b/api/http/proxy/factory/reverse_proxy.go @@ -7,12 +7,34 @@ import ( "strings" ) +// Note that we discard any non-canonical headers by design +var allowedHeaders = map[string]struct{}{ + "Accept": {}, + "Accept-Encoding": {}, + "Accept-Language": {}, + "Cache-Control": {}, + "Connection": {}, + "Content-Length": {}, + "Content-Type": {}, + "Private-Token": {}, + "Upgrade": {}, + "User-Agent": {}, + "X-Portaineragent-Target": {}, + "X-Portainer-Volumename": {}, + "X-Registry-Auth": {}, + "X-Stream-Protocol-Version": {}, +} + // newSingleHostReverseProxyWithHostHeader is based on NewSingleHostReverseProxy // from golang.org/src/net/http/httputil/reverseproxy.go and merely sets the Host // HTTP header, which NewSingleHostReverseProxy deliberately preserves. -func newSingleHostReverseProxyWithHostHeader(target *url.URL) *httputil.ReverseProxy { +func NewSingleHostReverseProxyWithHostHeader(target *url.URL) *httputil.ReverseProxy { + return &httputil.ReverseProxy{Director: createDirector(target)} +} + +func createDirector(target *url.URL) func(*http.Request) { targetQuery := target.RawQuery - director := func(req *http.Request) { + return func(req *http.Request) { req.URL.Scheme = target.Scheme req.URL.Host = target.Host req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path) @@ -26,8 +48,14 @@ func newSingleHostReverseProxyWithHostHeader(target *url.URL) *httputil.ReverseP // explicitly disable User-Agent so it's not set to default value req.Header.Set("User-Agent", "") } + + for k := range req.Header { + if _, ok := allowedHeaders[k]; !ok { + // We use delete here instead of req.Header.Del because we want to delete non canonical headers. 
+ delete(req.Header, k) + } + } } - return &httputil.ReverseProxy{Director: director} } // singleJoiningSlash from golang.org/src/net/http/httputil/reverseproxy.go diff --git a/api/http/proxy/factory/reverse_proxy_test.go b/api/http/proxy/factory/reverse_proxy_test.go new file mode 100644 index 000000000..6f23d75ec --- /dev/null +++ b/api/http/proxy/factory/reverse_proxy_test.go @@ -0,0 +1,190 @@ +package factory + +import ( + "net/http" + "net/url" + "testing" + + "github.com/google/go-cmp/cmp" + portainer "github.com/portainer/portainer/api" +) + +func Test_createDirector(t *testing.T) { + testCases := []struct { + name string + target *url.URL + req *http.Request + expectedReq *http.Request + }{ + { + name: "base case", + target: createURL(t, "https://portainer.io/api/docker?a=5&b=6"), + req: createRequest( + t, + "GET", + "https://agent-portainer.io/test?c=7", + map[string]string{"Accept-Encoding": "gzip", "Accept": "application/json", "User-Agent": "something"}, + true, + ), + expectedReq: createRequest( + t, + "GET", + "https://portainer.io/api/docker/test?a=5&b=6&c=7", + map[string]string{"Accept-Encoding": "gzip", "Accept": "application/json", "User-Agent": "something"}, + true, + ), + }, + { + name: "no User-Agent", + target: createURL(t, "https://portainer.io/api/docker?a=5&b=6"), + req: createRequest( + t, + "GET", + "https://agent-portainer.io/test?c=7", + map[string]string{"Accept-Encoding": "gzip", "Accept": "application/json"}, + true, + ), + expectedReq: createRequest( + t, + "GET", + "https://portainer.io/api/docker/test?a=5&b=6&c=7", + map[string]string{"Accept-Encoding": "gzip", "Accept": "application/json", "User-Agent": ""}, + true, + ), + }, + { + name: "Sensitive Headers", + target: createURL(t, "https://portainer.io/api/docker?a=5&b=6"), + req: createRequest( + t, + "GET", + "https://agent-portainer.io/test?c=7", + map[string]string{ + "Authorization": "secret", + "Proxy-Authorization": "secret", + "Cookie": "secret", + "X-Csrf-Token": "secret", + "X-Api-Key": "secret", + "Accept": "application/json", + "Accept-Encoding": "gzip", + "Accept-Language": "en-GB", + "Cache-Control": "None", + "Content-Length": "100", + "Content-Type": "application/json", + "Private-Token": "test-private-token", + "User-Agent": "test-user-agent", + "X-Portaineragent-Target": "test-agent-1", + "X-Portainer-Volumename": "test-volume-1", + "X-Registry-Auth": "test-registry-auth", + }, + true, + ), + expectedReq: createRequest( + t, + "GET", + "https://portainer.io/api/docker/test?a=5&b=6&c=7", + map[string]string{ + "Accept": "application/json", + "Accept-Encoding": "gzip", + "Accept-Language": "en-GB", + "Cache-Control": "None", + "Content-Length": "100", + "Content-Type": "application/json", + "Private-Token": "test-private-token", + "User-Agent": "test-user-agent", + "X-Portaineragent-Target": "test-agent-1", + "X-Portainer-Volumename": "test-volume-1", + "X-Registry-Auth": "test-registry-auth", + }, + true, + ), + }, + { + name: "Non canonical Headers", + target: createURL(t, "https://portainer.io/api/docker?a=5&b=6"), + req: createRequest( + t, + "GET", + "https://agent-portainer.io/test?c=7", + map[string]string{ + "Accept": "application/json", + "Accept-Encoding": "gzip", + "Accept-Language": "en-GB", + "Cache-Control": "None", + "Content-Length": "100", + "Content-Type": "application/json", + "Private-Token": "test-private-token", + "User-Agent": "test-user-agent", + portainer.PortainerAgentTargetHeader: "test-agent-1", + "X-Portainer-VolumeName": "test-volume-1", + 
"X-Registry-Auth": "test-registry-auth", + }, + false, + ), + expectedReq: createRequest( + t, + "GET", + "https://portainer.io/api/docker/test?a=5&b=6&c=7", + map[string]string{ + "Accept": "application/json", + "Accept-Encoding": "gzip", + "Accept-Language": "en-GB", + "Cache-Control": "None", + "Content-Length": "100", + "Content-Type": "application/json", + "Private-Token": "test-private-token", + "User-Agent": "test-user-agent", + "X-Registry-Auth": "test-registry-auth", + }, + true, + ), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + director := createDirector(tc.target) + director(tc.req) + + if diff := cmp.Diff(tc.req, tc.expectedReq, cmp.Comparer(compareRequests)); diff != "" { + t.Fatalf("requests are different: \n%s", diff) + } + }) + } +} + +func createURL(t *testing.T, urlString string) *url.URL { + parsedURL, err := url.Parse(urlString) + if err != nil { + t.Fatalf("Failed to create url: %s", err) + } + + return parsedURL +} + +func createRequest(t *testing.T, method, url string, headers map[string]string, canonicalHeaders bool) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + t.Fatalf("Failed to create http request: %s", err) + } else { + for k, v := range headers { + if canonicalHeaders { + req.Header.Add(k, v) + } else { + req.Header[k] = []string{v} + } + } + } + + return req +} + +func compareRequests(a, b *http.Request) bool { + methodEqual := a.Method == b.Method + urlEqual := cmp.Diff(a.URL, b.URL) == "" + hostEqual := a.Host == b.Host + protoEqual := a.Proto == b.Proto && a.ProtoMajor == b.ProtoMajor && a.ProtoMinor == b.ProtoMinor + headersEqual := cmp.Diff(a.Header, b.Header) == "" + + return methodEqual && urlEqual && hostEqual && protoEqual && headersEqual +} diff --git a/api/http/proxy/manager.go b/api/http/proxy/manager.go index 16f822028..477bc547b 100644 --- a/api/http/proxy/manager.go +++ b/api/http/proxy/manager.go @@ -32,8 +32,8 @@ func NewManager(kubernetesClientFactory *cli.ClientFactory) *Manager { } } -func (manager *Manager) NewProxyFactory(dataStore dataservices.DataStore, signatureService portainer.DigitalSignatureService, tunnelService portainer.ReverseTunnelService, clientFactory *dockerclient.ClientFactory, kubernetesClientFactory *cli.ClientFactory, kubernetesTokenCacheManager *kubernetes.TokenCacheManager, gitService portainer.GitService, snapshotService portainer.SnapshotService) { - manager.proxyFactory = factory.NewProxyFactory(dataStore, signatureService, tunnelService, clientFactory, kubernetesClientFactory, kubernetesTokenCacheManager, gitService, snapshotService) +func (manager *Manager) NewProxyFactory(dataStore dataservices.DataStore, signatureService portainer.DigitalSignatureService, tunnelService portainer.ReverseTunnelService, clientFactory *dockerclient.ClientFactory, kubernetesClientFactory *cli.ClientFactory, kubernetesTokenCacheManager *kubernetes.TokenCacheManager, gitService portainer.GitService, snapshotService portainer.SnapshotService, jwtService portainer.JWTService) { + manager.proxyFactory = factory.NewProxyFactory(dataStore, signatureService, tunnelService, clientFactory, kubernetesClientFactory, kubernetesTokenCacheManager, gitService, snapshotService, jwtService) } // CreateAndRegisterEndpointProxy creates a new HTTP reverse proxy based on environment(endpoint) properties and adds it to the registered proxies. 
diff --git a/api/http/security/bouncer.go b/api/http/security/bouncer.go index 00f69e328..55b7faecc 100644 --- a/api/http/security/bouncer.go +++ b/api/http/security/bouncer.go @@ -35,6 +35,7 @@ type ( JWTAuthLookup(*http.Request) (*portainer.TokenData, error) TrustedEdgeEnvironmentAccess(dataservices.DataStoreTx, *portainer.Endpoint) error RevokeJWT(string) + DisableCSP() } // RequestBouncer represents an entity that manages API request accesses @@ -72,7 +73,7 @@ func NewRequestBouncer(dataStore dataservices.DataStore, jwtService portainer.JW jwtService: jwtService, apiKeyService: apiKeyService, hsts: featureflags.IsEnabled("hsts"), - csp: featureflags.IsEnabled("csp"), + csp: true, } go b.cleanUpExpiredJWT() @@ -80,6 +81,11 @@ func NewRequestBouncer(dataStore dataservices.DataStore, jwtService portainer.JW return b } +// DisableCSP disables Content Security Policy +func (bouncer *RequestBouncer) DisableCSP() { + bouncer.csp = false +} + // PublicAccess defines a security check for public API endpoints. // No authentication is required to access these endpoints. func (bouncer *RequestBouncer) PublicAccess(h http.Handler) http.Handler { @@ -243,8 +249,7 @@ func (bouncer *RequestBouncer) mwCheckPortainerAuthorizations(next http.Handler, return } - _, err = bouncer.dataStore.User().Read(tokenData.ID) - if bouncer.dataStore.IsErrObjectNotFound(err) { + if ok, err := bouncer.dataStore.User().Exists(tokenData.ID); !ok { httperror.WriteError(w, http.StatusUnauthorized, "Unauthorized", httperrors.ErrUnauthorized) return } else if err != nil { @@ -322,9 +327,8 @@ func (bouncer *RequestBouncer) mwAuthenticateFirst(tokenLookups []tokenLookup, n return } - user, _ := bouncer.dataStore.User().Read(token.ID) - if user == nil { - httperror.WriteError(w, http.StatusUnauthorized, "An authorization token is invalid", httperrors.ErrUnauthorized) + if ok, _ := bouncer.dataStore.User().Exists(token.ID); !ok { + httperror.WriteError(w, http.StatusUnauthorized, "The authorization token is invalid", httperrors.ErrUnauthorized) return } @@ -530,7 +534,7 @@ func MWSecureHeaders(next http.Handler, hsts, csp bool) http.Handler { } if csp { - w.Header().Set("Content-Security-Policy", "script-src 'self' cdn.matomo.cloud") + w.Header().Set("Content-Security-Policy", "script-src 'self' cdn.matomo.cloud js.hsforms.net; frame-ancestors 'none';") } w.Header().Set("X-Content-Type-Options", "nosniff") diff --git a/api/http/security/bouncer_test.go b/api/http/security/bouncer_test.go index 9553b90c5..3dd42fdc5 100644 --- a/api/http/security/bouncer_test.go +++ b/api/http/security/bouncer_test.go @@ -344,6 +344,7 @@ func Test_apiKeyLookup(t *testing.T) { req.Header.Add("x-api-key", rawAPIKey) token, err := bouncer.apiKeyLookup(req) + require.NoError(t, err) expectedToken := &portainer.TokenData{ID: user.ID, Username: user.Username, Role: portainer.StandardUserRole} is.Equal(expectedToken, token) @@ -358,6 +359,7 @@ func Test_apiKeyLookup(t *testing.T) { req.Header.Add("x-api-key", rawAPIKey) token, err := bouncer.apiKeyLookup(req) + require.NoError(t, err) expectedToken := &portainer.TokenData{ID: user.ID, Username: user.Username, Role: portainer.StandardUserRole} is.Equal(expectedToken, token) @@ -372,6 +374,7 @@ func Test_apiKeyLookup(t *testing.T) { req.Header.Add("x-api-key", rawAPIKey) token, err := bouncer.apiKeyLookup(req) + require.NoError(t, err) expectedToken := &portainer.TokenData{ID: user.ID, Username: user.Username, Role: portainer.StandardUserRole} is.Equal(expectedToken, token) @@ -527,3 +530,34 @@ func 
TestJWTRevocation(t *testing.T) { require.Equal(t, 1, revokeLen()) } + +func TestCSPHeaderDefault(t *testing.T) { + b := NewRequestBouncer(nil, nil, nil) + + srv := httptest.NewServer( + b.PublicAccess(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})), + ) + defer srv.Close() + + resp, err := http.Get(srv.URL + "/") + require.NoError(t, err) + defer resp.Body.Close() + + require.Contains(t, resp.Header, "Content-Security-Policy") +} + +func TestCSPHeaderDisabled(t *testing.T) { + b := NewRequestBouncer(nil, nil, nil) + b.DisableCSP() + + srv := httptest.NewServer( + b.PublicAccess(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})), + ) + defer srv.Close() + + resp, err := http.Get(srv.URL + "/") + require.NoError(t, err) + defer resp.Body.Close() + + require.NotContains(t, resp.Header, "Content-Security-Policy") +} diff --git a/api/http/security/rate_limiter_test.go b/api/http/security/rate_limiter_test.go index 351eb6577..6f63028ac 100644 --- a/api/http/security/rate_limiter_test.go +++ b/api/http/security/rate_limiter_test.go @@ -33,8 +33,13 @@ func TestLimitAccess(t *testing.T) { ts := httptest.NewServer(handler) defer ts.Close() - http.Get(ts.URL) + resp, err := http.Get(ts.URL) + if err == nil { + resp.Body.Close() + } + + resp, err = http.Get(ts.URL) if err != nil { t.Fatal(err) } diff --git a/api/http/server.go b/api/http/server.go index ec86e6220..54089a254 100644 --- a/api/http/server.go +++ b/api/http/server.go @@ -24,7 +24,6 @@ import ( "github.com/portainer/portainer/api/http/handler/edgegroups" "github.com/portainer/portainer/api/http/handler/edgejobs" "github.com/portainer/portainer/api/http/handler/edgestacks" - "github.com/portainer/portainer/api/http/handler/edgetemplates" "github.com/portainer/portainer/api/http/handler/endpointedge" "github.com/portainer/portainer/api/http/handler/endpointgroups" "github.com/portainer/portainer/api/http/handler/endpointproxy" @@ -68,7 +67,7 @@ import ( "github.com/portainer/portainer/api/platform" "github.com/portainer/portainer/api/scheduler" "github.com/portainer/portainer/api/stacks/deployments" - "github.com/portainer/portainer/pkg/libhelm" + libhelmtypes "github.com/portainer/portainer/pkg/libhelm/types" "github.com/rs/zerolog/log" ) @@ -78,6 +77,7 @@ type Server struct { AuthorizationService *authorization.Service BindAddress string BindAddressHTTPS string + CSP bool HTTPEnabled bool AssetsPath string Status *portainer.Status @@ -104,7 +104,7 @@ type Server struct { DockerClientFactory *dockerclient.ClientFactory KubernetesClientFactory *cli.ClientFactory KubernetesDeployer portainer.KubernetesDeployer - HelmPackageManager libhelm.HelmPackageManager + HelmPackageManager libhelmtypes.HelmPackageManager Scheduler *scheduler.Scheduler ShutdownCtx context.Context ShutdownTrigger context.CancelFunc @@ -113,6 +113,8 @@ type Server struct { AdminCreationDone chan struct{} PendingActionsService *pendingactions.PendingActionsService PlatformService platform.Service + PullLimitCheckDisabled bool + TrustedOrigins []string } // Start starts the HTTP server @@ -120,13 +122,16 @@ func (server *Server) Start() error { kubernetesTokenCacheManager := server.KubernetesTokenCacheManager requestBouncer := security.NewRequestBouncer(server.DataStore, server.JWTService, server.APIKeyService) + if !server.CSP { + requestBouncer.DisableCSP() + } rateLimiter := security.NewRateLimiter(10, 1*time.Second, 1*time.Hour) offlineGate := offlinegate.NewOfflineGate() passwordStrengthChecker := 
security.NewPasswordStrengthChecker(server.DataStore.Settings()) - var authHandler = auth.NewHandler(requestBouncer, rateLimiter, passwordStrengthChecker) + var authHandler = auth.NewHandler(requestBouncer, rateLimiter, passwordStrengthChecker, server.KubernetesClientFactory) authHandler.DataStore = server.DataStore authHandler.CryptoService = server.CryptoService authHandler.JWTService = server.JWTService @@ -161,17 +166,11 @@ func (server *Server) Start() error { edgeJobsHandler.FileService = server.FileService edgeJobsHandler.ReverseTunnelService = server.ReverseTunnelService - edgeStackCoordinator := edgestacks.NewEdgeStackStatusUpdateCoordinator(server.DataStore) - go edgeStackCoordinator.Start() - - var edgeStacksHandler = edgestacks.NewHandler(requestBouncer, server.DataStore, server.EdgeStacksService, edgeStackCoordinator) + var edgeStacksHandler = edgestacks.NewHandler(requestBouncer, server.DataStore, server.EdgeStacksService) edgeStacksHandler.FileService = server.FileService edgeStacksHandler.GitService = server.GitService edgeStacksHandler.KubernetesDeployer = server.KubernetesDeployer - var edgeTemplatesHandler = edgetemplates.NewHandler(requestBouncer) - edgeTemplatesHandler.DataStore = server.DataStore - var endpointHandler = endpoints.NewHandler(requestBouncer) endpointHandler.DataStore = server.DataStore endpointHandler.FileService = server.FileService @@ -185,6 +184,7 @@ func (server *Server) Start() error { endpointHandler.BindAddress = server.BindAddress endpointHandler.BindAddressHTTPS = server.BindAddressHTTPS endpointHandler.PendingActionsService = server.PendingActionsService + endpointHandler.PullLimitCheckDisabled = server.PullLimitCheckDisabled var endpointEdgeHandler = endpointedge.NewHandler(requestBouncer, server.DataStore, server.FileService, server.ReverseTunnelService) @@ -204,7 +204,7 @@ func (server *Server) Start() error { var dockerHandler = dockerhandler.NewHandler(requestBouncer, server.AuthorizationService, server.DataStore, server.DockerClientFactory, containerService) - var fileHandler = file.NewHandler(filepath.Join(server.AssetsPath, "public"), adminMonitor.WasInstanceDisabled) + var fileHandler = file.NewHandler(filepath.Join(server.AssetsPath, "public"), server.CSP, adminMonitor.WasInstanceDisabled) var endpointHelmHandler = helm.NewHandler(requestBouncer, server.DataStore, server.JWTService, server.KubernetesDeployer, server.HelmPackageManager, server.KubeClusterAccessService) @@ -306,7 +306,6 @@ func (server *Server) Start() error { EdgeGroupsHandler: edgeGroupsHandler, EdgeJobsHandler: edgeJobsHandler, EdgeStacksHandler: edgeStacksHandler, - EdgeTemplatesHandler: edgeTemplatesHandler, EndpointGroupHandler: endpointGroupHandler, EndpointHandler: endpointHandler, EndpointHelmHandler: endpointHelmHandler, @@ -340,9 +339,9 @@ func (server *Server) Start() error { handler := adminMonitor.WithRedirect(offlineGate.WaitingMiddleware(time.Minute, server.Handler)) - handler = middlewares.WithSlowRequestsLogger(handler) + handler = middlewares.WithPanicLogger(middlewares.WithSlowRequestsLogger(handler)) - handler, err := csrf.WithProtect(handler) + handler, err := csrf.WithProtect(handler, server.TrustedOrigins) if err != nil { return errors.Wrap(err, "failed to create CSRF middleware") } @@ -352,7 +351,7 @@ func (server *Server) Start() error { log.Info().Str("bind_address", server.BindAddress).Msg("starting HTTP server") httpServer := &http.Server{ Addr: server.BindAddress, - Handler: handler, + Handler: middlewares.PlaintextHTTPRequest(handler), 
ErrorLog: errorLogger, } @@ -373,7 +372,7 @@ func (server *Server) Start() error { TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), // Disable HTTP/2 } - httpsServer.TLSConfig = crypto.CreateTLSConfiguration() + httpsServer.TLSConfig = crypto.CreateTLSConfiguration(false) httpsServer.TLSConfig.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) { return server.SSLService.GetRawCertificate(), nil } diff --git a/api/http/utils/filters/filters.go b/api/http/utils/filters/filters.go new file mode 100644 index 000000000..2e22d19a2 --- /dev/null +++ b/api/http/utils/filters/filters.go @@ -0,0 +1,38 @@ +package filters + +import ( + "net/http" + "strconv" +) + +type FilterResult[T any] struct { + Items []T + TotalCount int + TotalAvailable int +} + +type Config[T any] struct { + SearchAccessors []SearchAccessor[T] + SortBindings []SortBinding[T] +} + +func SearchOrderAndPaginate[T any](items []T, params QueryParams, searchConfig Config[T]) FilterResult[T] { + totalAvailable := len(items) + + items = searchFn(items, params.SearchQueryParams, searchConfig.SearchAccessors) + items = sortFn(items, params.SortQueryParams, searchConfig.SortBindings) + + totalCount := len(items) + items = paginateFn(items, params.PaginationQueryParams) + + return FilterResult[T]{ + Items: items, + TotalCount: totalCount, + TotalAvailable: totalAvailable, + } +} + +func ApplyFilterResultsHeaders[T any](w *http.ResponseWriter, result FilterResult[T]) { + (*w).Header().Set("X-Total-Count", strconv.Itoa(result.TotalCount)) + (*w).Header().Set("X-Total-Available", strconv.Itoa(result.TotalAvailable)) +} diff --git a/api/http/utils/filters/filters_test.go b/api/http/utils/filters/filters_test.go new file mode 100644 index 000000000..0b0780fdf --- /dev/null +++ b/api/http/utils/filters/filters_test.go @@ -0,0 +1,465 @@ +package filters + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +// Helper functions for creating test data +func createUsers() []User { + return []User{ + {ID: 1, Name: "Alice Johnson", Email: "alice@example.com", Age: 25}, + {ID: 2, Name: "Bob Smith", Email: "bob@example.com", Age: 30}, + {ID: 3, Name: "Charlie Brown", Email: "charlie@example.com", Age: 35}, + {ID: 4, Name: "Diana Prince", Email: "diana@example.com", Age: 28}, + {ID: 5, Name: "Eve Adams", Email: "eve@example.com", Age: 22}, + } +} + +func createProducts() []Product { + return []Product{ + {ID: 1, Name: "Laptop", Description: "High-performance laptop", Price: 999, Category: "Electronics"}, + {ID: 2, Name: "Mouse", Description: "Wireless mouse", Price: 29, Category: "Electronics"}, + {ID: 3, Name: "Book", Description: "Programming book", Price: 49, Category: "Books"}, + {ID: 4, Name: "Keyboard", Description: "Mechanical keyboard", Price: 129, Category: "Electronics"}, + {ID: 5, Name: "Chair", Description: "Office chair", Price: 199, Category: "Furniture"}, + } +} + +// Sort functions +func userNameSort(a, b User) int { + return strings.Compare(a.Name, b.Name) +} + +func userAgeSort(a, b User) int { + return a.Age - b.Age +} + +func productPriceSort(a, b Product) int { + if a.Price < b.Price { + return -1 + } + if a.Price > b.Price { + return 1 + } + return 0 +} + +func productNameSort(a, b Product) int { + return strings.Compare(a.Name, b.Name) +} + +func TestSearchOrderAndPaginate(t *testing.T) { + users := createUsers() + products := createProducts() + + userConfig := Config[User]{ + SearchAccessors: 
[]SearchAccessor[User]{userNameAccessor, userEmailAccessor}, + SortBindings: []SortBinding[User]{ + {Key: "name", Fn: userNameSort}, + {Key: "age", Fn: userAgeSort}, + }, + } + + productConfig := Config[Product]{ + SearchAccessors: []SearchAccessor[Product]{productNameAccessor, productDescriptionAccessor, productCategoryAccessor}, + SortBindings: []SortBinding[Product]{ + {Key: "price", Fn: productPriceSort}, + {Key: "name", Fn: productNameSort}, + }, + } + + t.Run("no filters applied", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: ""}, + SortQueryParams: SortQueryParams{sort: "", order: ""}, + PaginationQueryParams: PaginationQueryParams{start: 0, limit: 0}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 5, len(result.Items), "Should return all items when no filters applied") + require.Equal(t, 5, result.TotalCount, "TotalCount should equal filtered items") + require.Equal(t, 5, result.TotalAvailable, "TotalAvailable should equal original items") + require.Equal(t, users, result.Items, "Items should be unchanged") + }) + + t.Run("search only", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: "alice"}, + SortQueryParams: SortQueryParams{sort: "", order: ""}, + PaginationQueryParams: PaginationQueryParams{start: 0, limit: 0}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 1, len(result.Items), "Should find one user matching 'alice'") + require.Equal(t, 1, result.TotalCount, "TotalCount should reflect filtered items") + require.Equal(t, 5, result.TotalAvailable, "TotalAvailable should be original count") + require.Equal(t, "Alice Johnson", result.Items[0].Name, "Should return Alice") + }) + + t.Run("search case insensitive", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: "ALICE"}, + SortQueryParams: SortQueryParams{sort: "", order: ""}, + PaginationQueryParams: PaginationQueryParams{start: 0, limit: 0}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 1, len(result.Items), "Search should be case insensitive") + require.Equal(t, "Alice Johnson", result.Items[0].Name, "Should return Alice") + }) + + t.Run("search by email", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: "bob@example"}, + SortQueryParams: SortQueryParams{sort: "", order: ""}, + PaginationQueryParams: PaginationQueryParams{start: 0, limit: 0}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 1, len(result.Items), "Should find user by email") + require.Equal(t, "Bob Smith", result.Items[0].Name, "Should return Bob") + }) + + t.Run("search no matches", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: "nonexistent"}, + SortQueryParams: SortQueryParams{sort: "", order: ""}, + PaginationQueryParams: PaginationQueryParams{start: 0, limit: 0}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 0, len(result.Items), "Should return empty when no matches") + require.Equal(t, 0, result.TotalCount, "TotalCount should be 0") + require.Equal(t, 5, result.TotalAvailable, "TotalAvailable should remain original count") + }) + + t.Run("search with whitespace", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: " alice "}, + SortQueryParams: SortQueryParams{sort: "", order: ""}, + 
PaginationQueryParams: PaginationQueryParams{start: 0, limit: 0}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 1, len(result.Items), "Should trim whitespace from search") + require.Equal(t, "Alice Johnson", result.Items[0].Name, "Should return Alice") + }) + + t.Run("sort ascending", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: ""}, + SortQueryParams: SortQueryParams{sort: "name", order: SortAsc}, + PaginationQueryParams: PaginationQueryParams{start: 0, limit: 0}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 5, len(result.Items), "Should return all items") + require.Equal(t, "Alice Johnson", result.Items[0].Name, "First should be Alice") + require.Equal(t, "Eve Adams", result.Items[4].Name, "Last should be Eve") + }) + + t.Run("sort descending", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: ""}, + SortQueryParams: SortQueryParams{sort: "name", order: SortDesc}, + PaginationQueryParams: PaginationQueryParams{start: 0, limit: 0}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 5, len(result.Items), "Should return all items") + require.Equal(t, "Eve Adams", result.Items[0].Name, "First should be Eve (desc order)") + require.Equal(t, "Alice Johnson", result.Items[4].Name, "Last should be Alice (desc order)") + }) + + t.Run("sort by age", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: ""}, + SortQueryParams: SortQueryParams{sort: "age", order: SortAsc}, + PaginationQueryParams: PaginationQueryParams{start: 0, limit: 0}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 5, len(result.Items), "Should return all items") + require.Equal(t, 22, result.Items[0].Age, "First should be youngest (22)") + require.Equal(t, 35, result.Items[4].Age, "Last should be oldest (35)") + }) + + t.Run("sort invalid key", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: ""}, + SortQueryParams: SortQueryParams{sort: "invalid", order: SortAsc}, + PaginationQueryParams: PaginationQueryParams{start: 0, limit: 0}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 5, len(result.Items), "Should return all items") + // Items should remain in original order since no valid sort key + require.Equal(t, users, result.Items, "Should maintain original order with invalid sort key") + }) + + t.Run("pagination basic", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: ""}, + SortQueryParams: SortQueryParams{sort: "", order: ""}, + PaginationQueryParams: PaginationQueryParams{start: 1, limit: 2}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 2, len(result.Items), "Should return 2 items") + require.Equal(t, 5, result.TotalCount, "TotalCount should be all items") + require.Equal(t, 5, result.TotalAvailable, "TotalAvailable should be original count") + require.Equal(t, users[1], result.Items[0], "Should start from index 1") + require.Equal(t, users[2], result.Items[1], "Should include index 2") + }) + + t.Run("pagination zero limit", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: ""}, + SortQueryParams: SortQueryParams{sort: "", order: ""}, + PaginationQueryParams: PaginationQueryParams{start: 1, limit: 0}, + } + + result := 
SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 5, len(result.Items), "Should return all items when limit is 0") + require.Equal(t, users, result.Items, "Should return all original items") + }) + + t.Run("pagination negative limit", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: ""}, + SortQueryParams: SortQueryParams{sort: "", order: ""}, + PaginationQueryParams: PaginationQueryParams{start: 1, limit: -1}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 5, len(result.Items), "Should return all items when limit is negative") + }) + + t.Run("pagination start beyond length", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: ""}, + SortQueryParams: SortQueryParams{sort: "", order: ""}, + PaginationQueryParams: PaginationQueryParams{start: 10, limit: 2}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 0, len(result.Items), "Should return empty slice when start is beyond length") + require.Equal(t, 5, result.TotalCount, "TotalCount should still be original count") + }) + + t.Run("pagination negative start", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: ""}, + SortQueryParams: SortQueryParams{sort: "", order: ""}, + PaginationQueryParams: PaginationQueryParams{start: -1, limit: 2}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + require.Equal(t, 2, len(result.Items), "Should return 2 items starting from 0") + require.Equal(t, users[0], result.Items[0], "Should start from index 0") + require.Equal(t, users[1], result.Items[1], "Should include index 1") + }) + + t.Run("combined search sort and pagination", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: "example.com"}, + SortQueryParams: SortQueryParams{sort: "age", order: SortAsc}, + PaginationQueryParams: PaginationQueryParams{start: 1, limit: 2}, + } + + result := SearchOrderAndPaginate(users, params, userConfig) + + // All users have "example.com" in email, so all 5 should match search + // Then sorted by age: Eve(22), Alice(25), Diana(28), Bob(30), Charlie(35) + // Then paginated: start=1, limit=2 should give Alice(25), Diana(28) + require.Equal(t, 2, len(result.Items), "Should return 2 items after pagination") + require.Equal(t, 5, result.TotalCount, "TotalCount should be all filtered items") + require.Equal(t, 5, result.TotalAvailable, "TotalAvailable should be original count") + require.Equal(t, 25, result.Items[0].Age, "First item should be Alice (age 25)") + require.Equal(t, 28, result.Items[1].Age, "Second item should be Diana (age 28)") + }) + + t.Run("products test", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: "electronics"}, + SortQueryParams: SortQueryParams{sort: "price", order: SortAsc}, + PaginationQueryParams: PaginationQueryParams{start: 0, limit: 2}, + } + + result := SearchOrderAndPaginate(products, params, productConfig) + + // Should find 3 electronics, sorted by price: Mouse(29.99), Keyboard(129.99), Laptop(999.99) + // Paginated to first 2: Mouse, Keyboard + require.Equal(t, 2, len(result.Items), "Should return 2 items") + require.Equal(t, 3, result.TotalCount, "Should find 3 electronics items") + require.Equal(t, 5, result.TotalAvailable, "Should have 5 total products") + require.Equal(t, "Mouse", result.Items[0].Name, "First should be Mouse (cheapest)") + 
require.Equal(t, "Keyboard", result.Items[1].Name, "Second should be Keyboard") + }) + + t.Run("empty input slice", func(t *testing.T) { + emptyUsers := []User{} + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: "test"}, + SortQueryParams: SortQueryParams{sort: "name", order: SortAsc}, + PaginationQueryParams: PaginationQueryParams{start: 0, limit: 10}, + } + + result := SearchOrderAndPaginate(emptyUsers, params, userConfig) + + require.Equal(t, 0, len(result.Items), "Should return empty slice") + require.Equal(t, 0, result.TotalCount, "TotalCount should be 0") + require.Equal(t, 0, result.TotalAvailable, "TotalAvailable should be 0") + }) +} + +func TestSearchOrderAndPaginateWithErrors(t *testing.T) { + users := createUsers() + + // Config with error-prone accessor + errorConfig := Config[User]{ + SearchAccessors: []SearchAccessor[User]{errorAccessor[User], userNameAccessor}, + SortBindings: []SortBinding[User]{ + {Key: "name", Fn: userNameSort}, + }, + } + + t.Run("search with accessor errors", func(t *testing.T) { + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: "alice"}, + SortQueryParams: SortQueryParams{sort: "", order: ""}, + PaginationQueryParams: PaginationQueryParams{start: 0, limit: 0}, + } + + result := SearchOrderAndPaginate(users, params, errorConfig) + + // Should still find Alice through the working accessor + require.Equal(t, 1, len(result.Items), "Should find user despite error in first accessor") + require.Equal(t, "Alice Johnson", result.Items[0].Name, "Should return Alice") + }) +} + +func TestApplyFilterResultsHeaders(t *testing.T) { + t.Run("sets headers correctly", func(t *testing.T) { + w := httptest.NewRecorder() + var responseWriter http.ResponseWriter = w + result := FilterResult[User]{ + Items: createUsers()[:3], + TotalCount: 10, + TotalAvailable: 25, + } + + ApplyFilterResultsHeaders(&responseWriter, result) + + require.Equal(t, "10", w.Header().Get("X-Total-Count"), "Should set X-Total-Count header") + require.Equal(t, "25", w.Header().Get("X-Total-Available"), "Should set X-Total-Available header") + }) + + t.Run("sets headers with zero values", func(t *testing.T) { + w := httptest.NewRecorder() + var responseWriter http.ResponseWriter = w + result := FilterResult[User]{ + Items: []User{}, + TotalCount: 0, + TotalAvailable: 0, + } + + ApplyFilterResultsHeaders(&responseWriter, result) + + require.Equal(t, "0", w.Header().Get("X-Total-Count"), "Should set X-Total-Count to 0") + require.Equal(t, "0", w.Header().Get("X-Total-Available"), "Should set X-Total-Available to 0") + }) + + t.Run("overwrites existing headers", func(t *testing.T) { + w := httptest.NewRecorder() + var responseWriter http.ResponseWriter = w + w.Header().Set("X-Total-Count", "999") + w.Header().Set("X-Total-Available", "999") + + result := FilterResult[User]{ + Items: createUsers()[:2], + TotalCount: 5, + TotalAvailable: 15, + } + + ApplyFilterResultsHeaders(&responseWriter, result) + + require.Equal(t, "5", w.Header().Get("X-Total-Count"), "Should overwrite existing X-Total-Count") + require.Equal(t, "15", w.Header().Get("X-Total-Available"), "Should overwrite existing X-Total-Available") + }) + + t.Run("simulates real handler usage", func(t *testing.T) { + // Simulate how it's actually used in handlers + handler := func(w http.ResponseWriter, r *http.Request) { + result := FilterResult[Product]{ + Items: createProducts(), + TotalCount: 5, + TotalAvailable: 10, + } + ApplyFilterResultsHeaders(&w, result) + } + + w := httptest.NewRecorder() + 
req := httptest.NewRequest("GET", "/test", nil) + + handler(w, req) + + require.Equal(t, "5", w.Header().Get("X-Total-Count"), "Should work in handler context") + require.Equal(t, "10", w.Header().Get("X-Total-Available"), "Should work in handler context") + }) +} + +// Benchmark tests +func BenchmarkSearchOrderAndPaginate(b *testing.B) { + users := createUsers() + config := Config[User]{ + SearchAccessors: []SearchAccessor[User]{userNameAccessor, userEmailAccessor}, + SortBindings: []SortBinding[User]{ + {Key: "name", Fn: userNameSort}, + {Key: "age", Fn: userAgeSort}, + }, + } + params := QueryParams{ + SearchQueryParams: SearchQueryParams{search: "example"}, + SortQueryParams: SortQueryParams{sort: "name", order: SortAsc}, + PaginationQueryParams: PaginationQueryParams{start: 0, limit: 10}, + } + + for b.Loop() { + SearchOrderAndPaginate(users, params, config) + } +} + +func BenchmarkApplyFilterResultsHeaders(b *testing.B) { + w := httptest.NewRecorder() + var responseWriter http.ResponseWriter = w + result := FilterResult[User]{ + Items: createUsers(), + TotalCount: 100, + TotalAvailable: 500, + } + + for b.Loop() { + ApplyFilterResultsHeaders(&responseWriter, result) + } +} diff --git a/api/http/utils/filters/pagination.go b/api/http/utils/filters/pagination.go new file mode 100644 index 000000000..fd01eb89a --- /dev/null +++ b/api/http/utils/filters/pagination.go @@ -0,0 +1,22 @@ +package filters + +type PaginationQueryParams struct { + start int + limit int +} + +func paginateFn[T any](items []T, params PaginationQueryParams) []T { + if params.limit <= 0 { + return items + } + + itemsCount := len(items) + + // enforce start in [0, len(items)] + start := min(max(params.start, 0), itemsCount) + + // enforce end <= len(items) (max is unnecessary since limit > 0 and start >= 0) + end := min(start+params.limit, itemsCount) + + return items[start:end] +} diff --git a/api/http/utils/filters/pagination_test.go b/api/http/utils/filters/pagination_test.go new file mode 100644 index 000000000..4b09ac9aa --- /dev/null +++ b/api/http/utils/filters/pagination_test.go @@ -0,0 +1,245 @@ +package filters + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPaginateFn_BasicPagination(t *testing.T) { + items := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + + // First page + params := PaginationQueryParams{start: 0, limit: 3} + result := paginateFn(items, params) + require.Equal(t, []int{1, 2, 3}, result) + + // Second page + params = PaginationQueryParams{start: 3, limit: 3} + result = paginateFn(items, params) + require.Equal(t, []int{4, 5, 6}, result) + + // Third page + params = PaginationQueryParams{start: 6, limit: 3} + result = paginateFn(items, params) + require.Equal(t, []int{7, 8, 9}, result) + + // Last partial page + params = PaginationQueryParams{start: 9, limit: 3} + result = paginateFn(items, params) + require.Equal(t, []int{10}, result) +} + +func TestPaginateFn_ZeroLimit(t *testing.T) { + items := []int{1, 2, 3, 4, 5} + + params := PaginationQueryParams{start: 2, limit: 0} + result := paginateFn(items, params) + + // Should return all items when limit is 0 + require.Equal(t, items, result) +} + +func TestPaginateFn_NegativeLimit(t *testing.T) { + items := []int{1, 2, 3, 4, 5} + + params := PaginationQueryParams{start: 2, limit: -5} + result := paginateFn(items, params) + + // Should return all items when limit is negative + require.Equal(t, items, result) +} + +func TestPaginateFn_NegativeStart(t *testing.T) { + items := []int{1, 2, 3, 4, 5} + + params := 
PaginationQueryParams{start: -3, limit: 2} + result := paginateFn(items, params) + + // Should start from index 0 when start is negative + require.Equal(t, []int{1, 2}, result) +} + +func TestPaginateFn_StartBeyondLength(t *testing.T) { + items := []int{1, 2, 3, 4, 5} + + params := PaginationQueryParams{start: 10, limit: 3} + result := paginateFn(items, params) + + // Should return empty slice when start is beyond length + require.Empty(t, result) +} + +func TestPaginateFn_StartAtLength(t *testing.T) { + items := []int{1, 2, 3, 4, 5} + + params := PaginationQueryParams{start: 5, limit: 3} + result := paginateFn(items, params) + + // Should return empty slice when start equals length + require.Empty(t, result) +} + +func TestPaginateFn_LimitLargerThanRemaining(t *testing.T) { + items := []int{1, 2, 3, 4, 5} + + params := PaginationQueryParams{start: 3, limit: 10} + result := paginateFn(items, params) + + // Should return remaining items when limit exceeds available items + require.Equal(t, []int{4, 5}, result) +} + +func TestPaginateFn_EmptySlice(t *testing.T) { + items := []int{} + + params := PaginationQueryParams{start: 0, limit: 5} + result := paginateFn(items, params) + + // Should return empty slice + require.Empty(t, result) +} + +func TestPaginateFn_EmptySliceWithNegativeStart(t *testing.T) { + items := []int{} + + params := PaginationQueryParams{start: -5, limit: 3} + result := paginateFn(items, params) + + // Should return empty slice + require.Empty(t, result) +} + +func TestPaginateFn_SingleElement(t *testing.T) { + items := []int{42} + + // Take the single element + params := PaginationQueryParams{start: 0, limit: 1} + result := paginateFn(items, params) + require.Equal(t, []int{42}, result) + + // Start beyond the single element + params = PaginationQueryParams{start: 1, limit: 1} + result = paginateFn(items, params) + require.Empty(t, result) +} + +func TestPaginateFn_LimitOfOne(t *testing.T) { + items := []int{1, 2, 3, 4, 5} + + results := [][]int{} + for i := range items { + params := PaginationQueryParams{start: i, limit: 1} + result := paginateFn(items, params) + results = append(results, result) + } + + expected := [][]int{ + {1}, {2}, {3}, {4}, {5}, + } + require.Equal(t, expected, results) +} + +func TestPaginateFn_StringSlice(t *testing.T) { + items := []string{"apple", "banana", "cherry", "date", "elderberry"} + + params := PaginationQueryParams{start: 1, limit: 3} + result := paginateFn(items, params) + + require.Equal(t, []string{"banana", "cherry", "date"}, result) +} + +func TestPaginateFn_StructSlice(t *testing.T) { + type User struct { + ID int + Name string + } + + users := []User{ + {ID: 1, Name: "Alice"}, + {ID: 2, Name: "Bob"}, + {ID: 3, Name: "Charlie"}, + {ID: 4, Name: "David"}, + } + + params := PaginationQueryParams{start: 1, limit: 2} + result := paginateFn(users, params) + + expected := []User{ + {ID: 2, Name: "Bob"}, + {ID: 3, Name: "Charlie"}, + } + require.Equal(t, expected, result) +} + +func TestPaginateFn_BoundaryConditions(t *testing.T) { + items := []int{1, 2, 3, 4, 5} + + testCases := []struct { + name string + start int + limit int + expected []int + }{ + {"start=0, limit=0", 0, 0, []int{1, 2, 3, 4, 5}}, + {"start=0, limit=5", 0, 5, []int{1, 2, 3, 4, 5}}, + {"start=0, limit=6", 0, 6, []int{1, 2, 3, 4, 5}}, + {"start=4, limit=1", 4, 1, []int{5}}, + {"start=4, limit=2", 4, 2, []int{5}}, + {"start=5, limit=1", 5, 1, []int{}}, + {"start=-1, limit=1", -1, 1, []int{1}}, + {"start=-10, limit=3", -10, 3, []int{1, 2, 3}}, + {"start=100, limit=1", 
100, 1, []int{}}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + params := PaginationQueryParams{start: tc.start, limit: tc.limit} + result := paginateFn(items, params) + require.Equal(t, tc.expected, result) + }) + } +} + +func TestPaginateFn_ReturnsSliceView(t *testing.T) { + items := []int{1, 2, 3, 4, 5} + + params := PaginationQueryParams{start: 1, limit: 3} + result := paginateFn(items, params) + + // Result should be a slice view of the original + require.Equal(t, []int{2, 3, 4}, result) + + // Modifying result WILL affect the original slice (shares underlying array) + if len(result) > 0 { + result[0] = 999 + require.Equal(t, 999, items[1]) // Original is modified because they share memory + } +} + +func TestPaginateFn_TypicalAPIUseCases(t *testing.T) { + // Simulate API responses with different page sizes + items := make([]int, 100) + for i := range items { + items[i] = i + 1 + } + + // Page size 10 + params := PaginationQueryParams{start: 0, limit: 10} + page1 := paginateFn(items, params) + require.Len(t, page1, 10) + require.Equal(t, []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, page1) + + // Page size 20, offset 20 + params = PaginationQueryParams{start: 20, limit: 20} + page2 := paginateFn(items, params) + require.Len(t, page2, 20) + require.Equal(t, 21, page2[0]) + require.Equal(t, 40, page2[19]) + + // Last page (partial) + params = PaginationQueryParams{start: 95, limit: 10} + lastPage := paginateFn(items, params) + require.Len(t, lastPage, 5) + require.Equal(t, []int{96, 97, 98, 99, 100}, lastPage) +} diff --git a/api/http/utils/filters/query_params.go b/api/http/utils/filters/query_params.go new file mode 100644 index 000000000..7dec80365 --- /dev/null +++ b/api/http/utils/filters/query_params.go @@ -0,0 +1,38 @@ +package filters + +import ( + "net/http" + + "github.com/portainer/portainer/pkg/libhttp/request" +) + +type QueryParams struct { + SearchQueryParams + SortQueryParams + PaginationQueryParams +} + +func ExtractListModifiersQueryParams(r *http.Request) QueryParams { + // search + search, _ := request.RetrieveQueryParameter(r, "search", true) + // sorting + sortField, _ := request.RetrieveQueryParameter(r, "sort", true) + sortOrder, _ := request.RetrieveQueryParameter(r, "order", true) + // pagination + start, _ := request.RetrieveNumericQueryParameter(r, "start", true) + limit, _ := request.RetrieveNumericQueryParameter(r, "limit", true) + + return QueryParams{ + SearchQueryParams{ + search: search, + }, + SortQueryParams{ + sort: sortField, + order: SortOrder(sortOrder), + }, + PaginationQueryParams{ + start: start, + limit: limit, + }, + } +} diff --git a/api/http/utils/filters/query_params_test.go b/api/http/utils/filters/query_params_test.go new file mode 100644 index 000000000..5865e11d8 --- /dev/null +++ b/api/http/utils/filters/query_params_test.go @@ -0,0 +1,300 @@ +package filters + +import ( + "net/http" + "net/url" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestExtractListModifiersQueryParams(t *testing.T) { + tests := []struct { + name string + queryParams map[string]string + expectedResult QueryParams + description string + }{ + { + name: "all parameters provided", + queryParams: map[string]string{ + "search": "test query", + "sort": "name", + "order": "asc", + "start": "10", + "limit": "25", + }, + expectedResult: QueryParams{ + SearchQueryParams: SearchQueryParams{ + search: "test query", + }, + SortQueryParams: SortQueryParams{ + sort: "name", + order: SortAsc, + }, + PaginationQueryParams: 
PaginationQueryParams{ + start: 10, + limit: 25, + }, + }, + description: "Should correctly parse all query parameters when provided", + }, + { + name: "descending sort order", + queryParams: map[string]string{ + "search": "another test", + "sort": "date", + "order": "desc", + "start": "0", + "limit": "50", + }, + expectedResult: QueryParams{ + SearchQueryParams: SearchQueryParams{ + search: "another test", + }, + SortQueryParams: SortQueryParams{ + sort: "date", + order: SortDesc, + }, + PaginationQueryParams: PaginationQueryParams{ + start: 0, + limit: 50, + }, + }, + description: "Should correctly handle descending sort order", + }, + { + name: "no parameters provided", + queryParams: map[string]string{}, + expectedResult: QueryParams{ + SearchQueryParams: SearchQueryParams{ + search: "", + }, + SortQueryParams: SortQueryParams{ + sort: "", + order: SortOrder(""), + }, + PaginationQueryParams: PaginationQueryParams{ + start: 0, + limit: 0, + }, + }, + description: "Should return zero values when no parameters are provided", + }, + { + name: "partial parameters - search only", + queryParams: map[string]string{ + "search": "partial test", + }, + expectedResult: QueryParams{ + SearchQueryParams: SearchQueryParams{ + search: "partial test", + }, + SortQueryParams: SortQueryParams{ + sort: "", + order: SortOrder(""), + }, + PaginationQueryParams: PaginationQueryParams{ + start: 0, + limit: 0, + }, + }, + description: "Should handle partial parameters correctly", + }, + { + name: "partial parameters - pagination only", + queryParams: map[string]string{ + "start": "5", + "limit": "15", + }, + expectedResult: QueryParams{ + SearchQueryParams: SearchQueryParams{ + search: "", + }, + SortQueryParams: SortQueryParams{ + sort: "", + order: SortOrder(""), + }, + PaginationQueryParams: PaginationQueryParams{ + start: 5, + limit: 15, + }, + }, + description: "Should handle pagination parameters when other params are missing", + }, + { + name: "invalid sort order", + queryParams: map[string]string{ + "search": "test", + "sort": "name", + "order": "invalid", + "start": "0", + "limit": "10", + }, + expectedResult: QueryParams{ + SearchQueryParams: SearchQueryParams{ + search: "test", + }, + SortQueryParams: SortQueryParams{ + sort: "name", + order: SortOrder("invalid"), + }, + PaginationQueryParams: PaginationQueryParams{ + start: 0, + limit: 10, + }, + }, + description: "Should accept invalid sort order as SortOrder type", + }, + { + name: "empty string values", + queryParams: map[string]string{ + "search": "", + "sort": "", + "order": "", + "start": "0", + "limit": "0", + }, + expectedResult: QueryParams{ + SearchQueryParams: SearchQueryParams{ + search: "", + }, + SortQueryParams: SortQueryParams{ + sort: "", + order: SortOrder(""), + }, + PaginationQueryParams: PaginationQueryParams{ + start: 0, + limit: 0, + }, + }, + description: "Should handle empty string values correctly", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create HTTP request with query parameters + req := createRequestWithParams(tt.queryParams) + + // Execute the function + result := ExtractListModifiersQueryParams(req) + + // Assertions + require.Equal(t, tt.expectedResult.SearchQueryParams.search, result.SearchQueryParams.search, + "Search parameter should match expected value") + require.Equal(t, tt.expectedResult.SortQueryParams.sort, result.SortQueryParams.sort, + "Sort parameter should match expected value") + require.Equal(t, tt.expectedResult.SortQueryParams.order, 
result.SortQueryParams.order, + "Order parameter should match expected value") + require.Equal(t, tt.expectedResult.PaginationQueryParams.start, result.PaginationQueryParams.start, + "Start parameter should match expected value") + require.Equal(t, tt.expectedResult.PaginationQueryParams.limit, result.PaginationQueryParams.limit, + "Limit parameter should match expected value") + + // Verify the complete struct + require.Equal(t, tt.expectedResult, result, tt.description) + }) + } +} + +func TestSortOrderConstants(t *testing.T) { + t.Run("sort order constants", func(t *testing.T) { + require.Equal(t, SortOrder("asc"), SortAsc, "SortAsc constant should equal 'asc'") + require.Equal(t, SortOrder("desc"), SortDesc, "SortDesc constant should equal 'desc'") + }) +} + +func TestQueryParamsStructEmbedding(t *testing.T) { + t.Run("struct embedding", func(t *testing.T) { + qp := QueryParams{ + SearchQueryParams: SearchQueryParams{search: "test"}, + SortQueryParams: SortQueryParams{sort: "name", order: SortAsc}, + PaginationQueryParams: PaginationQueryParams{start: 10, limit: 20}, + } + + // Test that embedded fields are accessible + require.Equal(t, "test", qp.search, "Embedded search field should be accessible") + require.Equal(t, "name", qp.sort, "Embedded sort field should be accessible") + require.Equal(t, SortAsc, qp.order, "Embedded order field should be accessible") + require.Equal(t, 10, qp.start, "Embedded start field should be accessible") + require.Equal(t, 20, qp.limit, "Embedded limit field should be accessible") + }) +} + +func TestExtractListModifiersQueryParamsEdgeCases(t *testing.T) { + t.Run("special characters in search", func(t *testing.T) { + req := createRequestWithParams(map[string]string{ + "search": "test & special chars %20", + }) + + result := ExtractListModifiersQueryParams(req) + require.Equal(t, "test & special chars %20", result.search, + "Should handle special characters in search parameter") + }) + + t.Run("unicode characters", func(t *testing.T) { + req := createRequestWithParams(map[string]string{ + "search": "test 测试 🔍", + "sort": "título", + }) + + result := ExtractListModifiersQueryParams(req) + require.Equal(t, "test 测试 🔍", result.search, "Should handle unicode in search") + require.Equal(t, "título", result.sort, "Should handle unicode in sort field") + }) + + t.Run("very long values", func(t *testing.T) { + longSearch := "a very long search query that contains many words and goes on for quite some time to test handling of long strings" + req := createRequestWithParams(map[string]string{ + "search": longSearch, + }) + + result := ExtractListModifiersQueryParams(req) + require.Equal(t, longSearch, result.search, "Should handle long search strings") + }) +} + +// Helper function to create HTTP request with query parameters +func createRequestWithParams(params map[string]string) *http.Request { + // Create URL with query parameters + u := &url.URL{ + Scheme: "https", + Host: "example.com", + Path: "/test", + } + + // Add query parameters + q := u.Query() + for key, value := range params { + q.Set(key, value) + } + u.RawQuery = q.Encode() + + // Create request + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +// Benchmark tests +func BenchmarkExtractListModifiersQueryParams(b *testing.B) { + req := createRequestWithParams(map[string]string{ + "search": "benchmark test", + "sort": "name", + "order": "asc", + "start": "10", + "limit": "25", + }) + + for b.Loop() { + ExtractListModifiersQueryParams(req) + } +} + +func 
BenchmarkExtractListModifiersQueryParamsEmpty(b *testing.B) { + req := createRequestWithParams(map[string]string{}) + + for b.Loop() { + ExtractListModifiersQueryParams(req) + } +} diff --git a/api/http/utils/filters/search.go b/api/http/utils/filters/search.go new file mode 100644 index 000000000..b88422da1 --- /dev/null +++ b/api/http/utils/filters/search.go @@ -0,0 +1,36 @@ +package filters + +import ( + "strings" +) + +// Return any error to skip the field (for when matching an unknown state on an enum) +// +// Note: returning ("", nil) will match! +type SearchAccessor[T any] = func(T) (string, error) + +type SearchQueryParams struct { + search string +} + +func searchFn[T any](items []T, params SearchQueryParams, accessors []SearchAccessor[T]) []T { + search := strings.TrimSpace(params.search) + + if search == "" { + return items + } + + results := []T{} + + for iIdx := range items { + for aIdx := range accessors { + value, err := accessors[aIdx](items[iIdx]) + if err == nil && strings.Contains(strings.ToLower(value), strings.ToLower(search)) { + results = append(results, items[iIdx]) + break + } + } + } + + return results +} diff --git a/api/http/utils/filters/search_test.go b/api/http/utils/filters/search_test.go new file mode 100644 index 000000000..d8f552563 --- /dev/null +++ b/api/http/utils/filters/search_test.go @@ -0,0 +1,283 @@ +package filters + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSearchFn_BasicSearch(t *testing.T) { + users := []User{ + {ID: 1, Name: "Alice Smith", Email: "alice@example.com", Age: 25}, + {ID: 2, Name: "Bob Johnson", Email: "bob@company.com", Age: 30}, + {ID: 3, Name: "Charlie Brown", Email: "charlie@test.org", Age: 35}, + } + + accessors := []SearchAccessor[User]{userNameAccessor, userEmailAccessor} + params := SearchQueryParams{search: "Alice"} + + result := searchFn(users, params, accessors) + + require.Len(t, result, 1) + require.Equal(t, "Alice Smith", result[0].Name) +} + +func TestSearchFn_EmptySearch(t *testing.T) { + users := []User{ + {ID: 1, Name: "Alice Smith", Email: "alice@example.com", Age: 25}, + {ID: 2, Name: "Bob Johnson", Email: "bob@company.com", Age: 30}, + } + + accessors := []SearchAccessor[User]{userNameAccessor, userEmailAccessor} + params := SearchQueryParams{search: ""} + + result := searchFn(users, params, accessors) + + // Should return all items when search is empty + require.Equal(t, users, result) +} + +func TestSearchFn_NoMatches(t *testing.T) { + users := []User{ + {ID: 1, Name: "Alice Smith", Email: "alice@example.com", Age: 25}, + {ID: 2, Name: "Bob Johnson", Email: "bob@company.com", Age: 30}, + } + + accessors := []SearchAccessor[User]{userNameAccessor, userEmailAccessor} + params := SearchQueryParams{search: "nonexistent"} + + result := searchFn(users, params, accessors) + + require.Empty(t, result) +} + +func TestSearchFn_MultipleMatches(t *testing.T) { + users := []User{ + {ID: 1, Name: "Alice Smith", Email: "alice@example.com", Age: 25}, + {ID: 2, Name: "Bob Smith", Email: "bob@company.com", Age: 30}, + {ID: 3, Name: "Charlie Brown", Email: "charlie@smith.org", Age: 35}, + } + + accessors := []SearchAccessor[User]{userNameAccessor, userEmailAccessor} + params := SearchQueryParams{search: "Smith"} + + result := searchFn(users, params, accessors) + + require.Len(t, result, 3) + require.Equal(t, "Alice Smith", result[0].Name) + require.Equal(t, "Bob Smith", result[1].Name) + require.Equal(t, "Charlie Brown", result[2].Name) // Matches via email +} + +func 
TestSearchFn_MultipleAccessors(t *testing.T) { + users := []User{ + {ID: 1, Name: "Alice Smith", Email: "alice@example.com", Age: 25}, + {ID: 2, Name: "Bob Johnson", Email: "bob@company.com", Age: 30}, + {ID: 3, Name: "Charlie Brown", Email: "charlie@test.org", Age: 35}, + } + + // Search across name, email, and ID + accessors := []SearchAccessor[User]{userNameAccessor, userEmailAccessor, userIDAccessor} + + // Search by ID + params := SearchQueryParams{search: "2"} + result := searchFn(users, params, accessors) + require.Len(t, result, 1) + require.Equal(t, 2, result[0].ID) + + // Search by email domain + params = SearchQueryParams{search: "company.com"} + result = searchFn(users, params, accessors) + require.Len(t, result, 1) + require.Equal(t, "Bob Johnson", result[0].Name) +} + +func TestSearchFn_CaseSensitive(t *testing.T) { + users := []User{ + {ID: 1, Name: "Alice Smith", Email: "alice@example.com", Age: 25}, + {ID: 2, Name: "Bob Johnson", Email: "bob@company.com", Age: 30}, + } + + accessors := []SearchAccessor[User]{userNameAccessor, userEmailAccessor} + + // Case sensitive search - should not match + params := SearchQueryParams{search: "alice"} + result := searchFn(users, params, accessors) + require.Len(t, result, 1) // Matches email which is lowercase + + // Exact case match + params = SearchQueryParams{search: "Alice"} + result = searchFn(users, params, accessors) + require.Len(t, result, 1) + require.Equal(t, "Alice Smith", result[0].Name) +} + +func TestSearchFn_PartialMatches(t *testing.T) { + products := []Product{ + {ID: 1, Name: "Wireless Mouse", Description: "Ergonomic wireless mouse", Price: 25, Category: "Electronics"}, + {ID: 2, Name: "Mechanical Keyboard", Description: "RGB gaming keyboard", Price: 150, Category: "Electronics"}, + {ID: 3, Name: "Coffee Mug", Description: "Ceramic coffee mug", Price: 15, Category: "Kitchen"}, + } + + accessors := []SearchAccessor[Product]{productNameAccessor, productDescriptionAccessor} + + // Partial word match + params := SearchQueryParams{search: "wire"} + result := searchFn(products, params, accessors) + require.Len(t, result, 1) + require.Equal(t, "Wireless Mouse", result[0].Name) + + // Match in description + params = SearchQueryParams{search: "RGB"} + result = searchFn(products, params, accessors) + require.Len(t, result, 1) + require.Equal(t, "Mechanical Keyboard", result[0].Name) +} + +func TestSearchFn_EmptySlice(t *testing.T) { + users := []User{} + accessors := []SearchAccessor[User]{userNameAccessor, userEmailAccessor} + params := SearchQueryParams{search: "anything"} + + result := searchFn(users, params, accessors) + + require.Empty(t, result) +} + +func TestSearchFn_EmptyAccessors(t *testing.T) { + users := []User{ + {ID: 1, Name: "Alice Smith", Email: "alice@example.com", Age: 25}, + } + + accessors := []SearchAccessor[User]{} // No accessors + params := SearchQueryParams{search: "Alice"} + + result := searchFn(users, params, accessors) + + // Should return empty since no accessors to search through + require.Empty(t, result) +} + +func TestSearchFn_SingleAccessor(t *testing.T) { + users := []User{ + {ID: 1, Name: "Alice Smith", Email: "alice@example.com", Age: 25}, + {ID: 2, Name: "Bob Johnson", Email: "bob@company.com", Age: 30}, + } + + // Only search by name + accessors := []SearchAccessor[User]{userNameAccessor} + params := SearchQueryParams{search: "company.com"} + + result := searchFn(users, params, accessors) + + // Should not match since we're only searching names, not emails + require.Empty(t, result) +} + 
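// searchFn (see search.go above) lower-cases both the field value and the query, so
// matching is case-insensitive, and an item is kept as soon as any one accessor
// matches; an accessor error only skips that single field. A minimal usage sketch,
// reusing the test accessors defined in types_test.go:
//
//	users := []User{{ID: 1, Name: "Alice Smith", Email: "alice@example.com", Age: 25}}
//	accessors := []SearchAccessor[User]{userNameAccessor, userEmailAccessor}
//	matches := searchFn(users, SearchQueryParams{search: "ALICE"}, accessors)
//	// len(matches) == 1: "ALICE" is lowered to "alice" before the Contains check.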
+func TestSearchFn_NumericSearch(t *testing.T) { + users := []User{ + {ID: 1, Name: "Alice Smith", Email: "alice@example.com", Age: 25}, + {ID: 2, Name: "Bob Johnson", Email: "bob@company.com", Age: 30}, + {ID: 3, Name: "Charlie Brown", Email: "charlie@test.org", Age: 35}, + } + + // Search by age (converted to string) + accessors := []SearchAccessor[User]{userAgeAccessor} + params := SearchQueryParams{search: "30"} + + result := searchFn(users, params, accessors) + + require.Len(t, result, 1) + require.Equal(t, 30, result[0].Age) +} + +func TestSearchFn_FormattedAccessor(t *testing.T) { + products := []Product{ + {ID: 1, Name: "Mouse", Description: "Wireless mouse", Price: 25, Category: "Electronics"}, + {ID: 2, Name: "Keyboard", Description: "Gaming keyboard", Price: 150, Category: "Electronics"}, + } + + // Search by formatted price (e.g., "$25") + accessors := []SearchAccessor[Product]{productPriceAccessor} + params := SearchQueryParams{search: "$25"} + + result := searchFn(products, params, accessors) + + require.Len(t, result, 1) + require.Equal(t, "Mouse", result[0].Name) +} + +func TestSearchFn_FirstMatchOnly(t *testing.T) { + users := []User{ + {ID: 1, Name: "test user", Email: "test@example.com", Age: 25}, + } + + // Both accessors would match the search term + accessors := []SearchAccessor[User]{userNameAccessor, userEmailAccessor} + params := SearchQueryParams{search: "test"} + + result := searchFn(users, params, accessors) + + // Should only include the item once, even though multiple accessors match + require.Len(t, result, 1) + require.Equal(t, "test user", result[0].Name) +} + +func TestSearchFn_PreservesOrder(t *testing.T) { + users := []User{ + {ID: 1, Name: "Alice Test", Email: "alice@example.com", Age: 25}, + {ID: 2, Name: "Bob Johnson", Email: "bob@test.com", Age: 30}, + {ID: 3, Name: "Charlie Test", Email: "charlie@example.com", Age: 35}, + } + + accessors := []SearchAccessor[User]{userNameAccessor, userEmailAccessor} + params := SearchQueryParams{search: "Test"} + + result := searchFn(users, params, accessors) + + require.Len(t, result, 3) + // Should preserve original order + require.Equal(t, 1, result[0].ID) + require.Equal(t, 2, result[1].ID) + require.Equal(t, 3, result[2].ID) +} + +func TestSearchFn_ComplexSearch(t *testing.T) { + products := []Product{ + {ID: 1, Name: "Gaming Mouse", Description: "High-DPI gaming mouse", Price: 75, Category: "Gaming"}, + {ID: 2, Name: "Office Mouse", Description: "Ergonomic office mouse", Price: 25, Category: "Office"}, + {ID: 3, Name: "Gaming Keyboard", Description: "Mechanical gaming keyboard", Price: 150, Category: "Gaming"}, + {ID: 4, Name: "Wireless Headset", Description: "Gaming headset with mic", Price: 100, Category: "Gaming"}, + } + + // Search across multiple fields + accessors := []SearchAccessor[Product]{ + productNameAccessor, + productDescriptionAccessor, + productCategoryAccessor, + } + + params := SearchQueryParams{search: "Gaming"} + + result := searchFn(products, params, accessors) + + require.Len(t, result, 3) + require.Equal(t, "Gaming Mouse", result[0].Name) + require.Equal(t, "Gaming Keyboard", result[1].Name) + require.Equal(t, "Wireless Headset", result[2].Name) +} + +func TestSearchFn_WhitespaceSearch(t *testing.T) { + users := []User{ + {ID: 1, Name: "Alice Smith", Email: "alice@example.com", Age: 25}, + {ID: 2, Name: "Bob Johnson", Email: "bob@company.com", Age: 30}, + } + + accessors := []SearchAccessor[User]{userNameAccessor, userEmailAccessor} + + // Search with just whitespace should be treated as 
empty + params := SearchQueryParams{search: " "} + result := searchFn(users, params, accessors) + + require.Len(t, result, 2) +} diff --git a/api/http/utils/filters/sort.go b/api/http/utils/filters/sort.go new file mode 100644 index 000000000..f7d937b4e --- /dev/null +++ b/api/http/utils/filters/sort.go @@ -0,0 +1,40 @@ +package filters + +import "slices" + +type SortOrder string + +const ( + SortAsc SortOrder = "asc" + SortDesc SortOrder = "desc" +) + +type SortQueryParams struct { + sort string + order SortOrder +} + +type SortOption[T any] func(a, b T) int +type SortBinding[T any] struct { + Key string + Fn SortOption[T] +} + +func sortFn[T any](items []T, params SortQueryParams, sorts []SortBinding[T]) []T { + for _, sort := range sorts { + if sort.Key == params.sort { + fn := sort.Fn + if params.order == SortDesc { + fn = reverSortFn(fn) + } + slices.SortStableFunc(items, fn) + } + } + return items +} + +func reverSortFn[T any](fn SortOption[T]) SortOption[T] { + return func(a, b T) int { + return -1 * fn(a, b) + } +} diff --git a/api/http/utils/filters/sort_test.go b/api/http/utils/filters/sort_test.go new file mode 100644 index 000000000..44f4c5d72 --- /dev/null +++ b/api/http/utils/filters/sort_test.go @@ -0,0 +1,287 @@ +package filters + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +// Helper sort functions +func compareUserByName(a, b User) int { + return strings.Compare(a.Name, b.Name) +} + +func compareUserByAge(a, b User) int { + return a.Age - b.Age +} + +func compareProductByName(a, b Product) int { + return strings.Compare(a.Name, b.Name) +} + +func compareProductByPrice(a, b Product) int { + return a.Price - b.Price +} + +func TestSortFn_BasicAscending(t *testing.T) { + users := []User{ + {Name: "Charlie", Age: 25}, + {Name: "Alice", Age: 30}, + {Name: "Bob", Age: 20}, + } + + sorts := []SortBinding[User]{ + {Key: "name", Fn: compareUserByName}, + {Key: "age", Fn: compareUserByAge}, + } + + params := SortQueryParams{sort: "name", order: SortAsc} + result := sortFn(users, params, sorts) + + require.Equal(t, []User{ + {Name: "Alice", Age: 30}, + {Name: "Bob", Age: 20}, + {Name: "Charlie", Age: 25}, + }, result) +} + +func TestSortFn_BasicDescending(t *testing.T) { + users := []User{ + {Name: "Charlie", Age: 25}, + {Name: "Alice", Age: 30}, + {Name: "Bob", Age: 20}, + } + + sorts := []SortBinding[User]{ + {Key: "name", Fn: compareUserByName}, + {Key: "age", Fn: compareUserByAge}, + } + + params := SortQueryParams{sort: "name", order: SortDesc} + result := sortFn(users, params, sorts) + + require.Equal(t, []User{ + {Name: "Charlie", Age: 25}, + {Name: "Bob", Age: 20}, + {Name: "Alice", Age: 30}, + }, result) +} + +func TestSortFn_SortByAge(t *testing.T) { + users := []User{ + {Name: "Charlie", Age: 25}, + {Name: "Alice", Age: 30}, + {Name: "Bob", Age: 20}, + } + + sorts := []SortBinding[User]{ + {Key: "name", Fn: compareUserByName}, + {Key: "age", Fn: compareUserByAge}, + } + + // Test ascending by age + params := SortQueryParams{sort: "age", order: SortAsc} + result := sortFn(users, params, sorts) + + require.Equal(t, []User{ + {Name: "Bob", Age: 20}, + {Name: "Charlie", Age: 25}, + {Name: "Alice", Age: 30}, + }, result) + + // Test descending by age + params = SortQueryParams{sort: "age", order: SortDesc} + result = sortFn(users, params, sorts) + + require.Equal(t, []User{ + {Name: "Alice", Age: 30}, + {Name: "Charlie", Age: 25}, + {Name: "Bob", Age: 20}, + }, result) +} + +func TestSortFn_UnknownSortKey(t *testing.T) { + users := []User{ 
+ {Name: "Charlie", Age: 25}, + {Name: "Alice", Age: 30}, + {Name: "Bob", Age: 20}, + } + + sorts := []SortBinding[User]{ + {Key: "name", Fn: compareUserByName}, + } + + params := SortQueryParams{sort: "unknown", order: SortAsc} + result := sortFn(users, params, sorts) + + // Should return original slice unchanged + require.Equal(t, []User{ + {Name: "Charlie", Age: 25}, + {Name: "Alice", Age: 30}, + {Name: "Bob", Age: 20}, + }, result) +} + +func TestSortFn_EmptySlice(t *testing.T) { + users := []User{} + + sorts := []SortBinding[User]{ + {Key: "name", Fn: compareUserByName}, + } + + params := SortQueryParams{sort: "name", order: SortAsc} + result := sortFn(users, params, sorts) + + require.Empty(t, result) +} + +func TestSortFn_SingleElement(t *testing.T) { + users := []User{{Name: "Alice", Age: 30}} + + sorts := []SortBinding[User]{ + {Key: "name", Fn: compareUserByName}, + } + + params := SortQueryParams{sort: "name", order: SortAsc} + result := sortFn(users, params, sorts) + + require.Equal(t, []User{{Name: "Alice", Age: 30}}, result) +} + +func TestSortFn_EmptySortBindings(t *testing.T) { + users := []User{ + {Name: "Charlie", Age: 25}, + {Name: "Alice", Age: 30}, + } + + sorts := []SortBinding[User]{} // Empty sorts + + params := SortQueryParams{sort: "name", order: SortAsc} + result := sortFn(users, params, sorts) + + // Should return original slice unchanged + require.Equal(t, []User{ + {Name: "Charlie", Age: 25}, + {Name: "Alice", Age: 30}, + }, result) +} + +func TestSortFn_DifferentType(t *testing.T) { + products := []Product{ + {Name: "Laptop", Price: 1000}, + {Name: "Mouse", Price: 25}, + {Name: "Keyboard", Price: 100}, + } + + sorts := []SortBinding[Product]{ + {Key: "name", Fn: compareProductByName}, + {Key: "price", Fn: compareProductByPrice}, + } + + // Test by price ascending + params := SortQueryParams{sort: "price", order: SortAsc} + result := sortFn(products, params, sorts) + + require.Equal(t, []Product{ + {Name: "Mouse", Price: 25}, + {Name: "Keyboard", Price: 100}, + {Name: "Laptop", Price: 1000}, + }, result) + + // Test by name descending + params = SortQueryParams{sort: "name", order: SortDesc} + result = sortFn(products, params, sorts) + + require.Equal(t, []Product{ + {Name: "Mouse", Price: 25}, + {Name: "Laptop", Price: 1000}, + {Name: "Keyboard", Price: 100}, + }, result) +} + +func TestSortFn_StableSort(t *testing.T) { + // Test that sorting is stable (maintains relative order of equal elements) + users := []User{ + {Name: "Alice", Age: 25}, + {Name: "Bob", Age: 25}, + {Name: "Charlie", Age: 25}, + {Name: "David", Age: 30}, + } + + sorts := []SortBinding[User]{ + {Key: "age", Fn: compareUserByAge}, + } + + params := SortQueryParams{sort: "age", order: SortAsc} + result := sortFn(users, params, sorts) + + // All users with age 25 should maintain their original relative order + require.Equal(t, []User{ + {Name: "Alice", Age: 25}, + {Name: "Bob", Age: 25}, + {Name: "Charlie", Age: 25}, + {Name: "David", Age: 30}, + }, result) +} + +func TestReverseSortFn(t *testing.T) { + originalFn := compareUserByAge + reversedFn := reverSortFn(originalFn) + + userA := User{Name: "Alice", Age: 20} + userB := User{Name: "Bob", Age: 30} + + // Original function: A < B (returns negative) + require.Less(t, originalFn(userA, userB), 0) + + // Reversed function: A > B (returns positive) + require.Greater(t, reversedFn(userA, userB), 0) + + // Test symmetry + require.Equal(t, -originalFn(userA, userB), reversedFn(userA, userB)) + require.Equal(t, -originalFn(userB, userA), 
reversedFn(userB, userA)) +} + +func TestSortFn_CaseSensitive(t *testing.T) { + users := []User{ + {Name: "alice", Age: 25}, + {Name: "Bob", Age: 30}, + {Name: "Charlie", Age: 20}, + } + + sorts := []SortBinding[User]{ + {Key: "name", Fn: compareUserByName}, + } + + params := SortQueryParams{sort: "name", order: SortAsc} + result := sortFn(users, params, sorts) + + // strings.Compare is case-sensitive, uppercase comes before lowercase + require.Equal(t, []User{ + {Name: "Bob", Age: 30}, + {Name: "Charlie", Age: 20}, + {Name: "alice", Age: 25}, + }, result) +} + +func TestSortFn_ModifiesOriginalSlice(t *testing.T) { + users := []User{ + {Name: "Charlie", Age: 25}, + {Name: "Alice", Age: 30}, + {Name: "Bob", Age: 20}, + } + original := make([]User, len(users)) + copy(original, users) + + sorts := []SortBinding[User]{ + {Key: "name", Fn: compareUserByName}, + } + + params := SortQueryParams{sort: "name", order: SortAsc} + result := sortFn(users, params, sorts) + + // The function modifies the original slice + require.Equal(t, result, users) + require.NotEqual(t, original, users) +} diff --git a/api/http/utils/filters/types_test.go b/api/http/utils/filters/types_test.go new file mode 100644 index 000000000..2dc9b9ac9 --- /dev/null +++ b/api/http/utils/filters/types_test.go @@ -0,0 +1,63 @@ +package filters + +import ( + "errors" + "fmt" + "strconv" +) + +// Test data structures +type User struct { + ID int + Name string + Email string + Age int +} + +type Product struct { + ID int + Name string + Description string + Price int + Category string +} + +// User accessors +func userIDAccessor(u User) (string, error) { + return strconv.Itoa(u.ID), nil +} + +func userNameAccessor(u User) (string, error) { + return u.Name, nil +} + +func userEmailAccessor(u User) (string, error) { + return u.Email, nil +} + +func userAgeAccessor(u User) (string, error) { + return strconv.Itoa(u.Age), nil +} + +// Product accessors + +func productNameAccessor(p Product) (string, error) { + return p.Name, nil +} + +func productDescriptionAccessor(p Product) (string, error) { + return p.Description, nil +} + +func productPriceAccessor(p Product) (string, error) { + return fmt.Sprintf("$%d", p.Price), nil +} + +func productCategoryAccessor(p Product) (string, error) { + return p.Category, nil +} + +// Other accessors +func errorAccessor[T any](t T) (string, error) { + return "", errors.New("accessor error") +} diff --git a/api/internal/edge/edgegroup.go b/api/internal/edge/edgegroup.go index 255f5f55f..b9a1b899d 100644 --- a/api/internal/edge/edgegroup.go +++ b/api/internal/edge/edgegroup.go @@ -4,13 +4,27 @@ import ( portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/internal/endpointutils" + "github.com/portainer/portainer/api/roar" "github.com/portainer/portainer/api/tag" ) // EdgeGroupRelatedEndpoints returns a list of environments(endpoints) related to this Edge group func EdgeGroupRelatedEndpoints(edgeGroup *portainer.EdgeGroup, endpoints []portainer.Endpoint, endpointGroups []portainer.EndpointGroup) []portainer.EndpointID { if !edgeGroup.Dynamic { - return edgeGroup.Endpoints + var r roar.Roar[portainer.EndpointID] + + for _, endpoint := range endpoints { + if edgeGroup.EndpointIDs.Contains(endpoint.ID) { + r.Add(endpoint.ID) + } + } + + return r.ToSlice() + } + + endpointGroupsMap := map[portainer.EndpointGroupID]*portainer.EndpointGroup{} + for i, group := range endpointGroups { + endpointGroupsMap[group.ID] = &endpointGroups[i] } 
endpointIDs := []portainer.EndpointID{} @@ -19,15 +33,8 @@ func EdgeGroupRelatedEndpoints(edgeGroup *portainer.EdgeGroup, endpoints []porta continue } - var endpointGroup portainer.EndpointGroup - for _, group := range endpointGroups { - if endpoint.GroupID == group.ID { - endpointGroup = group - break - } - } - - if edgeGroupRelatedToEndpoint(edgeGroup, &endpoint, &endpointGroup) { + endpointGroup := endpointGroupsMap[endpoint.GroupID] + if edgeGroupRelatedToEndpoint(edgeGroup, &endpoint, endpointGroup) { endpointIDs = append(endpointIDs, endpoint.ID) } } @@ -72,17 +79,11 @@ func GetEndpointsFromEdgeGroups(edgeGroupIDs []portainer.EdgeGroupID, datastore // edgeGroupRelatedToEndpoint returns true if edgeGroup is associated with environment(endpoint) func edgeGroupRelatedToEndpoint(edgeGroup *portainer.EdgeGroup, endpoint *portainer.Endpoint, endpointGroup *portainer.EndpointGroup) bool { if !edgeGroup.Dynamic { - for _, endpointID := range edgeGroup.Endpoints { - if endpoint.ID == endpointID { - return true - } - } - - return false + return edgeGroup.EndpointIDs.Contains(endpoint.ID) } endpointTags := tag.Set(endpoint.TagIDs) - if endpointGroup.TagIDs != nil { + if endpointGroup != nil && endpointGroup.TagIDs != nil { endpointTags = tag.Union(endpointTags, tag.Set(endpointGroup.TagIDs)) } diff --git a/api/internal/edge/edgegroup_benchmark_test.go b/api/internal/edge/edgegroup_benchmark_test.go new file mode 100644 index 000000000..861db09fc --- /dev/null +++ b/api/internal/edge/edgegroup_benchmark_test.go @@ -0,0 +1,104 @@ +package edge + +import ( + "testing" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/datastore" + "github.com/portainer/portainer/api/roar" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" +) + +const n = 1_000_000 + +func BenchmarkWriteEdgeGroupOld(b *testing.B) { + zerolog.SetGlobalLevel(zerolog.ErrorLevel) + + _, store := datastore.MustNewTestStore(b, false, false) + + var endpointIDs []portainer.EndpointID + + for i := range n { + endpointIDs = append(endpointIDs, portainer.EndpointID(i+1)) + } + + for b.Loop() { + err := store.EdgeGroup().Create(&portainer.EdgeGroup{ + Name: "Test Edge Group", + Endpoints: endpointIDs, + }) + require.NoError(b, err) + } +} + +func BenchmarkWriteEdgeGroupNew(b *testing.B) { + zerolog.SetGlobalLevel(zerolog.ErrorLevel) + + _, store := datastore.MustNewTestStore(b, false, false) + + var ts []portainer.EndpointID + + for i := range n { + ts = append(ts, portainer.EndpointID(i+1)) + } + + endpointIDs := roar.FromSlice(ts) + + for b.Loop() { + err := store.EdgeGroup().Create(&portainer.EdgeGroup{ + Name: "Test Edge Group", + EndpointIDs: endpointIDs, + }) + require.NoError(b, err) + } +} + +func BenchmarkReadEdgeGroupOld(b *testing.B) { + zerolog.SetGlobalLevel(zerolog.ErrorLevel) + + _, store := datastore.MustNewTestStore(b, false, false) + + var endpointIDs []portainer.EndpointID + + for i := range n { + endpointIDs = append(endpointIDs, portainer.EndpointID(i+1)) + } + + err := store.EdgeGroup().Create(&portainer.EdgeGroup{ + Name: "Test Edge Group", + Endpoints: endpointIDs, + }) + require.NoError(b, err) + + for b.Loop() { + _, err := store.EdgeGroup().ReadAll() + require.NoError(b, err) + } +} + +func BenchmarkReadEdgeGroupNew(b *testing.B) { + zerolog.SetGlobalLevel(zerolog.ErrorLevel) + + _, store := datastore.MustNewTestStore(b, false, false) + + var ts []portainer.EndpointID + + for i := range n { + ts = append(ts, portainer.EndpointID(i+1)) + } + + endpointIDs := 
roar.FromSlice(ts) + + err := store.EdgeGroup().Create(&portainer.EdgeGroup{ + Name: "Test Edge Group", + EndpointIDs: endpointIDs, + }) + require.NoError(b, err) + + for b.Loop() { + _, err := store.EdgeGroup().ReadAll() + require.NoError(b, err) + } +} diff --git a/api/internal/edge/edgestacks/service.go b/api/internal/edge/edgestacks/service.go index 4fc6943af..c0ecb5caf 100644 --- a/api/internal/edge/edgestacks/service.go +++ b/api/internal/edge/edgestacks/service.go @@ -49,7 +49,6 @@ func (service *Service) BuildEdgeStack( DeploymentType: deploymentType, CreationDate: time.Now().Unix(), EdgeGroups: edgeGroups, - Status: make(map[portainer.EndpointID]portainer.EdgeStackStatus, 0), Version: 1, UseManifestNamespaces: useManifestNamespaces, }, nil @@ -99,12 +98,23 @@ func (service *Service) PersistEdgeStack( stack.ManifestPath = manifestPath stack.ProjectPath = projectPath stack.EntryPoint = composePath - stack.NumDeployments = len(relatedEndpointIds) if err := tx.EdgeStack().Create(stack.ID, stack); err != nil { return nil, err } + for _, endpointID := range relatedEndpointIds { + status := &portainer.EdgeStackStatusForEnv{EndpointID: endpointID} + + if err := tx.EdgeStackStatus().Create(stack.ID, endpointID, status); err != nil { + return nil, err + } + } + + if err := tx.EndpointRelation().AddEndpointRelationsForEdgeStack(relatedEndpointIds, stack.ID); err != nil { + return nil, fmt.Errorf("unable to add endpoint relations: %w", err) + } + if err := service.updateEndpointRelations(tx, stack.ID, relatedEndpointIds); err != nil { return nil, fmt.Errorf("unable to update endpoint relations: %w", err) } @@ -144,22 +154,17 @@ func (service *Service) DeleteEdgeStack(tx dataservices.DataStoreTx, edgeStackID return errors.WithMessage(err, "Unable to retrieve edge stack related environments from database") } - for _, endpointID := range relatedEndpointIds { - relation, err := tx.EndpointRelation().EndpointRelation(endpointID) - if err != nil { - return errors.WithMessage(err, "Unable to find environment relation in database") - } - - delete(relation.EdgeStacks, edgeStackID) - - if err := tx.EndpointRelation().UpdateEndpointRelation(endpointID, relation); err != nil { - return errors.WithMessage(err, "Unable to persist environment relation in database") - } + if err := tx.EndpointRelation().RemoveEndpointRelationsForEdgeStack(relatedEndpointIds, edgeStackID); err != nil { + return errors.WithMessage(err, "unable to remove environment relation in database") } if err := tx.EdgeStack().DeleteEdgeStack(edgeStackID); err != nil { return errors.WithMessage(err, "Unable to remove the edge stack from the database") } + if err := tx.EdgeStackStatus().DeleteAll(edgeStackID); err != nil { + return errors.WithMessage(err, "unable to remove edge stack statuses from the database") + } + return nil } diff --git a/api/internal/edge/edgestacks/status.go b/api/internal/edge/edgestacks/status.go deleted file mode 100644 index 25629d958..000000000 --- a/api/internal/edge/edgestacks/status.go +++ /dev/null @@ -1,26 +0,0 @@ -package edgestacks - -import ( - portainer "github.com/portainer/portainer/api" -) - -// NewStatus returns a new status object for an Edge stack -func NewStatus(oldStatus map[portainer.EndpointID]portainer.EdgeStackStatus, relatedEnvironmentIDs []portainer.EndpointID) map[portainer.EndpointID]portainer.EdgeStackStatus { - status := map[portainer.EndpointID]portainer.EdgeStackStatus{} - - for _, environmentID := range relatedEnvironmentIDs { - newEnvStatus := portainer.EdgeStackStatus{ - Status: 
[]portainer.EdgeStackDeploymentStatus{}, - EndpointID: environmentID, - } - - oldEnvStatus, ok := oldStatus[environmentID] - if ok { - newEnvStatus.DeploymentInfo = oldEnvStatus.DeploymentInfo - } - - status[environmentID] = newEnvStatus - } - - return status -} diff --git a/api/internal/edge/endpoint.go b/api/internal/edge/endpoint.go index 27cc50b3b..7901b0e88 100644 --- a/api/internal/edge/endpoint.go +++ b/api/internal/edge/endpoint.go @@ -1,12 +1,9 @@ package edge import ( - "slices" - portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" - - "github.com/rs/zerolog/log" + "github.com/portainer/portainer/api/internal/endpointutils" ) // EndpointRelatedEdgeStacks returns a list of Edge stacks related to this Environment(Endpoint) @@ -47,27 +44,22 @@ func EffectiveCheckinInterval(tx dataservices.DataStoreTx, endpoint *portainer.E // EndpointInEdgeGroup returns true and the edge group name if the endpoint is in the edge group func EndpointInEdgeGroup( tx dataservices.DataStoreTx, - endpointID portainer.EndpointID, + endpoint *portainer.Endpoint, edgeGroupID portainer.EdgeGroupID, + endpointGroups []portainer.EndpointGroup, ) (bool, string, error) { - endpointIDs, err := GetEndpointsFromEdgeGroups( - []portainer.EdgeGroupID{edgeGroupID}, tx, - ) + if !endpointutils.IsEdgeEndpoint(endpoint) || !endpoint.UserTrusted { + return false, "", nil + } + + edgeGroup, err := tx.EdgeGroup().Read(edgeGroupID) if err != nil { return false, "", err } - if slices.Contains(endpointIDs, endpointID) { - edgeGroup, err := tx.EdgeGroup().Read(edgeGroupID) - if err != nil { - log.Warn(). - Err(err). - Int("edgeGroupID", int(edgeGroupID)). - Msg("Unable to retrieve edge group") - - return false, "", err - } + r := EdgeGroupRelatedEndpoints(edgeGroup, []portainer.Endpoint{*endpoint}, endpointGroups) + if len(r) > 0 { return true, edgeGroup.Name, nil } diff --git a/api/internal/edge/endpoint_test.go b/api/internal/edge/endpoint_test.go new file mode 100644 index 000000000..241408515 --- /dev/null +++ b/api/internal/edge/endpoint_test.go @@ -0,0 +1,82 @@ +package edge + +import ( + "testing" + + portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/datastore" + "github.com/portainer/portainer/api/roar" + + "github.com/stretchr/testify/require" +) + +func TestEndpointInEdgeGroup(t *testing.T) { + _, store := datastore.MustNewTestStore(t, true, false) + + endpointGroups := []portainer.EndpointGroup{{ID: 1, Name: "test-group"}} + + endpoint := &portainer.Endpoint{ + ID: 1, + Name: "test-endpoint", + Type: portainer.EdgeAgentOnDockerEnvironment, + UserTrusted: true, + GroupID: endpointGroups[0].ID, + } + edgeGroupID := portainer.EdgeGroupID(1) + + untrustedEndpoint := &portainer.Endpoint{ + ID: 2, + Name: "untrusted-endpoint", + Type: portainer.EdgeAgentOnDockerEnvironment, + UserTrusted: false, + GroupID: endpointGroups[0].ID, + } + + nonEdgeEndpoint := &portainer.Endpoint{ + ID: 2, + Name: "untrusted-endpoint", + Type: portainer.AgentOnDockerEnvironment, + UserTrusted: true, + GroupID: endpointGroups[0].ID, + } + + err := store.EdgeGroup().Create(&portainer.EdgeGroup{ + ID: edgeGroupID, + Name: "test-edge-group", + Dynamic: false, + EndpointIDs: roar.FromSlice([]portainer.EndpointID{endpoint.ID, untrustedEndpoint.ID}), + }) + require.NoError(t, err) + + // Related endpoint in a static edge group + + inEdgeGroup, _, err := EndpointInEdgeGroup(store, endpoint, edgeGroupID, endpointGroups) + require.NoError(t, err) + require.True(t, inEdgeGroup) 
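// For static edge groups, membership now resolves through the roar-backed
// EndpointIDs set (EdgeGroupRelatedEndpoints checks EndpointIDs.Contains), so the
// negative cases below cover an endpoint outside the set, an untrusted endpoint,
// and a non-edge endpoint.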
+ + // Unrelated endpoint in a static edge group + + unrelatedEndpoint := &portainer.Endpoint{ + ID: 3, + Name: "unrelated-endpoint", + Type: portainer.EdgeAgentOnDockerEnvironment, + UserTrusted: true, + GroupID: 0, + } + + inEdgeGroup, _, err = EndpointInEdgeGroup(store, unrelatedEndpoint, edgeGroupID, endpointGroups) + require.NoError(t, err) + require.False(t, inEdgeGroup) + + // Untrusted endpoint + + inEdgeGroup, _, err = EndpointInEdgeGroup(store, untrustedEndpoint, edgeGroupID, endpointGroups) + require.NoError(t, err) + require.False(t, inEdgeGroup) + + // Non-edge endpoint + + inEdgeGroup, _, err = EndpointInEdgeGroup(store, nonEdgeEndpoint, edgeGroupID, endpointGroups) + require.NoError(t, err) + require.False(t, inEdgeGroup) +} diff --git a/api/internal/endpointutils/endpoint_setup.go b/api/internal/endpointutils/endpoint_setup.go index 3242d8875..0f6a7ca7f 100644 --- a/api/internal/endpointutils/endpoint_setup.go +++ b/api/internal/endpointutils/endpoint_setup.go @@ -5,7 +5,6 @@ import ( "strings" portainer "github.com/portainer/portainer/api" - "github.com/portainer/portainer/api/crypto" "github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/http/client" @@ -108,12 +107,7 @@ func createTLSSecuredEndpoint(flags *portainer.CLIFlags, dataStore dataservices. } if strings.HasPrefix(endpoint.URL, "tcp://") { - tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(tlsConfiguration.TLSCACertPath, tlsConfiguration.TLSCertPath, tlsConfiguration.TLSKeyPath, tlsConfiguration.TLSSkipVerify) - if err != nil { - return err - } - - agentOnDockerEnvironment, err := client.ExecutePingOperation(endpoint.URL, tlsConfig) + agentOnDockerEnvironment, err := client.ExecutePingOperation(endpoint.URL, tlsConfiguration) if err != nil { return err } @@ -136,7 +130,7 @@ func createTLSSecuredEndpoint(flags *portainer.CLIFlags, dataStore dataservices. 
func createUnsecuredEndpoint(endpointURL string, dataStore dataservices.DataStore, snapshotService portainer.SnapshotService) error { if strings.HasPrefix(endpointURL, "tcp://") { - if _, err := client.ExecutePingOperation(endpointURL, nil); err != nil { + if _, err := client.ExecutePingOperation(endpointURL, portainer.TLSConfiguration{}); err != nil { return err } } diff --git a/api/internal/endpointutils/endpoint_setup_test.go b/api/internal/endpointutils/endpoint_setup_test.go new file mode 100644 index 000000000..64be9e254 --- /dev/null +++ b/api/internal/endpointutils/endpoint_setup_test.go @@ -0,0 +1,12 @@ +package endpointutils + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCreateOfflineUnsecuredEndpoint(t *testing.T) { + err := createUnsecuredEndpoint("tcp://localhost:1", nil, nil) + require.Error(t, err) +} diff --git a/api/internal/endpointutils/endpointutils.go b/api/internal/endpointutils/endpointutils.go index 6b7eb1c2d..f596ae0d5 100644 --- a/api/internal/endpointutils/endpointutils.go +++ b/api/internal/endpointutils/endpointutils.go @@ -249,3 +249,19 @@ func getEndpointCheckinInterval(endpoint *portainer.Endpoint, settings *portaine return defaultInterval } + +func InitializeEdgeEndpointRelation(endpoint *portainer.Endpoint, tx dataservices.DataStoreTx) error { + if !IsEdgeEndpoint(endpoint) { + return nil + } + + relation := &portainer.EndpointRelation{ + EndpointID: endpoint.ID, + EdgeStacks: make(map[portainer.EdgeStackID]bool), + } + + if err := tx.EndpointRelation().Create(relation); err != nil { + return err + } + return nil +} diff --git a/api/internal/registryutils/access/access.go b/api/internal/registryutils/access/access.go index 0d14cba39..bfa5181c0 100644 --- a/api/internal/registryutils/access/access.go +++ b/api/internal/registryutils/access/access.go @@ -2,40 +2,82 @@ package access import ( "errors" + "fmt" portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/http/security" + "github.com/portainer/portainer/api/internal/endpointutils" + "github.com/portainer/portainer/api/kubernetes" + "github.com/portainer/portainer/api/kubernetes/cli" ) func hasPermission( dataStore dataservices.DataStore, + k8sClientFactory *cli.ClientFactory, userID portainer.UserID, endpointID portainer.EndpointID, registry *portainer.Registry, ) (hasPermission bool, err error) { user, err := dataStore.User().Read(userID) if err != nil { - return + return false, err } if user.Role == portainer.AdministratorRole { - return true, err + return true, nil + } + + endpoint, err := dataStore.Endpoint().Endpoint(endpointID) + if err != nil { + return false, err } teamMemberships, err := dataStore.TeamMembership().TeamMembershipsByUserID(userID) if err != nil { - return + return false, err } + // validate access for kubernetes namespaces (leverage registry.RegistryAccesses[endpointId].Namespaces) + if endpointutils.IsKubernetesEndpoint(endpoint) && k8sClientFactory != nil { + kcl, err := k8sClientFactory.GetPrivilegedKubeClient(endpoint) + if err != nil { + return false, fmt.Errorf("unable to retrieve kubernetes client to validate registry access: %w", err) + } + accessPolicies, err := kcl.GetNamespaceAccessPolicies() + if err != nil { + return false, fmt.Errorf("unable to retrieve environment's namespaces policies to validate registry access: %w", err) + } + + authorizedNamespaces := registry.RegistryAccesses[endpointID].Namespaces + + for _, namespace := range authorizedNamespaces 
{ + // when the default namespace is authorized to use a registry, all users have the ability to use it + // unless the default namespace is restricted: in this case continue to search for other potential accesses authorizations + if namespace == kubernetes.DefaultNamespace && !endpoint.Kubernetes.Configuration.RestrictDefaultNamespace { + return true, nil + } + + namespacePolicy := accessPolicies[namespace] + if security.AuthorizedAccess(user.ID, teamMemberships, namespacePolicy.UserAccessPolicies, namespacePolicy.TeamAccessPolicies) { + return true, nil + } + } + return false, nil + } + + // validate access for docker environments + // leverage registry.RegistryAccesses[endpointId].UserAccessPolicies (direct access) + // and registry.RegistryAccesses[endpointId].TeamAccessPolicies (indirect access via his teams) hasPermission = security.AuthorizedRegistryAccess(registry, user, teamMemberships, endpointID) - return + return hasPermission, nil } // GetAccessibleRegistry get the registry if the user has permission func GetAccessibleRegistry( dataStore dataservices.DataStore, + k8sClientFactory *cli.ClientFactory, userID portainer.UserID, endpointID portainer.EndpointID, registryID portainer.RegistryID, @@ -46,7 +88,7 @@ func GetAccessibleRegistry( return } - hasPermission, err := hasPermission(dataStore, userID, endpointID, registry) + hasPermission, err := hasPermission(dataStore, k8sClientFactory, userID, endpointID, registry) if err != nil { return } diff --git a/api/internal/registryutils/ecr_reg_token.go b/api/internal/registryutils/ecr_reg_token.go index cbcceb982..6e9a754bf 100644 --- a/api/internal/registryutils/ecr_reg_token.go +++ b/api/internal/registryutils/ecr_reg_token.go @@ -62,3 +62,26 @@ func GetRegEffectiveCredential(registry *portainer.Registry) (username, password return } + +// PrepareRegistryCredentials consolidates the common pattern of ensuring valid ECR token +// and setting effective credentials on the registry when authentication is enabled. +// This function modifies the registry in-place by setting Username and Password to the effective values. +func PrepareRegistryCredentials(tx dataservices.DataStoreTx, registry *portainer.Registry) error { + if !registry.Authentication { + return nil + } + + if err := EnsureRegTokenValid(tx, registry); err != nil { + return err + } + + username, password, err := GetRegEffectiveCredential(registry) + if err != nil { + return err + } + + registry.Username = username + registry.Password = password + + return nil +} diff --git a/api/internal/snapshot/snapshot.go b/api/internal/snapshot/snapshot.go index 019dec359..205a82216 100644 --- a/api/internal/snapshot/snapshot.go +++ b/api/internal/snapshot/snapshot.go @@ -2,7 +2,6 @@ package snapshot import ( "context" - "crypto/tls" "errors" "time" @@ -138,14 +137,9 @@ func SupportDirectSnapshot(endpoint *portainer.Endpoint) bool { // If the snapshot is a success, it will be associated to the environment(endpoint). 
func (service *Service) SnapshotEndpoint(endpoint *portainer.Endpoint) error { if endpoint.Type == portainer.AgentOnDockerEnvironment || endpoint.Type == portainer.AgentOnKubernetesEnvironment { - var err error - var tlsConfig *tls.Config - - if endpoint.TLSConfig.TLS { - tlsConfig, err = crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig.TLSCACertPath, endpoint.TLSConfig.TLSCertPath, endpoint.TLSConfig.TLSKeyPath, endpoint.TLSConfig.TLSSkipVerify) - if err != nil { - return err - } + tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig) + if err != nil { + return err } _, version, err := agent.GetAgentVersionAndPlatform(endpoint.URL, tlsConfig) @@ -170,8 +164,8 @@ func (service *Service) Create(snapshot portainer.Snapshot) error { return service.dataStore.Snapshot().Create(&snapshot) } -func (service *Service) FillSnapshotData(endpoint *portainer.Endpoint) error { - return FillSnapshotData(service.dataStore, endpoint) +func (service *Service) FillSnapshotData(endpoint *portainer.Endpoint, includeRaw bool) error { + return FillSnapshotData(service.dataStore, endpoint, includeRaw) } func (service *Service) snapshotKubernetesEndpoint(endpoint *portainer.Endpoint) error { @@ -328,8 +322,16 @@ func FetchDockerID(snapshot portainer.DockerSnapshot) (string, error) { return info.Swarm.Cluster.ID, nil } -func FillSnapshotData(tx dataservices.DataStoreTx, endpoint *portainer.Endpoint) error { - snapshot, err := tx.Snapshot().Read(endpoint.ID) +func FillSnapshotData(tx dataservices.DataStoreTx, endpoint *portainer.Endpoint, includeRaw bool) error { + var snapshot *portainer.Snapshot + var err error + + if includeRaw { + snapshot, err = tx.Snapshot().Read(endpoint.ID) + } else { + snapshot, err = tx.Snapshot().ReadWithoutSnapshotRaw(endpoint.ID) + } + if tx.IsErrObjectNotFound(err) { endpoint.Snapshots = []portainer.DockerSnapshot{} endpoint.Kubernetes.Snapshots = []portainer.KubernetesSnapshot{} diff --git a/api/internal/testhelpers/datastore.go b/api/internal/testhelpers/datastore.go index f0bba23fd..19254f540 100644 --- a/api/internal/testhelpers/datastore.go +++ b/api/internal/testhelpers/datastore.go @@ -7,13 +7,17 @@ import ( "github.com/portainer/portainer/api/database" "github.com/portainer/portainer/api/dataservices" "github.com/portainer/portainer/api/dataservices/errors" + "github.com/portainer/portainer/api/slicesx" ) +var _ dataservices.DataStore = &testDatastore{} + type testDatastore struct { customTemplate dataservices.CustomTemplateService edgeGroup dataservices.EdgeGroupService edgeJob dataservices.EdgeJobService edgeStack dataservices.EdgeStackService + edgeStackStatus dataservices.EdgeStackStatusService endpoint dataservices.EndpointService endpointGroup dataservices.EndpointGroupService endpointRelation dataservices.EndpointRelationService @@ -51,8 +55,11 @@ func (d *testDatastore) CustomTemplate() dataservices.CustomTemplateService { re func (d *testDatastore) EdgeGroup() dataservices.EdgeGroupService { return d.edgeGroup } func (d *testDatastore) EdgeJob() dataservices.EdgeJobService { return d.edgeJob } func (d *testDatastore) EdgeStack() dataservices.EdgeStackService { return d.edgeStack } -func (d *testDatastore) Endpoint() dataservices.EndpointService { return d.endpoint } -func (d *testDatastore) EndpointGroup() dataservices.EndpointGroupService { return d.endpointGroup } +func (d *testDatastore) EdgeStackStatus() dataservices.EdgeStackStatusService { + return d.edgeStackStatus +} +func (d *testDatastore) Endpoint() dataservices.EndpointService 
{ return d.endpoint } +func (d *testDatastore) EndpointGroup() dataservices.EndpointGroupService { return d.endpointGroup } func (d *testDatastore) EndpointRelation() dataservices.EndpointRelationService { return d.endpointRelation @@ -108,9 +115,11 @@ type datastoreOption = func(d *testDatastore) func NewDatastore(options ...datastoreOption) *testDatastore { conn, _ := database.NewDatabase("boltdb", "", nil) d := testDatastore{connection: conn} + for _, o := range options { o(&d) } + return &d } @@ -126,6 +135,7 @@ func (s *stubSettingsService) Settings() (*portainer.Settings, error) { func (s *stubSettingsService) UpdateSettings(settings *portainer.Settings) error { s.settings = settings + return nil } @@ -138,19 +148,25 @@ func WithSettingsService(settings *portainer.Settings) datastoreOption { } type stubUserService struct { + dataservices.UserService + users []portainer.User } -func (s *stubUserService) BucketName() string { return "users" } -func (s *stubUserService) Read(ID portainer.UserID) (*portainer.User, error) { return nil, nil } -func (s *stubUserService) UserByUsername(username string) (*portainer.User, error) { return nil, nil } -func (s *stubUserService) ReadAll() ([]portainer.User, error) { return s.users, nil } +func (s *stubUserService) BucketName() string { return "users" } +func (s *stubUserService) ReadAll(predicates ...func(portainer.User) bool) ([]portainer.User, error) { + filtered := s.users + + for _, p := range predicates { + filtered = slicesx.Filter(filtered, p) + } + + return filtered, nil +} + func (s *stubUserService) UsersByRole(role portainer.UserRole) ([]portainer.User, error) { return s.users, nil } -func (s *stubUserService) Create(user *portainer.User) error { return nil } -func (s *stubUserService) Update(ID portainer.UserID, user *portainer.User) error { return nil } -func (s *stubUserService) Delete(ID portainer.UserID) error { return nil } // WithUsers testDatastore option that will instruct testDatastore to return provided users func WithUsers(us []portainer.User) datastoreOption { @@ -160,33 +176,22 @@ func WithUsers(us []portainer.User) datastoreOption { } type stubEdgeJobService struct { + dataservices.EdgeJobService + jobs []portainer.EdgeJob } -func (s *stubEdgeJobService) BucketName() string { return "edgejobs" } -func (s *stubEdgeJobService) ReadAll() ([]portainer.EdgeJob, error) { return s.jobs, nil } -func (s *stubEdgeJobService) Read(ID portainer.EdgeJobID) (*portainer.EdgeJob, error) { - return nil, nil -} +func (s *stubEdgeJobService) BucketName() string { return "edgejobs" } +func (s *stubEdgeJobService) ReadAll(predicates ...func(portainer.EdgeJob) bool) ([]portainer.EdgeJob, error) { + filtered := s.jobs -func (s *stubEdgeJobService) Create(edgeJob *portainer.EdgeJob) error { - return nil -} + for _, p := range predicates { + filtered = slicesx.Filter(filtered, p) + } -func (s *stubEdgeJobService) CreateWithID(ID portainer.EdgeJobID, edgeJob *portainer.EdgeJob) error { - return nil + return filtered, nil } -func (s *stubEdgeJobService) Update(ID portainer.EdgeJobID, edgeJob *portainer.EdgeJob) error { - return nil -} - -func (s *stubEdgeJobService) UpdateEdgeJobFunc(ID portainer.EdgeJobID, updateFunc func(edgeJob *portainer.EdgeJob)) error { - return nil -} -func (s *stubEdgeJobService) Delete(ID portainer.EdgeJobID) error { return nil } -func (s *stubEdgeJobService) GetNextIdentifier() int { return 0 } - // WithEdgeJobs option will instruct testDatastore to return provided jobs func WithEdgeJobs(js []portainer.EdgeJob) 
datastoreOption { return func(d *testDatastore) { @@ -195,6 +200,8 @@ func WithEdgeJobs(js []portainer.EdgeJob) datastoreOption { } type stubEndpointRelationService struct { + dataservices.EndpointRelationService + relations []portainer.EndpointRelation } @@ -213,10 +220,6 @@ func (s *stubEndpointRelationService) EndpointRelation(ID portainer.EndpointID) return nil, errors.ErrObjectNotFound } -func (s *stubEndpointRelationService) Create(EndpointRelation *portainer.EndpointRelation) error { - return nil -} - func (s *stubEndpointRelationService) UpdateEndpointRelation(ID portainer.EndpointID, relation *portainer.EndpointRelation) error { for i, r := range s.relations { if r.EndpointID == ID { @@ -227,10 +230,29 @@ func (s *stubEndpointRelationService) UpdateEndpointRelation(ID portainer.Endpoi return nil } -func (s *stubEndpointRelationService) DeleteEndpointRelation(ID portainer.EndpointID) error { +func (s *stubEndpointRelationService) AddEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error { + for _, endpointID := range endpointIDs { + for i, r := range s.relations { + if r.EndpointID == endpointID { + s.relations[i].EdgeStacks[edgeStackID] = true + } + } + } + + return nil +} + +func (s *stubEndpointRelationService) RemoveEndpointRelationsForEdgeStack(endpointIDs []portainer.EndpointID, edgeStackID portainer.EdgeStackID) error { + for _, endpointID := range endpointIDs { + for i, r := range s.relations { + if r.EndpointID == endpointID { + delete(s.relations[i].EdgeStacks, edgeStackID) + } + } + } + return nil } -func (s *stubEndpointRelationService) GetNextIdentifier() int { return 0 } // WithEndpointRelations option will instruct testDatastore to return provided jobs func WithEndpointRelations(relations []portainer.EndpointRelation) datastoreOption { @@ -330,6 +352,7 @@ func (s *stubEndpointService) EndpointsByTeamID(teamID portainer.TeamID) ([]port } } } + return endpoints, nil } @@ -341,34 +364,30 @@ func WithEndpoints(endpoints []portainer.Endpoint) datastoreOption { } type stubStacksService struct { + dataservices.StackService stacks []portainer.Stack } func (s *stubStacksService) BucketName() string { return "stacks" } -func (s *stubStacksService) Create(stack *portainer.Stack) error { - return nil -} - -func (s *stubStacksService) Update(ID portainer.StackID, stack *portainer.Stack) error { - return nil -} - -func (s *stubStacksService) Delete(ID portainer.StackID) error { - return nil -} - func (s *stubStacksService) Read(ID portainer.StackID) (*portainer.Stack, error) { for _, stack := range s.stacks { if stack.ID == ID { return &stack, nil } } + return nil, errors.ErrObjectNotFound } -func (s *stubStacksService) ReadAll() ([]portainer.Stack, error) { - return s.stacks, nil +func (s *stubStacksService) ReadAll(predicates ...func(portainer.Stack) bool) ([]portainer.Stack, error) { + filtered := s.stacks + + for _, p := range predicates { + filtered = slicesx.Filter(filtered, p) + } + + return filtered, nil } func (s *stubStacksService) StacksByEndpointID(endpointID portainer.EndpointID) ([]portainer.Stack, error) { @@ -379,6 +398,7 @@ func (s *stubStacksService) StacksByEndpointID(endpointID portainer.EndpointID) result = append(result, stack) } } + return result, nil } @@ -390,6 +410,7 @@ func (s *stubStacksService) RefreshableStacks() ([]portainer.Stack, error) { result = append(result, stack) } } + return result, nil } @@ -399,6 +420,7 @@ func (s *stubStacksService) StackByName(name string) (*portainer.Stack, error) { 
return &stack, nil } } + return nil, errors.ErrObjectNotFound } @@ -410,6 +432,7 @@ func (s *stubStacksService) StacksByName(name string) ([]portainer.Stack, error) result = append(result, stack) } } + return result, nil } @@ -419,6 +442,7 @@ func (s *stubStacksService) StackByWebhookID(webhookID string) (*portainer.Stack return &stack, nil } } + return nil, errors.ErrObjectNotFound } @@ -426,6 +450,10 @@ func (s *stubStacksService) GetNextIdentifier() int { return len(s.stacks) } +func (s *stubStacksService) Exists(ID portainer.StackID) (bool, error) { + return false, nil +} + // WithStacks option will instruct testDatastore to return provided stacks func WithStacks(stacks []portainer.Stack) datastoreOption { return func(d *testDatastore) { diff --git a/api/internal/testhelpers/git_service.go b/api/internal/testhelpers/git_service.go index 6af1b6459..6b1a352ee 100644 --- a/api/internal/testhelpers/git_service.go +++ b/api/internal/testhelpers/git_service.go @@ -1,6 +1,9 @@ package testhelpers -import portainer "github.com/portainer/portainer/api" +import ( + portainer "github.com/portainer/portainer/api" + gittypes "github.com/portainer/portainer/api/git/types" +) type gitService struct { cloneErr error @@ -15,18 +18,50 @@ func NewGitService(cloneErr error, id string) portainer.GitService { } } -func (g *gitService) CloneRepository(destination, repositoryURL, referenceName, username, password string, tlsSkipVerify bool) error { +func (g *gitService) CloneRepository( + destination, + repositoryURL, + referenceName, + username, + password string, + authType gittypes.GitCredentialAuthType, + tlsSkipVerify bool, +) error { return g.cloneErr } -func (g *gitService) LatestCommitID(repositoryURL, referenceName, username, password string, tlsSkipVerify bool) (string, error) { +func (g *gitService) LatestCommitID( + repositoryURL, + referenceName, + username, + password string, + authType gittypes.GitCredentialAuthType, + tlsSkipVerify bool, +) (string, error) { return g.id, nil } -func (g *gitService) ListRefs(repositoryURL, username, password string, hardRefresh bool, tlsSkipVerify bool) ([]string, error) { +func (g *gitService) ListRefs( + repositoryURL, + username, + password string, + authType gittypes.GitCredentialAuthType, + hardRefresh bool, + tlsSkipVerify bool, +) ([]string, error) { return nil, nil } -func (g *gitService) ListFiles(repositoryURL, referenceName, username, password string, dirOnly, hardRefresh bool, includedExts []string, tlsSkipVerify bool) ([]string, error) { +func (g *gitService) ListFiles( + repositoryURL, + referenceName, + username, + password string, + authType gittypes.GitCredentialAuthType, + dirOnly, + hardRefresh bool, + includedExts []string, + tlsSkipVerify bool, +) ([]string, error) { return nil, nil } diff --git a/api/internal/testhelpers/kube_client.go b/api/internal/testhelpers/kube_client.go new file mode 100644 index 000000000..550e7ce92 --- /dev/null +++ b/api/internal/testhelpers/kube_client.go @@ -0,0 +1,19 @@ +package testhelpers + +import ( + portainer "github.com/portainer/portainer/api" + models "github.com/portainer/portainer/api/http/models/kubernetes" +) + +type testKubeClient struct { + portainer.KubeClient +} + +func NewKubernetesClient() portainer.KubeClient { + return &testKubeClient{} +} + +// Event +func (kcl *testKubeClient) GetEvents(namespace string, resourceId string) ([]models.K8sEvent, error) { + return nil, nil +} diff --git a/api/internal/testhelpers/request_bouncer.go b/api/internal/testhelpers/request_bouncer.go index 
b89154549..0586dffef 100644 --- a/api/internal/testhelpers/request_bouncer.go +++ b/api/internal/testhelpers/request_bouncer.go @@ -60,6 +60,8 @@ func (testRequestBouncer) JWTAuthLookup(r *http.Request) (*portainer.TokenData, func (testRequestBouncer) RevokeJWT(jti string) {} +func (testRequestBouncer) DisableCSP() {} + // AddTestSecurityCookie adds a security cookie to the request func AddTestSecurityCookie(r *http.Request, jwt string) { r.AddCookie(&http.Cookie{ diff --git a/api/kubernetes/cli/access.go b/api/kubernetes/cli/access.go index 73f8d50af..6f254c296 100644 --- a/api/kubernetes/cli/access.go +++ b/api/kubernetes/cli/access.go @@ -143,3 +143,23 @@ func (kcl *KubeClient) GetNonAdminNamespaces(userID int, teamIDs []int, isRestri return nonAdminNamespaces, nil } + +// GetIsKubeAdmin retrieves true if client is admin +func (client *KubeClient) GetIsKubeAdmin() bool { + return client.IsKubeAdmin +} + +// UpdateIsKubeAdmin sets whether the kube client is admin +func (client *KubeClient) SetIsKubeAdmin(isKubeAdmin bool) { + client.IsKubeAdmin = isKubeAdmin +} + +// GetClientNonAdminNamespaces retrieves non-admin namespaces +func (client *KubeClient) GetClientNonAdminNamespaces() []string { + return client.NonAdminNamespaces +} + +// UpdateClientNonAdminNamespaces sets the client non admin namespace list +func (client *KubeClient) SetClientNonAdminNamespaces(nonAdminNamespaces []string) { + client.NonAdminNamespaces = nonAdminNamespaces +} diff --git a/api/kubernetes/cli/applications.go b/api/kubernetes/cli/applications.go index e68137c69..8dd4d72b2 100644 --- a/api/kubernetes/cli/applications.go +++ b/api/kubernetes/cli/applications.go @@ -12,45 +12,58 @@ import ( labels "k8s.io/apimachinery/pkg/labels" ) +// PortainerApplicationResources contains collections of various Kubernetes resources +// associated with a Portainer application. +type PortainerApplicationResources struct { + Pods []corev1.Pod + ReplicaSets []appsv1.ReplicaSet + Deployments []appsv1.Deployment + StatefulSets []appsv1.StatefulSet + DaemonSets []appsv1.DaemonSet + Services []corev1.Service + HorizontalPodAutoscalers []autoscalingv2.HorizontalPodAutoscaler +} + // GetAllKubernetesApplications gets a list of kubernetes workloads (or applications) across all namespaces in the cluster // if the user is an admin, all namespaces in the current k8s environment(endpoint) are fetched using the fetchApplications function. // otherwise, namespaces the non-admin user has access to will be used to filter the applications based on the allowed namespaces. -func (kcl *KubeClient) GetApplications(namespace, nodeName string, withDependencies bool) ([]models.K8sApplication, error) { +func (kcl *KubeClient) GetApplications(namespace, nodeName string) ([]models.K8sApplication, error) { if kcl.IsKubeAdmin { - return kcl.fetchApplications(namespace, nodeName, withDependencies) + return kcl.fetchApplications(namespace, nodeName) } - return kcl.fetchApplicationsForNonAdmin(namespace, nodeName, withDependencies) + return kcl.fetchApplicationsForNonAdmin(namespace, nodeName) } // fetchApplications fetches the applications in the namespaces the user has access to. // This function is called when the user is an admin. 
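// --- Editor's illustrative sketch, not part of the patch above ---
// A minimal usage sketch of the reworked listing API shown in this hunk: GetApplications
// no longer takes a withDependencies flag, and related resources travel together in
// PortainerApplicationResources. Assumes it lives in package cli alongside KubeClient;
// the namespace and node names below are placeholders, not values from the patch.
func exampleListApplications(kcl *KubeClient) ([]models.K8sApplication, error) {
	// All workloads in a single namespace, on any node.
	nsApps, err := kcl.GetApplications("my-namespace", "")
	if err != nil {
		return nil, err
	}

	// Workloads across all namespaces, limited to pods scheduled on one node.
	nodeApps, err := kcl.GetApplications("", "worker-node-1")
	if err != nil {
		return nil, err
	}

	return append(nsApps, nodeApps...), nil
}
// --- End of editor's sketch ---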
-func (kcl *KubeClient) fetchApplications(namespace, nodeName string, withDependencies bool) ([]models.K8sApplication, error) { +func (kcl *KubeClient) fetchApplications(namespace, nodeName string) ([]models.K8sApplication, error) { podListOptions := metav1.ListOptions{} if nodeName != "" { podListOptions.FieldSelector = "spec.nodeName=" + nodeName } - if !withDependencies { - // TODO: make sure not to fetch services in fetchAllApplicationsListResources from this call - pods, replicaSets, deployments, statefulSets, daemonSets, _, _, err := kcl.fetchAllApplicationsListResources(namespace, podListOptions) - if err != nil { - return nil, err - } - return kcl.convertPodsToApplications(pods, replicaSets, deployments, statefulSets, daemonSets, nil, nil) - } - - pods, replicaSets, deployments, statefulSets, daemonSets, services, hpas, err := kcl.fetchAllApplicationsListResources(namespace, podListOptions) + portainerApplicationResources, err := kcl.fetchAllApplicationsListResources(namespace, podListOptions) if err != nil { return nil, err } - return kcl.convertPodsToApplications(pods, replicaSets, deployments, statefulSets, daemonSets, services, hpas) + applications, err := kcl.convertPodsToApplications(portainerApplicationResources) + if err != nil { + return nil, err + } + + unhealthyApplications, err := fetchUnhealthyApplications(portainerApplicationResources) + if err != nil { + return nil, err + } + + return append(applications, unhealthyApplications...), nil } // fetchApplicationsForNonAdmin fetches the applications in the namespaces the user has access to. // This function is called when the user is not an admin. -func (kcl *KubeClient) fetchApplicationsForNonAdmin(namespace, nodeName string, withDependencies bool) ([]models.K8sApplication, error) { +func (kcl *KubeClient) fetchApplicationsForNonAdmin(namespace, nodeName string) ([]models.K8sApplication, error) { log.Debug().Msgf("Fetching applications for non-admin user: %v", kcl.NonAdminNamespaces) if len(kcl.NonAdminNamespaces) == 0 { @@ -62,28 +75,24 @@ func (kcl *KubeClient) fetchApplicationsForNonAdmin(namespace, nodeName string, podListOptions.FieldSelector = "spec.nodeName=" + nodeName } - if !withDependencies { - pods, replicaSets, _, _, _, _, _, err := kcl.fetchAllPodsAndReplicaSets(namespace, podListOptions) - if err != nil { - return nil, err - } - - return kcl.convertPodsToApplications(pods, replicaSets, nil, nil, nil, nil, nil) - } - - pods, replicaSets, deployments, statefulSets, daemonSets, services, hpas, err := kcl.fetchAllApplicationsListResources(namespace, podListOptions) + portainerApplicationResources, err := kcl.fetchAllApplicationsListResources(namespace, podListOptions) if err != nil { return nil, err } - applications, err := kcl.convertPodsToApplications(pods, replicaSets, deployments, statefulSets, daemonSets, services, hpas) + applications, err := kcl.convertPodsToApplications(portainerApplicationResources) + if err != nil { + return nil, err + } + + unhealthyApplications, err := fetchUnhealthyApplications(portainerApplicationResources) if err != nil { return nil, err } nonAdminNamespaceSet := kcl.buildNonAdminNamespacesMap() results := make([]models.K8sApplication, 0) - for _, application := range applications { + for _, application := range append(applications, unhealthyApplications...) 
{ if _, ok := nonAdminNamespaceSet[application.ResourcePool]; ok { results = append(results, application) } @@ -93,11 +102,11 @@ func (kcl *KubeClient) fetchApplicationsForNonAdmin(namespace, nodeName string, } // convertPodsToApplications processes pods and converts them to applications, ensuring uniqueness by owner reference. -func (kcl *KubeClient) convertPodsToApplications(pods []corev1.Pod, replicaSets []appsv1.ReplicaSet, deployments []appsv1.Deployment, statefulSets []appsv1.StatefulSet, daemonSets []appsv1.DaemonSet, services []corev1.Service, hpas []autoscalingv2.HorizontalPodAutoscaler) ([]models.K8sApplication, error) { +func (kcl *KubeClient) convertPodsToApplications(portainerApplicationResources PortainerApplicationResources) ([]models.K8sApplication, error) { applications := []models.K8sApplication{} processedOwners := make(map[string]struct{}) - for _, pod := range pods { + for _, pod := range portainerApplicationResources.Pods { if len(pod.OwnerReferences) > 0 { ownerUID := string(pod.OwnerReferences[0].UID) if _, exists := processedOwners[ownerUID]; exists { @@ -106,7 +115,7 @@ func (kcl *KubeClient) convertPodsToApplications(pods []corev1.Pod, replicaSets processedOwners[ownerUID] = struct{}{} } - application, err := kcl.ConvertPodToApplication(pod, replicaSets, deployments, statefulSets, daemonSets, services, hpas, true) + application, err := kcl.ConvertPodToApplication(pod, portainerApplicationResources, true) if err != nil { return nil, err } @@ -144,49 +153,13 @@ func (kcl *KubeClient) GetApplicationsResource(namespace, node string) (models.K return resource, nil } -// GetApplicationsFromConfigMap gets a list of applications that use a specific ConfigMap -// by checking all pods in the same namespace as the ConfigMap -func (kcl *KubeClient) GetApplicationNamesFromConfigMap(configMap models.K8sConfigMap, pods []corev1.Pod, replicaSets []appsv1.ReplicaSet) ([]string, error) { - applications := []string{} - for _, pod := range pods { - if pod.Namespace == configMap.Namespace { - if isPodUsingConfigMap(&pod, configMap.Name) { - application, err := kcl.ConvertPodToApplication(pod, replicaSets, nil, nil, nil, nil, nil, false) - if err != nil { - return nil, err - } - applications = append(applications, application.Name) - } - } - } - - return applications, nil -} - -func (kcl *KubeClient) GetApplicationNamesFromSecret(secret models.K8sSecret, pods []corev1.Pod, replicaSets []appsv1.ReplicaSet) ([]string, error) { - applications := []string{} - for _, pod := range pods { - if pod.Namespace == secret.Namespace { - if isPodUsingSecret(&pod, secret.Name) { - application, err := kcl.ConvertPodToApplication(pod, replicaSets, nil, nil, nil, nil, nil, false) - if err != nil { - return nil, err - } - applications = append(applications, application.Name) - } - } - } - - return applications, nil -} - // ConvertPodToApplication converts a pod to an application, updating owner references if necessary -func (kcl *KubeClient) ConvertPodToApplication(pod corev1.Pod, replicaSets []appsv1.ReplicaSet, deployments []appsv1.Deployment, statefulSets []appsv1.StatefulSet, daemonSets []appsv1.DaemonSet, services []corev1.Service, hpas []autoscalingv2.HorizontalPodAutoscaler, withResource bool) (*models.K8sApplication, error) { +func (kcl *KubeClient) ConvertPodToApplication(pod corev1.Pod, portainerApplicationResources PortainerApplicationResources, withResource bool) (*models.K8sApplication, error) { if isReplicaSetOwner(pod) { - updateOwnerReferenceToDeployment(&pod, replicaSets) + 
updateOwnerReferenceToDeployment(&pod, portainerApplicationResources.ReplicaSets) } - application := createApplication(&pod, deployments, statefulSets, daemonSets, services, hpas) + application := createApplicationFromPod(&pod, portainerApplicationResources) if application.ID == "" && application.Name == "" { return nil, nil } @@ -203,9 +176,9 @@ func (kcl *KubeClient) ConvertPodToApplication(pod corev1.Pod, replicaSets []app return &application, nil } -// createApplication creates a K8sApplication object from a pod +// createApplicationFromPod creates a K8sApplication object from a pod // it sets the application name, namespace, kind, image, stack id, stack name, and labels -func createApplication(pod *corev1.Pod, deployments []appsv1.Deployment, statefulSets []appsv1.StatefulSet, daemonSets []appsv1.DaemonSet, services []corev1.Service, hpas []autoscalingv2.HorizontalPodAutoscaler) models.K8sApplication { +func createApplicationFromPod(pod *corev1.Pod, portainerApplicationResources PortainerApplicationResources) models.K8sApplication { kind := "Pod" name := pod.Name @@ -221,120 +194,172 @@ func createApplication(pod *corev1.Pod, deployments []appsv1.Deployment, statefu switch kind { case "Deployment": - for _, deployment := range deployments { + for _, deployment := range portainerApplicationResources.Deployments { if deployment.Name == name && deployment.Namespace == pod.Namespace { - application.ApplicationType = "Deployment" - application.Kind = "Deployment" - application.ID = string(deployment.UID) - application.ResourcePool = deployment.Namespace - application.Name = name - application.Image = deployment.Spec.Template.Spec.Containers[0].Image - application.ApplicationOwner = deployment.Labels["io.portainer.kubernetes.application.owner"] - application.StackID = deployment.Labels["io.portainer.kubernetes.application.stackid"] - application.StackName = deployment.Labels["io.portainer.kubernetes.application.stack"] - application.Labels = deployment.Labels - application.MatchLabels = deployment.Spec.Selector.MatchLabels - application.CreationDate = deployment.CreationTimestamp.Time - application.TotalPodsCount = int(deployment.Status.Replicas) - application.RunningPodsCount = int(deployment.Status.ReadyReplicas) - application.DeploymentType = "Replicated" - application.Metadata = &models.Metadata{ - Labels: deployment.Labels, - } - + populateApplicationFromDeployment(&application, deployment) break } } - case "StatefulSet": - for _, statefulSet := range statefulSets { + for _, statefulSet := range portainerApplicationResources.StatefulSets { if statefulSet.Name == name && statefulSet.Namespace == pod.Namespace { - application.Kind = "StatefulSet" - application.ApplicationType = "StatefulSet" - application.ID = string(statefulSet.UID) - application.ResourcePool = statefulSet.Namespace - application.Name = name - application.Image = statefulSet.Spec.Template.Spec.Containers[0].Image - application.ApplicationOwner = statefulSet.Labels["io.portainer.kubernetes.application.owner"] - application.StackID = statefulSet.Labels["io.portainer.kubernetes.application.stackid"] - application.StackName = statefulSet.Labels["io.portainer.kubernetes.application.stack"] - application.Labels = statefulSet.Labels - application.MatchLabels = statefulSet.Spec.Selector.MatchLabels - application.CreationDate = statefulSet.CreationTimestamp.Time - application.TotalPodsCount = int(statefulSet.Status.Replicas) - application.RunningPodsCount = int(statefulSet.Status.ReadyReplicas) - application.DeploymentType = 
"Replicated" - application.Metadata = &models.Metadata{ - Labels: statefulSet.Labels, - } - + populateApplicationFromStatefulSet(&application, statefulSet) break } } - case "DaemonSet": - for _, daemonSet := range daemonSets { + for _, daemonSet := range portainerApplicationResources.DaemonSets { if daemonSet.Name == name && daemonSet.Namespace == pod.Namespace { - application.Kind = "DaemonSet" - application.ApplicationType = "DaemonSet" - application.ID = string(daemonSet.UID) - application.ResourcePool = daemonSet.Namespace - application.Name = name - application.Image = daemonSet.Spec.Template.Spec.Containers[0].Image - application.ApplicationOwner = daemonSet.Labels["io.portainer.kubernetes.application.owner"] - application.StackID = daemonSet.Labels["io.portainer.kubernetes.application.stackid"] - application.StackName = daemonSet.Labels["io.portainer.kubernetes.application.stack"] - application.Labels = daemonSet.Labels - application.MatchLabels = daemonSet.Spec.Selector.MatchLabels - application.CreationDate = daemonSet.CreationTimestamp.Time - application.TotalPodsCount = int(daemonSet.Status.DesiredNumberScheduled) - application.RunningPodsCount = int(daemonSet.Status.NumberReady) - application.DeploymentType = "Global" - application.Metadata = &models.Metadata{ - Labels: daemonSet.Labels, - } - + populateApplicationFromDaemonSet(&application, daemonSet) break } } - case "Pod": - runningPodsCount := 1 - if pod.Status.Phase != corev1.PodRunning { - runningPodsCount = 0 - } - - application.ApplicationType = "Pod" - application.Kind = "Pod" - application.ID = string(pod.UID) - application.ResourcePool = pod.Namespace - application.Name = pod.Name - application.Image = pod.Spec.Containers[0].Image - application.ApplicationOwner = pod.Labels["io.portainer.kubernetes.application.owner"] - application.StackID = pod.Labels["io.portainer.kubernetes.application.stackid"] - application.StackName = pod.Labels["io.portainer.kubernetes.application.stack"] - application.Labels = pod.Labels - application.MatchLabels = pod.Labels - application.CreationDate = pod.CreationTimestamp.Time - application.TotalPodsCount = 1 - application.RunningPodsCount = runningPodsCount - application.DeploymentType = string(pod.Status.Phase) - application.Metadata = &models.Metadata{ - Labels: pod.Labels, - } + populateApplicationFromPod(&application, *pod) } - if application.ID != "" && application.Name != "" && len(services) > 0 { - updateApplicationWithService(&application, services) + if application.ID != "" && application.Name != "" && len(portainerApplicationResources.Services) > 0 { + updateApplicationWithService(&application, portainerApplicationResources.Services) } - if application.ID != "" && application.Name != "" && len(hpas) > 0 { - updateApplicationWithHorizontalPodAutoscaler(&application, hpas) + if application.ID != "" && application.Name != "" && len(portainerApplicationResources.HorizontalPodAutoscalers) > 0 { + updateApplicationWithHorizontalPodAutoscaler(&application, portainerApplicationResources.HorizontalPodAutoscalers) } return application } +// createApplicationFromDeployment creates a K8sApplication from a Deployment +func createApplicationFromDeployment(deployment appsv1.Deployment) models.K8sApplication { + var app models.K8sApplication + populateApplicationFromDeployment(&app, deployment) + return app +} + +// createApplicationFromStatefulSet creates a K8sApplication from a StatefulSet +func createApplicationFromStatefulSet(statefulSet appsv1.StatefulSet) models.K8sApplication { + var 
app models.K8sApplication + populateApplicationFromStatefulSet(&app, statefulSet) + return app +} + +// createApplicationFromDaemonSet creates a K8sApplication from a DaemonSet +func createApplicationFromDaemonSet(daemonSet appsv1.DaemonSet) models.K8sApplication { + var app models.K8sApplication + populateApplicationFromDaemonSet(&app, daemonSet) + return app +} + +func populateApplicationFromDeployment(application *models.K8sApplication, deployment appsv1.Deployment) { + application.ApplicationType = "Deployment" + application.Kind = "Deployment" + application.ID = string(deployment.UID) + application.ResourcePool = deployment.Namespace + application.Name = deployment.Name + application.ApplicationOwner = deployment.Labels["io.portainer.kubernetes.application.owner"] + application.StackID = deployment.Labels["io.portainer.kubernetes.application.stackid"] + application.StackName = deployment.Labels["io.portainer.kubernetes.application.stack"] + application.Labels = deployment.Labels + application.MatchLabels = deployment.Spec.Selector.MatchLabels + application.CreationDate = deployment.CreationTimestamp.Time + application.TotalPodsCount = 0 + if deployment.Spec.Replicas != nil { + application.TotalPodsCount = int(*deployment.Spec.Replicas) + } + application.RunningPodsCount = int(deployment.Status.ReadyReplicas) + application.DeploymentType = "Replicated" + application.Metadata = &models.Metadata{ + Labels: deployment.Labels, + } + + // If the deployment has containers, use the first container's image + if len(deployment.Spec.Template.Spec.Containers) > 0 { + application.Image = deployment.Spec.Template.Spec.Containers[0].Image + } +} + +func populateApplicationFromStatefulSet(application *models.K8sApplication, statefulSet appsv1.StatefulSet) { + application.Kind = "StatefulSet" + application.ApplicationType = "StatefulSet" + application.ID = string(statefulSet.UID) + application.ResourcePool = statefulSet.Namespace + application.Name = statefulSet.Name + application.ApplicationOwner = statefulSet.Labels["io.portainer.kubernetes.application.owner"] + application.StackID = statefulSet.Labels["io.portainer.kubernetes.application.stackid"] + application.StackName = statefulSet.Labels["io.portainer.kubernetes.application.stack"] + application.Labels = statefulSet.Labels + application.MatchLabels = statefulSet.Spec.Selector.MatchLabels + application.CreationDate = statefulSet.CreationTimestamp.Time + application.TotalPodsCount = 0 + if statefulSet.Spec.Replicas != nil { + application.TotalPodsCount = int(*statefulSet.Spec.Replicas) + } + application.RunningPodsCount = int(statefulSet.Status.ReadyReplicas) + application.DeploymentType = "Replicated" + application.Metadata = &models.Metadata{ + Labels: statefulSet.Labels, + } + + // If the statefulSet has containers, use the first container's image + if len(statefulSet.Spec.Template.Spec.Containers) > 0 { + application.Image = statefulSet.Spec.Template.Spec.Containers[0].Image + } +} + +func populateApplicationFromDaemonSet(application *models.K8sApplication, daemonSet appsv1.DaemonSet) { + application.Kind = "DaemonSet" + application.ApplicationType = "DaemonSet" + application.ID = string(daemonSet.UID) + application.ResourcePool = daemonSet.Namespace + application.Name = daemonSet.Name + application.ApplicationOwner = daemonSet.Labels["io.portainer.kubernetes.application.owner"] + application.StackID = daemonSet.Labels["io.portainer.kubernetes.application.stackid"] + application.StackName = 
daemonSet.Labels["io.portainer.kubernetes.application.stack"] + application.Labels = daemonSet.Labels + application.MatchLabels = daemonSet.Spec.Selector.MatchLabels + application.CreationDate = daemonSet.CreationTimestamp.Time + application.TotalPodsCount = int(daemonSet.Status.DesiredNumberScheduled) + application.RunningPodsCount = int(daemonSet.Status.NumberReady) + application.DeploymentType = "Global" + application.Metadata = &models.Metadata{ + Labels: daemonSet.Labels, + } + + if len(daemonSet.Spec.Template.Spec.Containers) > 0 { + application.Image = daemonSet.Spec.Template.Spec.Containers[0].Image + } +} + +func populateApplicationFromPod(application *models.K8sApplication, pod corev1.Pod) { + runningPodsCount := 1 + if pod.Status.Phase != corev1.PodRunning { + runningPodsCount = 0 + } + + application.ApplicationType = "Pod" + application.Kind = "Pod" + application.ID = string(pod.UID) + application.ResourcePool = pod.Namespace + application.Name = pod.Name + application.ApplicationOwner = pod.Labels["io.portainer.kubernetes.application.owner"] + application.StackID = pod.Labels["io.portainer.kubernetes.application.stackid"] + application.StackName = pod.Labels["io.portainer.kubernetes.application.stack"] + application.Labels = pod.Labels + application.MatchLabels = pod.Labels + application.CreationDate = pod.CreationTimestamp.Time + application.TotalPodsCount = 1 + application.RunningPodsCount = runningPodsCount + application.DeploymentType = string(pod.Status.Phase) + application.Metadata = &models.Metadata{ + Labels: pod.Labels, + } + + // If the pod has containers, use the first container's image + if len(pod.Spec.Containers) > 0 { + application.Image = pod.Spec.Containers[0].Image + } +} + // updateApplicationWithService updates the application with the services that match the application's selector match labels // and are in the same namespace as the application func updateApplicationWithService(application *models.K8sApplication, services []corev1.Service) { @@ -408,21 +433,23 @@ func (kcl *KubeClient) GetApplicationFromServiceSelector(pods []corev1.Pod, serv func (kcl *KubeClient) GetApplicationConfigurationOwnersFromConfigMap(configMap models.K8sConfigMap, pods []corev1.Pod, replicaSets []appsv1.ReplicaSet) ([]models.K8sConfigurationOwnerResource, error) { configurationOwners := []models.K8sConfigurationOwnerResource{} for _, pod := range pods { - if pod.Namespace == configMap.Namespace { - if isPodUsingConfigMap(&pod, configMap.Name) { - application, err := kcl.ConvertPodToApplication(pod, replicaSets, nil, nil, nil, nil, nil, false) - if err != nil { - return nil, err - } + if isPodUsingConfigMap(&pod, configMap) { + kind := "Pod" + name := pod.Name - if application != nil { - configurationOwners = append(configurationOwners, models.K8sConfigurationOwnerResource{ - Name: application.Name, - ResourceKind: application.Kind, - Id: application.UID, - }) - } + if len(pod.OwnerReferences) > 0 { + kind = pod.OwnerReferences[0].Kind + name = pod.OwnerReferences[0].Name } + + if isReplicaSetOwner(pod) { + updateOwnerReferenceToDeployment(&pod, replicaSets) + } + + configurationOwners = append(configurationOwners, models.K8sConfigurationOwnerResource{ + Name: name, + ResourceKind: kind, + }) } } @@ -434,23 +461,106 @@ func (kcl *KubeClient) GetApplicationConfigurationOwnersFromConfigMap(configMap func (kcl *KubeClient) GetApplicationConfigurationOwnersFromSecret(secret models.K8sSecret, pods []corev1.Pod, replicaSets []appsv1.ReplicaSet) ([]models.K8sConfigurationOwnerResource, 
error) { configurationOwners := []models.K8sConfigurationOwnerResource{} for _, pod := range pods { - if pod.Namespace == secret.Namespace { - if isPodUsingSecret(&pod, secret.Name) { - application, err := kcl.ConvertPodToApplication(pod, replicaSets, nil, nil, nil, nil, nil, false) - if err != nil { - return nil, err - } + if isPodUsingSecret(&pod, secret) { + kind := "Pod" + name := pod.Name - if application != nil { - configurationOwners = append(configurationOwners, models.K8sConfigurationOwnerResource{ - Name: application.Name, - ResourceKind: application.Kind, - Id: application.UID, - }) - } + if len(pod.OwnerReferences) > 0 { + kind = pod.OwnerReferences[0].Kind + name = pod.OwnerReferences[0].Name } + + if isReplicaSetOwner(pod) { + updateOwnerReferenceToDeployment(&pod, replicaSets) + } + + configurationOwners = append(configurationOwners, models.K8sConfigurationOwnerResource{ + Name: name, + ResourceKind: kind, + }) } } return configurationOwners, nil } + +// fetchUnhealthyApplications fetches applications that failed to schedule any pods +// due to issues like missing resource limits or other scheduling constraints +func fetchUnhealthyApplications(resources PortainerApplicationResources) ([]models.K8sApplication, error) { + var unhealthyApplications []models.K8sApplication + + // Process Deployments + for _, deployment := range resources.Deployments { + if hasNoScheduledPods(deployment) { + app := createApplicationFromDeployment(deployment) + addRelatedResourcesToApplication(&app, resources) + unhealthyApplications = append(unhealthyApplications, app) + } + } + + // Process StatefulSets + for _, statefulSet := range resources.StatefulSets { + if hasNoScheduledPods(statefulSet) { + app := createApplicationFromStatefulSet(statefulSet) + addRelatedResourcesToApplication(&app, resources) + unhealthyApplications = append(unhealthyApplications, app) + } + } + + // Process DaemonSets + for _, daemonSet := range resources.DaemonSets { + if hasNoScheduledPods(daemonSet) { + app := createApplicationFromDaemonSet(daemonSet) + addRelatedResourcesToApplication(&app, resources) + unhealthyApplications = append(unhealthyApplications, app) + } + } + + return unhealthyApplications, nil +} + +// addRelatedResourcesToApplication adds Services and HPA information to the application +func addRelatedResourcesToApplication(app *models.K8sApplication, resources PortainerApplicationResources) { + if app.ID == "" || app.Name == "" { + return + } + + if len(resources.Services) > 0 { + updateApplicationWithService(app, resources.Services) + } + + if len(resources.HorizontalPodAutoscalers) > 0 { + updateApplicationWithHorizontalPodAutoscaler(app, resources.HorizontalPodAutoscalers) + } +} + +// hasNoScheduledPods checks if a workload has completely failed to schedule any pods +// it checks for no replicas desired, i.e. 
nothing to schedule and see if any pods are running +// if any pods exist at all (even if not ready), it returns false +func hasNoScheduledPods(obj interface{}) bool { + switch resource := obj.(type) { + case appsv1.Deployment: + if resource.Status.Replicas > 0 { + return false + } + + return resource.Status.ReadyReplicas == 0 && resource.Status.AvailableReplicas == 0 + + case appsv1.StatefulSet: + if resource.Status.Replicas > 0 { + return false + } + + return resource.Status.ReadyReplicas == 0 && resource.Status.CurrentReplicas == 0 + + case appsv1.DaemonSet: + if resource.Status.CurrentNumberScheduled > 0 || resource.Status.NumberMisscheduled > 0 { + return false + } + + return resource.Status.NumberReady == 0 && resource.Status.DesiredNumberScheduled > 0 + + default: + return false + } +} diff --git a/api/kubernetes/cli/applications_test.go b/api/kubernetes/cli/applications_test.go new file mode 100644 index 000000000..81a5cfb71 --- /dev/null +++ b/api/kubernetes/cli/applications_test.go @@ -0,0 +1,461 @@ +package cli + +import ( + "context" + "testing" + + models "github.com/portainer/portainer/api/http/models/kubernetes" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/fake" +) + +// Helper functions to create test resources +func createTestDeployment(name, namespace string, replicas int32) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + UID: types.UID("deploy-" + name), + Labels: map[string]string{ + "app": name, + }, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": name, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": name, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: name, + Image: "nginx:latest", + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{}, + Requests: corev1.ResourceList{}, + }, + }, + }, + }, + }, + }, + Status: appsv1.DeploymentStatus{ + Replicas: replicas, + ReadyReplicas: replicas, + }, + } +} + +func createTestReplicaSet(name, namespace, deploymentName string) *appsv1.ReplicaSet { + return &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + UID: types.UID("rs-" + name), + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Name: deploymentName, + UID: types.UID("deploy-" + deploymentName), + }, + }, + }, + Spec: appsv1.ReplicaSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": deploymentName, + }, + }, + }, + } +} + +func createTestStatefulSet(name, namespace string, replicas int32) *appsv1.StatefulSet { + return &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + UID: types.UID("sts-" + name), + Labels: map[string]string{ + "app": name, + }, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": name, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": name, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: name, + Image: "redis:latest", + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{}, + 
Requests: corev1.ResourceList{}, + }, + }, + }, + }, + }, + }, + Status: appsv1.StatefulSetStatus{ + Replicas: replicas, + ReadyReplicas: replicas, + }, + } +} + +func createTestDaemonSet(name, namespace string) *appsv1.DaemonSet { + return &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + UID: types.UID("ds-" + name), + Labels: map[string]string{ + "app": name, + }, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": name, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": name, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: name, + Image: "fluentd:latest", + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{}, + Requests: corev1.ResourceList{}, + }, + }, + }, + }, + }, + }, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 2, + NumberReady: 2, + }, + } +} + +func createTestPod(name, namespace, ownerKind, ownerName string, isRunning bool) *corev1.Pod { + phase := corev1.PodPending + if isRunning { + phase = corev1.PodRunning + } + + var ownerReferences []metav1.OwnerReference + if ownerKind != "" && ownerName != "" { + ownerReferences = []metav1.OwnerReference{ + { + Kind: ownerKind, + Name: ownerName, + UID: types.UID(ownerKind + "-" + ownerName), + }, + } + } + + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + UID: types.UID("pod-" + name), + OwnerReferences: ownerReferences, + Labels: map[string]string{ + "app": ownerName, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "container-" + name, + Image: "busybox:latest", + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{}, + Requests: corev1.ResourceList{}, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: phase, + }, + } +} + +func createTestService(name, namespace string, selector map[string]string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + UID: types.UID("svc-" + name), + }, + Spec: corev1.ServiceSpec{ + Selector: selector, + Type: corev1.ServiceTypeClusterIP, + }, + } +} + +func TestGetApplications(t *testing.T) { + t.Run("Admin user - Mix of deployments, statefulsets and daemonsets with and without pods", func(t *testing.T) { + // Create a fake K8s client + fakeClient := fake.NewSimpleClientset() + + // Setup the test namespace + namespace := "test-namespace" + defaultNamespace := "default" + + // Create resources in the test namespace + // 1. 
Deployment with pods + deployWithPods := createTestDeployment("deploy-with-pods", namespace, 2) + _, err := fakeClient.AppsV1().Deployments(namespace).Create(context.TODO(), deployWithPods, metav1.CreateOptions{}) + assert.NoError(t, err) + + replicaSet := createTestReplicaSet("rs-deploy-with-pods", namespace, "deploy-with-pods") + _, err = fakeClient.AppsV1().ReplicaSets(namespace).Create(context.TODO(), replicaSet, metav1.CreateOptions{}) + assert.NoError(t, err) + + pod1 := createTestPod("pod1-deploy", namespace, "ReplicaSet", "rs-deploy-with-pods", true) + _, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod1, metav1.CreateOptions{}) + assert.NoError(t, err) + + pod2 := createTestPod("pod2-deploy", namespace, "ReplicaSet", "rs-deploy-with-pods", true) + _, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod2, metav1.CreateOptions{}) + assert.NoError(t, err) + + // 2. Deployment without pods (scaled to 0) + deployNoPods := createTestDeployment("deploy-no-pods", namespace, 0) + _, err = fakeClient.AppsV1().Deployments(namespace).Create(context.TODO(), deployNoPods, metav1.CreateOptions{}) + assert.NoError(t, err) + + // 3. StatefulSet with pods + stsWithPods := createTestStatefulSet("sts-with-pods", namespace, 1) + _, err = fakeClient.AppsV1().StatefulSets(namespace).Create(context.TODO(), stsWithPods, metav1.CreateOptions{}) + assert.NoError(t, err) + + pod3 := createTestPod("pod1-sts", namespace, "StatefulSet", "sts-with-pods", true) + _, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod3, metav1.CreateOptions{}) + assert.NoError(t, err) + + // 4. StatefulSet without pods + stsNoPods := createTestStatefulSet("sts-no-pods", namespace, 0) + _, err = fakeClient.AppsV1().StatefulSets(namespace).Create(context.TODO(), stsNoPods, metav1.CreateOptions{}) + assert.NoError(t, err) + + // 5. DaemonSet with pods + dsWithPods := createTestDaemonSet("ds-with-pods", namespace) + _, err = fakeClient.AppsV1().DaemonSets(namespace).Create(context.TODO(), dsWithPods, metav1.CreateOptions{}) + assert.NoError(t, err) + + pod4 := createTestPod("pod1-ds", namespace, "DaemonSet", "ds-with-pods", true) + _, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod4, metav1.CreateOptions{}) + assert.NoError(t, err) + + pod5 := createTestPod("pod2-ds", namespace, "DaemonSet", "ds-with-pods", true) + _, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod5, metav1.CreateOptions{}) + assert.NoError(t, err) + + // 6. Naked Pod (no owner reference) + nakedPod := createTestPod("naked-pod", namespace, "", "", true) + _, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), nakedPod, metav1.CreateOptions{}) + assert.NoError(t, err) + + // 7. Resources in another namespace + deployOtherNs := createTestDeployment("deploy-other-ns", defaultNamespace, 1) + _, err = fakeClient.AppsV1().Deployments(defaultNamespace).Create(context.TODO(), deployOtherNs, metav1.CreateOptions{}) + assert.NoError(t, err) + + podOtherNs := createTestPod("pod-other-ns", defaultNamespace, "Deployment", "deploy-other-ns", true) + _, err = fakeClient.CoreV1().Pods(defaultNamespace).Create(context.TODO(), podOtherNs, metav1.CreateOptions{}) + assert.NoError(t, err) + + // 8. 
Add a service (dependency) + service := createTestService("svc-deploy", namespace, map[string]string{"app": "deploy-with-pods"}) + _, err = fakeClient.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) + assert.NoError(t, err) + + // Create the KubeClient with admin privileges + kubeClient := &KubeClient{ + cli: fakeClient, + instanceID: "test-instance", + IsKubeAdmin: true, + } + + // Test cases + + // 1. All resources, no filtering + t.Run("All resources with dependencies", func(t *testing.T) { + apps, err := kubeClient.GetApplications("", "") + assert.NoError(t, err) + + // We expect 7 resources: 2 deployments + 2 statefulsets + 1 daemonset + 1 naked pod + 1 deployment in other namespace + // Note: Each controller with pods should count once, not per pod + assert.Equal(t, 7, len(apps)) + + // Verify one of the deployments has services attached + appsWithServices := []models.K8sApplication{} + for _, app := range apps { + if len(app.Services) > 0 { + appsWithServices = append(appsWithServices, app) + } + } + assert.Equal(t, 1, len(appsWithServices)) + assert.Equal(t, "deploy-with-pods", appsWithServices[0].Name) + }) + + // 2. Filter by namespace + t.Run("Filter by namespace", func(t *testing.T) { + apps, err := kubeClient.GetApplications(namespace, "") + assert.NoError(t, err) + + // We expect 6 resources in the test namespace + assert.Equal(t, 6, len(apps)) + + // Verify resources from other namespaces are not included + for _, app := range apps { + assert.Equal(t, namespace, app.ResourcePool) + } + }) + }) + + t.Run("Non-admin user - Resources filtered by accessible namespaces", func(t *testing.T) { + // Create a fake K8s client + fakeClient := fake.NewSimpleClientset() + + // Setup the test namespaces + namespace1 := "allowed-ns" + namespace2 := "restricted-ns" + + // Create resources in the allowed namespace + sts1 := createTestStatefulSet("sts-allowed", namespace1, 1) + _, err := fakeClient.AppsV1().StatefulSets(namespace1).Create(context.TODO(), sts1, metav1.CreateOptions{}) + assert.NoError(t, err) + + pod1 := createTestPod("pod-allowed", namespace1, "StatefulSet", "sts-allowed", true) + _, err = fakeClient.CoreV1().Pods(namespace1).Create(context.TODO(), pod1, metav1.CreateOptions{}) + assert.NoError(t, err) + + // Add a StatefulSet without pods in the allowed namespace + stsNoPods := createTestStatefulSet("sts-no-pods-allowed", namespace1, 0) + _, err = fakeClient.AppsV1().StatefulSets(namespace1).Create(context.TODO(), stsNoPods, metav1.CreateOptions{}) + assert.NoError(t, err) + + // Create resources in the restricted namespace + sts2 := createTestStatefulSet("sts-restricted", namespace2, 1) + _, err = fakeClient.AppsV1().StatefulSets(namespace2).Create(context.TODO(), sts2, metav1.CreateOptions{}) + assert.NoError(t, err) + + pod2 := createTestPod("pod-restricted", namespace2, "StatefulSet", "sts-restricted", true) + _, err = fakeClient.CoreV1().Pods(namespace2).Create(context.TODO(), pod2, metav1.CreateOptions{}) + assert.NoError(t, err) + + // Create the KubeClient with non-admin privileges (only allowed namespace1) + kubeClient := &KubeClient{ + cli: fakeClient, + instanceID: "test-instance", + IsKubeAdmin: false, + NonAdminNamespaces: []string{namespace1}, + } + + // Test that only resources from allowed namespace are returned + apps, err := kubeClient.GetApplications("", "") + assert.NoError(t, err) + + // We expect 2 resources from the allowed namespace (1 sts with pod + 1 sts without pod) + assert.Equal(t, 2, len(apps)) + + // Verify 
resources are from the allowed namespace + for _, app := range apps { + assert.Equal(t, namespace1, app.ResourcePool) + assert.Equal(t, "StatefulSet", app.Kind) + } + + // Verify names of returned resources + stsNames := make(map[string]bool) + for _, app := range apps { + stsNames[app.Name] = true + } + + assert.True(t, stsNames["sts-allowed"], "Expected StatefulSet 'sts-allowed' was not found") + assert.True(t, stsNames["sts-no-pods-allowed"], "Expected StatefulSet 'sts-no-pods-allowed' was not found") + }) + + t.Run("Filter by node name", func(t *testing.T) { + // Create a fake K8s client + fakeClient := fake.NewSimpleClientset() + + // Setup test namespace + namespace := "node-filter-ns" + nodeName := "worker-node-1" + + // Create a deployment with pods on specific node + deploy := createTestDeployment("node-deploy", namespace, 2) + _, err := fakeClient.AppsV1().Deployments(namespace).Create(context.TODO(), deploy, metav1.CreateOptions{}) + assert.NoError(t, err) + + // Create ReplicaSet for the deployment + rs := createTestReplicaSet("rs-node-deploy", namespace, "node-deploy") + _, err = fakeClient.AppsV1().ReplicaSets(namespace).Create(context.TODO(), rs, metav1.CreateOptions{}) + assert.NoError(t, err) + + // Create 2 pods, one on the specified node, one on a different node + pod1 := createTestPod("pod-on-node", namespace, "ReplicaSet", "rs-node-deploy", true) + pod1.Spec.NodeName = nodeName + _, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod1, metav1.CreateOptions{}) + assert.NoError(t, err) + + pod2 := createTestPod("pod-other-node", namespace, "ReplicaSet", "rs-node-deploy", true) + pod2.Spec.NodeName = "worker-node-2" + _, err = fakeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod2, metav1.CreateOptions{}) + assert.NoError(t, err) + + // Create the KubeClient + kubeClient := &KubeClient{ + cli: fakeClient, + instanceID: "test-instance", + IsKubeAdmin: true, + } + + // Test filtering by node name + apps, err := kubeClient.GetApplications(namespace, nodeName) + assert.NoError(t, err) + + // We expect to find only the pod on the specified node + assert.Equal(t, 1, len(apps)) + if len(apps) > 0 { + assert.Equal(t, "node-deploy", apps[0].Name) + } + }) +} diff --git a/api/kubernetes/cli/client.go b/api/kubernetes/cli/client.go index ce76f725f..550ade1d3 100644 --- a/api/kubernetes/cli/client.go +++ b/api/kubernetes/cli/client.go @@ -47,13 +47,6 @@ type ( } ) -func NewKubeClientFromClientset(cli *kubernetes.Clientset) *KubeClient { - return &KubeClient{ - cli: cli, - instanceID: "", - } -} - // NewClientFactory returns a new instance of a ClientFactory func NewClientFactory(signatureService portainer.DigitalSignatureService, reverseTunnelService portainer.ReverseTunnelService, dataStore dataservices.DataStore, instanceID, addrHTTPS, userSessionTimeout string) (*ClientFactory, error) { if userSessionTimeout == "" { @@ -84,9 +77,30 @@ func (factory *ClientFactory) ClearClientCache() { factory.endpointProxyClients.Flush() } +// ClearClientCache removes all cached kube clients for a userId +func (factory *ClientFactory) ClearUserClientCache(userID string) { + for key := range factory.endpointProxyClients.Items() { + if strings.HasSuffix(key, "."+userID) { + factory.endpointProxyClients.Delete(key) + } + } +} + // Remove the cached kube client so a new one can be created func (factory *ClientFactory) RemoveKubeClient(endpointID portainer.EndpointID) { factory.endpointProxyClients.Delete(strconv.Itoa(int(endpointID))) + + endpointPrefix := 
strconv.Itoa(int(endpointID)) + "." + + for key := range factory.endpointProxyClients.Items() { + if strings.HasPrefix(key, endpointPrefix) { + factory.endpointProxyClients.Delete(key) + } + } +} + +func (factory *ClientFactory) GetAddrHTTPS() string { + return factory.AddrHTTPS } // GetPrivilegedKubeClient checks if an existing client is already registered for the environment(endpoint) and returns it if one is found. @@ -107,6 +121,24 @@ func (factory *ClientFactory) GetPrivilegedKubeClient(endpoint *portainer.Endpoi return kcl, nil } +// GetPrivilegedUserKubeClient checks if an existing admin client is already registered for the environment(endpoint) and user and returns it if one is found. +// If no client is registered, it will create a new client, register it, and returns it. +func (factory *ClientFactory) GetPrivilegedUserKubeClient(endpoint *portainer.Endpoint, userID string) (*KubeClient, error) { + key := strconv.Itoa(int(endpoint.ID)) + ".admin." + userID + pcl, ok := factory.endpointProxyClients.Get(key) + if ok { + return pcl.(*KubeClient), nil + } + + kcl, err := factory.createCachedPrivilegedKubeClient(endpoint) + if err != nil { + return nil, err + } + + factory.endpointProxyClients.Set(key, kcl, cache.DefaultExpiration) + return kcl, nil +} + // GetProxyKubeClient retrieves a KubeClient from the cache. You should be // calling SetProxyKubeClient before first. It is normally, called the // kubernetes middleware. @@ -159,8 +191,9 @@ func (factory *ClientFactory) createCachedPrivilegedKubeClient(endpoint *portain } return &KubeClient{ - cli: cli, - instanceID: factory.instanceID, + cli: cli, + instanceID: factory.instanceID, + IsKubeAdmin: true, }, nil } diff --git a/api/kubernetes/cli/client_test.go b/api/kubernetes/cli/client_test.go new file mode 100644 index 000000000..993a966e3 --- /dev/null +++ b/api/kubernetes/cli/client_test.go @@ -0,0 +1,22 @@ +package cli + +import ( + "testing" +) + +func TestClearUserClientCache(t *testing.T) { + factory, _ := NewClientFactory(nil, nil, nil, "", "", "") + kcl := &KubeClient{} + factory.endpointProxyClients.Set("12.1", kcl, 0) + factory.endpointProxyClients.Set("12.12", kcl, 0) + factory.endpointProxyClients.Set("12", kcl, 0) + + factory.ClearUserClientCache("12") + + if len(factory.endpointProxyClients.Items()) != 2 { + t.Errorf("Incorrect clients cached after clearUserClientCache;\ngot=\n%d\nwant=\n%d", len(factory.endpointProxyClients.Items()), 2) + } + if _, ok := factory.GetProxyKubeClient("12", "12"); ok { + t.Errorf("Expected not to find client cache for user after clear") + } +} diff --git a/api/kubernetes/cli/configmap.go b/api/kubernetes/cli/configmap.go index 57f36cf74..fafa81346 100644 --- a/api/kubernetes/cli/configmap.go +++ b/api/kubernetes/cli/configmap.go @@ -7,6 +7,7 @@ import ( models "github.com/portainer/portainer/api/http/models/kubernetes" "github.com/rs/zerolog/log" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -24,7 +25,7 @@ func (kcl *KubeClient) GetConfigMaps(namespace string) ([]models.K8sConfigMap, e // fetchConfigMapsForNonAdmin fetches the configMaps in the namespaces the user has access to. // This function is called when the user is not an admin. 
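// --- Editor's illustrative sketch, not part of the patch above ---
// A minimal sketch of how the per-user client cache introduced above might be used:
// GetPrivilegedUserKubeClient caches an admin client under "<endpointID>.admin.<userID>",
// and ClearUserClientCache drops every cached entry whose key ends in ".<userID>",
// for example when that user's access changes. The endpoint and user ID below are
// placeholders, not values from the patch.
func exampleUserScopedKubeClient(factory *ClientFactory, endpoint *portainer.Endpoint, userID string) (*KubeClient, error) {
	kcl, err := factory.GetPrivilegedUserKubeClient(endpoint, userID)
	if err != nil {
		return nil, err
	}

	// Later, when the user's permissions change, drop only that user's cached clients.
	factory.ClearUserClientCache(userID)

	return kcl, nil
}
// --- End of editor's sketch ---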
func (kcl *KubeClient) fetchConfigMapsForNonAdmin(namespace string) ([]models.K8sConfigMap, error) { - log.Debug().Msgf("Fetching volumes for non-admin user: %v", kcl.NonAdminNamespaces) + log.Debug().Msgf("Fetching configMaps for non-admin user: %v", kcl.NonAdminNamespaces) if len(kcl.NonAdminNamespaces) == 0 { return nil, nil @@ -95,35 +96,28 @@ func parseConfigMap(configMap *corev1.ConfigMap, withData bool) models.K8sConfig return result } -// CombineConfigMapsWithApplications combines the config maps with the applications that use them. +// SetConfigMapsIsUsed combines the config maps with the applications that use them. // the function fetches all the pods and replica sets in the cluster and checks if the config map is used by any of the pods. // if the config map is used by a pod, the application that uses the pod is added to the config map. // otherwise, the config map is returned as is. -func (kcl *KubeClient) CombineConfigMapsWithApplications(configMaps []models.K8sConfigMap) ([]models.K8sConfigMap, error) { - updatedConfigMaps := make([]models.K8sConfigMap, len(configMaps)) - - pods, replicaSets, _, _, _, _, _, err := kcl.fetchAllPodsAndReplicaSets("", metav1.ListOptions{}) +func (kcl *KubeClient) SetConfigMapsIsUsed(configMaps *[]models.K8sConfigMap) error { + portainerApplicationResources, err := kcl.fetchAllApplicationsListResources("", metav1.ListOptions{}) if err != nil { - return nil, fmt.Errorf("an error occurred during the CombineConfigMapsWithApplications operation, unable to fetch pods and replica sets. Error: %w", err) + return fmt.Errorf("an error occurred during the SetConfigMapsIsUsed operation, unable to fetch Portainer application resources. Error: %w", err) } - for index, configMap := range configMaps { - updatedConfigMap := configMap + for i := range *configMaps { + configMap := &(*configMaps)[i] - applicationConfigurationOwners, err := kcl.GetApplicationConfigurationOwnersFromConfigMap(configMap, pods, replicaSets) - if err != nil { - return nil, fmt.Errorf("an error occurred during the CombineConfigMapsWithApplications operation, unable to get applications from config map. Error: %w", err) + for _, pod := range portainerApplicationResources.Pods { + if isPodUsingConfigMap(&pod, *configMap) { + configMap.IsUsed = true + break + } } - - if len(applicationConfigurationOwners) > 0 { - updatedConfigMap.ConfigurationOwnerResources = applicationConfigurationOwners - updatedConfigMap.IsUsed = true - } - - updatedConfigMaps[index] = updatedConfigMap } - return updatedConfigMaps, nil + return nil } // CombineConfigMapWithApplications combines the config map with the applications that use it. @@ -141,20 +135,22 @@ func (kcl *KubeClient) CombineConfigMapWithApplications(configMap models.K8sConf break } + var replicaSets *appsv1.ReplicaSetList if containsReplicaSetOwner { - replicaSets, err := kcl.cli.AppsV1().ReplicaSets(configMap.Namespace).List(context.Background(), metav1.ListOptions{}) + replicaSets, err = kcl.cli.AppsV1().ReplicaSets(configMap.Namespace).List(context.Background(), metav1.ListOptions{}) if err != nil { return models.K8sConfigMap{}, fmt.Errorf("an error occurred during the CombineConfigMapWithApplications operation, unable to get replica sets. 
Error: %w", err) } + } - applicationConfigurationOwners, err := kcl.GetApplicationConfigurationOwnersFromConfigMap(configMap, pods.Items, replicaSets.Items) - if err != nil { - return models.K8sConfigMap{}, fmt.Errorf("an error occurred during the CombineConfigMapWithApplications operation, unable to get applications from config map. Error: %w", err) - } + applicationConfigurationOwners, err := kcl.GetApplicationConfigurationOwnersFromConfigMap(configMap, pods.Items, replicaSets.Items) + if err != nil { + return models.K8sConfigMap{}, fmt.Errorf("an error occurred during the CombineConfigMapWithApplications operation, unable to get applications from config map. Error: %w", err) + } - if len(applicationConfigurationOwners) > 0 { - configMap.ConfigurationOwnerResources = applicationConfigurationOwners - } + if len(applicationConfigurationOwners) > 0 { + configMap.ConfigurationOwnerResources = applicationConfigurationOwners + configMap.IsUsed = true } return configMap, nil diff --git a/api/kubernetes/cli/event.go b/api/kubernetes/cli/event.go new file mode 100644 index 000000000..03472fca6 --- /dev/null +++ b/api/kubernetes/cli/event.go @@ -0,0 +1,93 @@ +package cli + +import ( + "context" + + models "github.com/portainer/portainer/api/http/models/kubernetes" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GetEvents gets all the Events for a given namespace and resource +// If the user is a kube admin, it returns all events in the namespace +// Otherwise, it returns only the events in the non-admin namespaces +func (kcl *KubeClient) GetEvents(namespace string, resourceId string) ([]models.K8sEvent, error) { + if kcl.IsKubeAdmin { + return kcl.fetchAllEvents(namespace, resourceId) + } + + return kcl.fetchEventsForNonAdmin(namespace, resourceId) +} + +// fetchEventsForNonAdmin returns all events in the given namespace and resource +// It returns only the events in the non-admin namespaces +func (kcl *KubeClient) fetchEventsForNonAdmin(namespace string, resourceId string) ([]models.K8sEvent, error) { + if len(kcl.NonAdminNamespaces) == 0 { + return nil, nil + } + + events, err := kcl.fetchAllEvents(namespace, resourceId) + if err != nil { + return nil, err + } + + nonAdminNamespaceSet := kcl.buildNonAdminNamespacesMap() + results := make([]models.K8sEvent, 0) + for _, event := range events { + if _, ok := nonAdminNamespaceSet[event.Namespace]; ok { + results = append(results, event) + } + } + + return results, nil +} + +// fetchEventsForNonAdmin returns all events in the given namespace and resource +// It returns all events in the namespace and resource +func (kcl *KubeClient) fetchAllEvents(namespace string, resourceId string) ([]models.K8sEvent, error) { + options := metav1.ListOptions{} + if resourceId != "" { + options.FieldSelector = "involvedObject.uid=" + resourceId + } + + list, err := kcl.cli.CoreV1().Events(namespace).List(context.TODO(), options) + if err != nil { + return nil, err + } + + results := make([]models.K8sEvent, 0) + for _, event := range list.Items { + results = append(results, parseEvent(&event)) + } + + return results, nil +} + +func parseEvent(event *corev1.Event) models.K8sEvent { + result := models.K8sEvent{ + Type: event.Type, + Name: event.Name, + Message: event.Message, + Reason: event.Reason, + Namespace: event.Namespace, + EventTime: event.EventTime.UTC(), + Kind: event.Kind, + Count: event.Count, + UID: string(event.ObjectMeta.GetUID()), + InvolvedObjectKind: models.K8sEventInvolvedObject{ + Kind: 
event.InvolvedObject.Kind, + UID: string(event.InvolvedObject.UID), + Name: event.InvolvedObject.Name, + Namespace: event.InvolvedObject.Namespace, + }, + } + + if !event.LastTimestamp.Time.IsZero() { + result.LastTimestamp = &event.LastTimestamp.Time + } + if !event.FirstTimestamp.Time.IsZero() { + result.FirstTimestamp = &event.FirstTimestamp.Time + } + + return result +} diff --git a/api/kubernetes/cli/event_test.go b/api/kubernetes/cli/event_test.go new file mode 100644 index 000000000..926928317 --- /dev/null +++ b/api/kubernetes/cli/event_test.go @@ -0,0 +1,108 @@ +package cli + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kfake "k8s.io/client-go/kubernetes/fake" +) + +// TestGetEvents tests the GetEvents method +// It creates a fake Kubernetes client and passes it to the GetEvents method +// It then logs the fetched events and validated the data returned +func TestGetEvents(t *testing.T) { + t.Run("can get events for resource id when admin", func(t *testing.T) { + kcl := &KubeClient{ + cli: kfake.NewSimpleClientset(), + instanceID: "instance", + IsKubeAdmin: true, + } + event := corev1.Event{ + InvolvedObject: corev1.ObjectReference{UID: "resourceId"}, + Action: "something", + ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "myEvent"}, + EventTime: metav1.NowMicro(), + Type: "warning", + Message: "This event has a very serious warning", + } + _, err := kcl.cli.CoreV1().Events("default").Create(context.TODO(), &event, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create Event: %v", err) + } + + events, err := kcl.GetEvents("default", "resourceId") + + if err != nil { + t.Fatalf("Failed to fetch Cron Jobs: %v", err) + } + t.Logf("Fetched Events: %v", events) + require.Equal(t, 1, len(events), "Expected to return 1 event") + assert.Equal(t, event.Message, events[0].Message, "Expected Message to be equal to event message created") + assert.Equal(t, event.Type, events[0].Type, "Expected Type to be equal to event type created") + assert.Equal(t, event.EventTime.UTC(), events[0].EventTime, "Expected EventTime to be saved as a string from event time created") + }) + t.Run("can get kubernetes events for non admin namespace when non admin", func(t *testing.T) { + kcl := &KubeClient{ + cli: kfake.NewSimpleClientset(), + instanceID: "instance", + IsKubeAdmin: false, + NonAdminNamespaces: []string{"nonAdmin"}, + } + event := corev1.Event{ + InvolvedObject: corev1.ObjectReference{UID: "resourceId"}, + Action: "something", + ObjectMeta: metav1.ObjectMeta{Namespace: "nonAdmin", Name: "myEvent"}, + EventTime: metav1.NowMicro(), + Type: "warning", + Message: "This event has a very serious warning", + } + _, err := kcl.cli.CoreV1().Events("nonAdmin").Create(context.TODO(), &event, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create Event: %v", err) + } + + events, err := kcl.GetEvents("nonAdmin", "resourceId") + + if err != nil { + t.Fatalf("Failed to fetch Cron Jobs: %v", err) + } + t.Logf("Fetched Events: %v", events) + require.Equal(t, 1, len(events), "Expected to return 1 event") + assert.Equal(t, event.Message, events[0].Message, "Expected Message to be equal to event message created") + assert.Equal(t, event.Type, events[0].Type, "Expected Type to be equal to event type created") + assert.Equal(t, event.EventTime.UTC(), events[0].EventTime, "Expected EventTime to be saved as a string from event time 
created") + }) + + t.Run("cannot get kubernetes events for admin namespace when non admin", func(t *testing.T) { + kcl := &KubeClient{ + cli: kfake.NewSimpleClientset(), + instanceID: "instance", + IsKubeAdmin: false, + NonAdminNamespaces: []string{"nonAdmin"}, + } + event := corev1.Event{ + InvolvedObject: corev1.ObjectReference{UID: "resourceId"}, + Action: "something", + ObjectMeta: metav1.ObjectMeta{Namespace: "admin", Name: "myEvent"}, + EventTime: metav1.NowMicro(), + Type: "warning", + Message: "This event has a very serious warning", + } + _, err := kcl.cli.CoreV1().Events("admin").Create(context.TODO(), &event, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create Event: %v", err) + } + + events, err := kcl.GetEvents("admin", "resourceId") + + if err != nil { + t.Fatalf("Failed to fetch Cron Jobs: %v", err) + } + t.Logf("Fetched Events: %v", events) + assert.Equal(t, 0, len(events), "Expected to return 0 events") + }) +} diff --git a/api/kubernetes/cli/namespace.go b/api/kubernetes/cli/namespace.go index 0ebb6189a..560b91e75 100644 --- a/api/kubernetes/cli/namespace.go +++ b/api/kubernetes/cli/namespace.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "sort" "strconv" "time" @@ -265,9 +266,12 @@ func isSystemNamespace(namespace *corev1.Namespace) bool { return systemLabelValue == "true" } - systemNamespaces := defaultSystemNamespaces() + return isSystemDefaultNamespace(namespace.Name) +} - _, isSystem := systemNamespaces[namespace.Name] +func isSystemDefaultNamespace(namespace string) bool { + systemNamespaces := defaultSystemNamespaces() + _, isSystem := systemNamespaces[namespace] return isSystem } @@ -348,6 +352,34 @@ func (kcl *KubeClient) DeleteNamespace(namespaceName string) (*corev1.Namespace, return namespace, nil } +// CombineNamespacesWithUnhealthyEvents combines namespaces with unhealthy events across all namespaces +func (kcl *KubeClient) CombineNamespacesWithUnhealthyEvents(namespaces map[string]portainer.K8sNamespaceInfo) (map[string]portainer.K8sNamespaceInfo, error) { + allEvents, err := kcl.GetEvents("", "") + if err != nil && !k8serrors.IsNotFound(err) { + log.Error(). + Str("context", "CombineNamespacesWithUnhealthyEvents"). + Err(err). 
+ Msg("unable to retrieve unhealthy events from the Kubernetes for an admin user") + return nil, err + } + + unhealthyEventCounts := make(map[string]int) + for _, event := range allEvents { + if event.Type == "Warning" { + unhealthyEventCounts[event.Namespace]++ + } + } + + for namespaceName, namespace := range namespaces { + if count, exists := unhealthyEventCounts[namespaceName]; exists { + namespace.UnhealthyEventCount = count + namespaces[namespaceName] = namespace + } + } + + return namespaces, nil +} + // CombineNamespacesWithResourceQuotas combines namespaces with resource quotas where matching is based on "portainer-rq-"+namespace.Name func (kcl *KubeClient) CombineNamespacesWithResourceQuotas(namespaces map[string]portainer.K8sNamespaceInfo, w http.ResponseWriter) *httperror.HandlerError { resourceQuotas, err := kcl.GetResourceQuotas("") @@ -390,7 +422,9 @@ func (kcl *KubeClient) CombineNamespaceWithResourceQuota(namespace portainer.K8s func (kcl *KubeClient) buildNonAdminNamespacesMap() map[string]struct{} { nonAdminNamespaceSet := make(map[string]struct{}, len(kcl.NonAdminNamespaces)) for _, namespace := range kcl.NonAdminNamespaces { - nonAdminNamespaceSet[namespace] = struct{}{} + if !isSystemDefaultNamespace(namespace) { + nonAdminNamespaceSet[namespace] = struct{}{} + } } return nonAdminNamespaceSet @@ -404,5 +438,10 @@ func (kcl *KubeClient) ConvertNamespaceMapToSlice(namespaces map[string]portaine namespaceSlice = append(namespaceSlice, namespace) } + // Sort namespaces by name + sort.Slice(namespaceSlice, func(i, j int) bool { + return namespaceSlice[i].Name < namespaceSlice[j].Name + }) + return namespaceSlice } diff --git a/api/kubernetes/cli/pod.go b/api/kubernetes/cli/pod.go index 8d22a20db..eb8992124 100644 --- a/api/kubernetes/cli/pod.go +++ b/api/kubernetes/cli/pod.go @@ -7,11 +7,11 @@ import ( "time" portainer "github.com/portainer/portainer/api" + models "github.com/portainer/portainer/api/http/models/kubernetes" "github.com/pkg/errors" "github.com/rs/zerolog/log" appsv1 "k8s.io/api/apps/v1" - autoscalingv2 "k8s.io/api/autoscaling/v2" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -110,7 +110,7 @@ func (kcl *KubeClient) CreateUserShellPod(ctx context.Context, serviceAccountNam }, } - shellPod, err := kcl.cli.CoreV1().Pods(portainerNamespace).Create(ctx, podSpec, metav1.CreateOptions{}) + shellPod, err := kcl.cli.CoreV1().Pods(portainerNamespace).Create(context.TODO(), podSpec, metav1.CreateOptions{}) if err != nil { return nil, errors.Wrap(err, "error creating shell pod") } @@ -158,7 +158,7 @@ func (kcl *KubeClient) waitForPodStatus(ctx context.Context, phase corev1.PodPha case <-ctx.Done(): return ctx.Err() default: - pod, err := kcl.cli.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + pod, err := kcl.cli.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { return err } @@ -172,83 +172,84 @@ func (kcl *KubeClient) waitForPodStatus(ctx context.Context, phase corev1.PodPha } } -// fetchAllPodsAndReplicaSets fetches all pods and replica sets across the cluster, i.e. 
all namespaces -func (kcl *KubeClient) fetchAllPodsAndReplicaSets(namespace string, podListOptions metav1.ListOptions) ([]corev1.Pod, []appsv1.ReplicaSet, []appsv1.Deployment, []appsv1.StatefulSet, []appsv1.DaemonSet, []corev1.Service, []autoscalingv2.HorizontalPodAutoscaler, error) { - return kcl.fetchResourcesWithOwnerReferences(namespace, podListOptions, false, false) -} - // fetchAllApplicationsListResources fetches all pods, replica sets, stateful sets, and daemon sets across the cluster, i.e. all namespaces // this is required for the applications list view -func (kcl *KubeClient) fetchAllApplicationsListResources(namespace string, podListOptions metav1.ListOptions) ([]corev1.Pod, []appsv1.ReplicaSet, []appsv1.Deployment, []appsv1.StatefulSet, []appsv1.DaemonSet, []corev1.Service, []autoscalingv2.HorizontalPodAutoscaler, error) { +func (kcl *KubeClient) fetchAllApplicationsListResources(namespace string, podListOptions metav1.ListOptions) (PortainerApplicationResources, error) { return kcl.fetchResourcesWithOwnerReferences(namespace, podListOptions, true, true) } // fetchResourcesWithOwnerReferences fetches pods and other resources based on owner references -func (kcl *KubeClient) fetchResourcesWithOwnerReferences(namespace string, podListOptions metav1.ListOptions, includeStatefulSets, includeDaemonSets bool) ([]corev1.Pod, []appsv1.ReplicaSet, []appsv1.Deployment, []appsv1.StatefulSet, []appsv1.DaemonSet, []corev1.Service, []autoscalingv2.HorizontalPodAutoscaler, error) { +func (kcl *KubeClient) fetchResourcesWithOwnerReferences(namespace string, podListOptions metav1.ListOptions, includeStatefulSets, includeDaemonSets bool) (PortainerApplicationResources, error) { pods, err := kcl.cli.CoreV1().Pods(namespace).List(context.Background(), podListOptions) if err != nil { if k8serrors.IsNotFound(err) { - return nil, nil, nil, nil, nil, nil, nil, nil + return PortainerApplicationResources{}, nil } - return nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list pods across the cluster: %w", err) + return PortainerApplicationResources{}, fmt.Errorf("unable to list pods across the cluster: %w", err) } - // if replicaSet owner reference exists, fetch the replica sets - // this also means that the deployments will be fetched because deployments own replica sets - replicaSets := &appsv1.ReplicaSetList{} - deployments := &appsv1.DeploymentList{} - if containsReplicaSetOwnerReference(pods) { - replicaSets, err = kcl.cli.AppsV1().ReplicaSets(namespace).List(context.Background(), metav1.ListOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list replica sets across the cluster: %w", err) - } - - deployments, err = kcl.cli.AppsV1().Deployments(namespace).List(context.Background(), metav1.ListOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list deployments across the cluster: %w", err) - } + portainerApplicationResources := PortainerApplicationResources{ + Pods: pods.Items, } - statefulSets := &appsv1.StatefulSetList{} - if includeStatefulSets && containsStatefulSetOwnerReference(pods) { - statefulSets, err = kcl.cli.AppsV1().StatefulSets(namespace).List(context.Background(), metav1.ListOptions{}) + replicaSets, err := kcl.cli.AppsV1().ReplicaSets(namespace).List(context.Background(), metav1.ListOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return PortainerApplicationResources{}, fmt.Errorf("unable to list replica sets across 
the cluster: %w", err) + } + portainerApplicationResources.ReplicaSets = replicaSets.Items + + deployments, err := kcl.cli.AppsV1().Deployments(namespace).List(context.Background(), metav1.ListOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return PortainerApplicationResources{}, fmt.Errorf("unable to list deployments across the cluster: %w", err) + } + portainerApplicationResources.Deployments = deployments.Items + + if includeStatefulSets { + statefulSets, err := kcl.cli.AppsV1().StatefulSets(namespace).List(context.Background(), metav1.ListOptions{}) if err != nil && !k8serrors.IsNotFound(err) { - return nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list stateful sets across the cluster: %w", err) + return PortainerApplicationResources{}, fmt.Errorf("unable to list stateful sets across the cluster: %w", err) } + portainerApplicationResources.StatefulSets = statefulSets.Items } - daemonSets := &appsv1.DaemonSetList{} - if includeDaemonSets && containsDaemonSetOwnerReference(pods) { - daemonSets, err = kcl.cli.AppsV1().DaemonSets(namespace).List(context.Background(), metav1.ListOptions{}) + if includeDaemonSets { + daemonSets, err := kcl.cli.AppsV1().DaemonSets(namespace).List(context.Background(), metav1.ListOptions{}) if err != nil && !k8serrors.IsNotFound(err) { - return nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list daemon sets across the cluster: %w", err) + return PortainerApplicationResources{}, fmt.Errorf("unable to list daemon sets across the cluster: %w", err) } + portainerApplicationResources.DaemonSets = daemonSets.Items } services, err := kcl.cli.CoreV1().Services(namespace).List(context.Background(), metav1.ListOptions{}) if err != nil && !k8serrors.IsNotFound(err) { - return nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list services across the cluster: %w", err) + return PortainerApplicationResources{}, fmt.Errorf("unable to list services across the cluster: %w", err) } + portainerApplicationResources.Services = services.Items hpas, err := kcl.cli.AutoscalingV2().HorizontalPodAutoscalers(namespace).List(context.Background(), metav1.ListOptions{}) if err != nil && !k8serrors.IsNotFound(err) { - return nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("unable to list horizontal pod autoscalers across the cluster: %w", err) + return PortainerApplicationResources{}, fmt.Errorf("unable to list horizontal pod autoscalers across the cluster: %w", err) } + portainerApplicationResources.HorizontalPodAutoscalers = hpas.Items - return pods.Items, replicaSets.Items, deployments.Items, statefulSets.Items, daemonSets.Items, services.Items, hpas.Items, nil + return portainerApplicationResources, nil } // isPodUsingConfigMap checks if a pod is using a specific ConfigMap -func isPodUsingConfigMap(pod *corev1.Pod, configMapName string) bool { +func isPodUsingConfigMap(pod *corev1.Pod, configMap models.K8sConfigMap) bool { + if pod.Namespace != configMap.Namespace { + return false + } + for _, volume := range pod.Spec.Volumes { - if volume.ConfigMap != nil && volume.ConfigMap.Name == configMapName { + if volume.ConfigMap != nil && volume.ConfigMap.Name == configMap.Name { return true } } for _, container := range pod.Spec.Containers { for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == configMapName { + if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil && env.ValueFrom.ConfigMapKeyRef.Name == configMap.Name { return true } } @@ -258,16 +259,20 
@@ func isPodUsingConfigMap(pod *corev1.Pod, configMapName string) bool { } // isPodUsingSecret checks if a pod is using a specific Secret -func isPodUsingSecret(pod *corev1.Pod, secretName string) bool { +func isPodUsingSecret(pod *corev1.Pod, secret models.K8sSecret) bool { + if pod.Namespace != secret.Namespace { + return false + } + for _, volume := range pod.Spec.Volumes { - if volume.Secret != nil && volume.Secret.SecretName == secretName { + if volume.Secret != nil && volume.Secret.SecretName == secret.Name { return true } } for _, container := range pod.Spec.Containers { for _, env := range container.Env { - if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == secretName { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil && env.ValueFrom.SecretKeyRef.Name == secret.Name { return true } } diff --git a/api/kubernetes/cli/role.go b/api/kubernetes/cli/role.go index 9a3c6635f..c45e2c299 100644 --- a/api/kubernetes/cli/role.go +++ b/api/kubernetes/cli/role.go @@ -10,7 +10,6 @@ import ( rbacv1 "k8s.io/api/rbac/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // GetRoles gets all the roles for either at the cluster level or a given namespace in a k8s endpoint. @@ -137,7 +136,7 @@ func (kcl *KubeClient) DeleteRoles(reqs models.K8sRoleDeleteRequests) error { for _, name := range reqs[namespace] { client := kcl.cli.RbacV1().Roles(namespace) - role, err := client.Get(context.Background(), name, v1.GetOptions{}) + role, err := client.Get(context.Background(), name, metav1.GetOptions{}) if err != nil { if k8serrors.IsNotFound(err) { continue diff --git a/api/kubernetes/cli/role_binding.go b/api/kubernetes/cli/role_binding.go index e8e90cbb0..7775748e2 100644 --- a/api/kubernetes/cli/role_binding.go +++ b/api/kubernetes/cli/role_binding.go @@ -7,11 +7,9 @@ import ( models "github.com/portainer/portainer/api/http/models/kubernetes" "github.com/portainer/portainer/api/internal/errorlist" "github.com/rs/zerolog/log" - corev1 "k8s.io/api/rbac/v1" rbacv1 "k8s.io/api/rbac/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // GetRoleBindings gets all the roleBindings for either at the cluster level or a given namespace in a k8s endpoint. 
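// A minimal sketch of a pod that the isPodUsingConfigMap helper above would treat as
// using a config map; it assumes models.K8sConfigMap exposes Name and Namespace as
// plain fields (the patch only ever reads configMap.Name and configMap.Namespace), and
// the "demo" and "app-config" names are placeholders.
func exampleConfigMapIsUsed() bool {
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: "demo"},
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{{
				Name: "config-volume",
				VolumeSource: corev1.VolumeSource{
					ConfigMap: &corev1.ConfigMapVolumeSource{
						LocalObjectReference: corev1.LocalObjectReference{Name: "app-config"},
					},
				},
			}},
		},
	}

	// True: the namespaces match and the pod mounts the config map as a volume. An env
	// var whose ValueFrom.ConfigMapKeyRef names "app-config" would also satisfy the
	// check, while a pod in another namespace is rejected before either lookup.
	return isPodUsingConfigMap(pod, models.K8sConfigMap{Name: "app-config", Namespace: "demo"})
}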
@@ -98,7 +96,7 @@ func (kcl *KubeClient) isSystemRoleBinding(rb *rbacv1.RoleBinding) bool { return false } -func (kcl *KubeClient) getRole(namespace, name string) (*corev1.Role, error) { +func (kcl *KubeClient) getRole(namespace, name string) (*rbacv1.Role, error) { client := kcl.cli.RbacV1().Roles(namespace) return client.Get(context.Background(), name, metav1.GetOptions{}) } @@ -111,7 +109,7 @@ func (kcl *KubeClient) DeleteRoleBindings(reqs models.K8sRoleBindingDeleteReques for _, name := range reqs[namespace] { client := kcl.cli.RbacV1().RoleBindings(namespace) - roleBinding, err := client.Get(context.Background(), name, v1.GetOptions{}) + roleBinding, err := client.Get(context.Background(), name, metav1.GetOptions{}) if err != nil { if k8serrors.IsNotFound(err) { continue @@ -125,7 +123,7 @@ func (kcl *KubeClient) DeleteRoleBindings(reqs models.K8sRoleBindingDeleteReques log.Error().Str("role_name", name).Msg("ignoring delete of 'system' role binding, not allowed") } - if err := client.Delete(context.Background(), name, v1.DeleteOptions{}); err != nil { + if err := client.Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil { errors = append(errors, err) } } diff --git a/api/kubernetes/cli/secret.go b/api/kubernetes/cli/secret.go index 8e38c9857..5bcd386cb 100644 --- a/api/kubernetes/cli/secret.go +++ b/api/kubernetes/cli/secret.go @@ -8,6 +8,7 @@ import ( models "github.com/portainer/portainer/api/http/models/kubernetes" "github.com/rs/zerolog/log" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,7 +32,7 @@ func (kcl *KubeClient) GetSecrets(namespace string) ([]models.K8sSecret, error) // getSecretsForNonAdmin fetches the secrets in the namespaces the user has access to. // This function is called when the user is not an admin. func (kcl *KubeClient) getSecretsForNonAdmin(namespace string) ([]models.K8sSecret, error) { - log.Debug().Msgf("Fetching volumes for non-admin user: %v", kcl.NonAdminNamespaces) + log.Debug().Msgf("Fetching secrets for non-admin user: %v", kcl.NonAdminNamespaces) if len(kcl.NonAdminNamespaces) == 0 { return nil, nil @@ -111,34 +112,28 @@ func parseSecret(secret *corev1.Secret, withData bool) models.K8sSecret { return result } -// CombineSecretsWithApplications combines the secrets with the applications that use them. +// SetSecretsIsUsed combines the secrets with the applications that use them. // the function fetches all the pods and replica sets in the cluster and checks if the secret is used by any of the pods. // if the secret is used by a pod, the application that uses the pod is added to the secret. // otherwise, the secret is returned as is. -func (kcl *KubeClient) CombineSecretsWithApplications(secrets []models.K8sSecret) ([]models.K8sSecret, error) { - updatedSecrets := make([]models.K8sSecret, len(secrets)) - - pods, replicaSets, _, _, _, _, _, err := kcl.fetchAllPodsAndReplicaSets("", metav1.ListOptions{}) +func (kcl *KubeClient) SetSecretsIsUsed(secrets *[]models.K8sSecret) error { + portainerApplicationResources, err := kcl.fetchAllApplicationsListResources("", metav1.ListOptions{}) if err != nil { - return nil, fmt.Errorf("an error occurred during the CombineSecretsWithApplications operation, unable to fetch pods and replica sets. Error: %w", err) + return fmt.Errorf("an error occurred during the SetSecretsIsUsed operation, unable to fetch Portainer application resources. 
Error: %w", err) } - for index, secret := range secrets { - updatedSecret := secret + for i := range *secrets { + secret := &(*secrets)[i] - applicationConfigurationOwners, err := kcl.GetApplicationConfigurationOwnersFromSecret(secret, pods, replicaSets) - if err != nil { - return nil, fmt.Errorf("an error occurred during the CombineSecretsWithApplications operation, unable to get applications from secret. Error: %w", err) + for _, pod := range portainerApplicationResources.Pods { + if isPodUsingSecret(&pod, *secret) { + secret.IsUsed = true + break + } } - - if len(applicationConfigurationOwners) > 0 { - updatedSecret.ConfigurationOwnerResources = applicationConfigurationOwners - } - - updatedSecrets[index] = updatedSecret } - return updatedSecrets, nil + return nil } // CombineSecretWithApplications combines the secret with the applications that use it. @@ -156,20 +151,22 @@ func (kcl *KubeClient) CombineSecretWithApplications(secret models.K8sSecret) (m break } + var replicaSets *appsv1.ReplicaSetList if containsReplicaSetOwner { - replicaSets, err := kcl.cli.AppsV1().ReplicaSets(secret.Namespace).List(context.Background(), metav1.ListOptions{}) + replicaSets, err = kcl.cli.AppsV1().ReplicaSets(secret.Namespace).List(context.Background(), metav1.ListOptions{}) if err != nil { return models.K8sSecret{}, fmt.Errorf("an error occurred during the CombineSecretWithApplications operation, unable to get replica sets. Error: %w", err) } + } - applicationConfigurationOwners, err := kcl.GetApplicationConfigurationOwnersFromSecret(secret, pods.Items, replicaSets.Items) - if err != nil { - return models.K8sSecret{}, fmt.Errorf("an error occurred during the CombineSecretWithApplications operation, unable to get applications from secret. Error: %w", err) - } + applicationConfigurationOwners, err := kcl.GetApplicationConfigurationOwnersFromSecret(secret, pods.Items, replicaSets.Items) + if err != nil { + return models.K8sSecret{}, fmt.Errorf("an error occurred during the CombineSecretWithApplications operation, unable to get applications from secret. 
Error: %w", err) + } - if len(applicationConfigurationOwners) > 0 { - secret.ConfigurationOwnerResources = applicationConfigurationOwners - } + if len(applicationConfigurationOwners) > 0 { + secret.ConfigurationOwnerResources = applicationConfigurationOwners + secret.IsUsed = true } return secret, nil diff --git a/api/kubernetes/cli/service.go b/api/kubernetes/cli/service.go index 8a7ef03ab..3f0543735 100644 --- a/api/kubernetes/cli/service.go +++ b/api/kubernetes/cli/service.go @@ -81,8 +81,8 @@ func parseService(service corev1.Service) models.K8sServiceInfo { ingressStatus := make([]models.K8sServiceIngress, 0) for _, status := range service.Status.LoadBalancer.Ingress { ingressStatus = append(ingressStatus, models.K8sServiceIngress{ - IP: status.IP, - Host: status.Hostname, + IP: status.IP, + Hostname: status.Hostname, }) } @@ -130,7 +130,7 @@ func (kcl *KubeClient) convertToK8sService(info models.K8sServiceInfo) corev1.Se for _, i := range info.IngressStatus { service.Status.LoadBalancer.Ingress = append( service.Status.LoadBalancer.Ingress, - corev1.LoadBalancerIngress{IP: i.IP, Hostname: i.Host}, + corev1.LoadBalancerIngress{IP: i.IP, Hostname: i.Hostname}, ) } @@ -174,7 +174,7 @@ func (kcl *KubeClient) UpdateService(namespace string, info models.K8sServiceInf func (kcl *KubeClient) CombineServicesWithApplications(services []models.K8sServiceInfo) ([]models.K8sServiceInfo, error) { if containsServiceWithSelector(services) { updatedServices := make([]models.K8sServiceInfo, len(services)) - pods, replicaSets, _, _, _, _, _, err := kcl.fetchAllPodsAndReplicaSets("", metav1.ListOptions{}) + portainerApplicationResources, err := kcl.fetchAllApplicationsListResources("", metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("an error occurred during the CombineServicesWithApplications operation, unable to fetch pods and replica sets. Error: %w", err) } @@ -182,7 +182,7 @@ func (kcl *KubeClient) CombineServicesWithApplications(services []models.K8sServ for index, service := range services { updatedService := service - application, err := kcl.GetApplicationFromServiceSelector(pods, service, replicaSets) + application, err := kcl.GetApplicationFromServiceSelector(portainerApplicationResources.Pods, service, portainerApplicationResources.ReplicaSets) if err != nil { return services, fmt.Errorf("an error occurred during the CombineServicesWithApplications operation, unable to get application from service. Error: %w", err) } diff --git a/api/kubernetes/cli/service_account.go b/api/kubernetes/cli/service_account.go index 831080efc..af674d794 100644 --- a/api/kubernetes/cli/service_account.go +++ b/api/kubernetes/cli/service_account.go @@ -5,7 +5,6 @@ import ( "fmt" portainer "github.com/portainer/portainer/api" - "github.com/portainer/portainer/api/http/models/kubernetes" models "github.com/portainer/portainer/api/http/models/kubernetes" "github.com/portainer/portainer/api/internal/errorlist" corev1 "k8s.io/api/core/v1" @@ -92,7 +91,7 @@ func (kcl *KubeClient) isSystemServiceAccount(namespace string) bool { // DeleteServices processes a K8sServiceDeleteRequest by deleting each service // in its given namespace. 
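// A minimal sketch of how the pointer-based SetSecretsIsUsed above is intended to be
// called now that it mutates the slice in place rather than returning a new one (as the
// removed CombineSecretsWithApplications did); kcl is assumed to be a *KubeClient and
// "default" is a placeholder namespace.
func exampleMarkUsedSecrets(kcl *KubeClient) ([]models.K8sSecret, error) {
	secrets, err := kcl.GetSecrets("default")
	if err != nil {
		return nil, err
	}

	// IsUsed is flipped in place for every secret referenced by a pod volume or env var.
	if err := kcl.SetSecretsIsUsed(&secrets); err != nil {
		return nil, err
	}

	return secrets, nil
}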
-func (kcl *KubeClient) DeleteServiceAccounts(reqs kubernetes.K8sServiceAccountDeleteRequests) error { +func (kcl *KubeClient) DeleteServiceAccounts(reqs models.K8sServiceAccountDeleteRequests) error { var errors []error for namespace := range reqs { for _, serviceName := range reqs[namespace] { diff --git a/api/kubernetes/cli/volumes.go b/api/kubernetes/cli/volumes.go index 52ee02ab8..d8a3cbf56 100644 --- a/api/kubernetes/cli/volumes.go +++ b/api/kubernetes/cli/volumes.go @@ -7,7 +7,6 @@ import ( models "github.com/portainer/portainer/api/http/models/kubernetes" "github.com/rs/zerolog/log" appsv1 "k8s.io/api/apps/v1" - autoscalingv2 "k8s.io/api/autoscaling/v2" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -265,7 +264,12 @@ func (kcl *KubeClient) updateVolumesWithOwningApplications(volumes *[]models.K8s if pod.Spec.Volumes != nil { for _, podVolume := range pod.Spec.Volumes { if podVolume.VolumeSource.PersistentVolumeClaim != nil && podVolume.VolumeSource.PersistentVolumeClaim.ClaimName == volume.PersistentVolumeClaim.Name && pod.Namespace == volume.PersistentVolumeClaim.Namespace { - application, err := kcl.ConvertPodToApplication(pod, replicaSetItems, deploymentItems, statefulSetItems, daemonSetItems, []corev1.Service{}, []autoscalingv2.HorizontalPodAutoscaler{}, false) + application, err := kcl.ConvertPodToApplication(pod, PortainerApplicationResources{ + ReplicaSets: replicaSetItems, + Deployments: deploymentItems, + StatefulSets: statefulSetItems, + DaemonSets: daemonSetItems, + }, false) if err != nil { log.Error().Err(err).Msg("Failed to convert pod to application") return nil, fmt.Errorf("an error occurred during the CombineServicesWithApplications operation, unable to convert pod to application. Error: %w", err) diff --git a/api/kubernetes/kubeclusteraccess_service.go b/api/kubernetes/kubeclusteraccess_service.go index 60f0cc503..fce7f55ea 100644 --- a/api/kubernetes/kubeclusteraccess_service.go +++ b/api/kubernetes/kubeclusteraccess_service.go @@ -109,6 +109,7 @@ func (service *kubeClusterAccessService) GetClusterDetails(hostURL string, endpo Str("host_URL", hostURL). Str("HTTPS_bind_address", service.httpsBindAddr). Str("base_URL", baseURL). + Bool("is_internal", isInternal). 
Msg("kubeconfig") clusterServerURL, err := url.JoinPath("https://", hostURL, baseURL, "/api/endpoints/", strconv.Itoa(int(endpointID)), "/kubernetes") diff --git a/api/ldap/ldap.go b/api/ldap/ldap.go index 09e8e6450..5037666b9 100644 --- a/api/ldap/ldap.go +++ b/api/ldap/ldap.go @@ -4,11 +4,12 @@ import ( "fmt" "strings" - ldap "github.com/go-ldap/ldap/v3" - "github.com/pkg/errors" portainer "github.com/portainer/portainer/api" "github.com/portainer/portainer/api/crypto" httperrors "github.com/portainer/portainer/api/http/errors" + + ldap "github.com/go-ldap/ldap/v3" + "github.com/pkg/errors" ) var ( @@ -30,36 +31,44 @@ func createConnection(settings *portainer.LDAPSettings) (*ldap.Conn, error) { } func createConnectionForURL(url string, settings *portainer.LDAPSettings) (*ldap.Conn, error) { - if settings.TLSConfig.TLS || settings.StartTLS { - config, err := crypto.CreateTLSConfigurationFromDisk(settings.TLSConfig.TLSCACertPath, settings.TLSConfig.TLSCertPath, settings.TLSConfig.TLSKeyPath, settings.TLSConfig.TLSSkipVerify) - if err != nil { - return nil, err - } - config.ServerName = strings.Split(url, ":")[0] - - if settings.TLSConfig.TLS { - return ldap.DialTLS("tcp", url, config) - } - - conn, err := ldap.Dial("tcp", url) - if err != nil { - return nil, err - } - - err = conn.StartTLS(config) - if err != nil { - return nil, err - } - - return conn, nil + if !settings.TLSConfig.TLS && !settings.StartTLS { + return ldap.Dial("tcp", url) } - return ldap.Dial("tcp", url) + // Store the original value to ensure the TLSConfig is created + t := settings.TLSConfig.TLS + settings.TLSConfig.TLS = settings.TLSConfig.TLS || settings.StartTLS + + config, err := crypto.CreateTLSConfigurationFromDisk(settings.TLSConfig) + if err != nil { + return nil, err + } + + // Restore the original value + settings.TLSConfig.TLS = t + + if settings.TLSConfig.TLS || settings.StartTLS { + config.ServerName = strings.Split(url, ":")[0] + } + + if settings.TLSConfig.TLS { + return ldap.DialTLS("tcp", url, config) + } + + conn, err := ldap.Dial("tcp", url) + if err != nil { + return nil, err + } + + if err := conn.StartTLS(config); err != nil { + return nil, err + } + + return conn, nil } // AuthenticateUser is used to authenticate a user against a LDAP/AD. 
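// A minimal sketch of the three settings shapes the rewritten createConnectionForURL
// above distinguishes, using only fields that appear in this patch and its test; the
// hosts and ports are placeholders.
var (
	// Neither TLS nor StartTLS: a plain ldap.Dial over TCP.
	examplePlainLDAP = portainer.LDAPSettings{URL: "ldap.example.com:389"}

	// TLSConfig.TLS set: ldap.DialTLS with a config built by crypto.CreateTLSConfigurationFromDisk.
	exampleLDAPS = portainer.LDAPSettings{
		URL:       "ldap.example.com:636",
		TLSConfig: portainer.TLSConfiguration{TLS: true, TLSSkipVerify: true},
	}

	// StartTLS set: a plain dial first, then conn.StartTLS with the same TLS configuration,
	// which is why TLSConfig.TLS is temporarily forced on while the config is built and
	// restored afterwards.
	exampleStartTLS = portainer.LDAPSettings{
		URL:      "ldap.example.com:389",
		StartTLS: true,
	}
)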
func (*Service) AuthenticateUser(username, password string, settings *portainer.LDAPSettings) error { - connection, err := createConnection(settings) if err != nil { return err diff --git a/api/ldap/ldap_test.go b/api/ldap/ldap_test.go new file mode 100644 index 000000000..1d16d2d0e --- /dev/null +++ b/api/ldap/ldap_test.go @@ -0,0 +1,72 @@ +package ldap + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" + + portainer "github.com/portainer/portainer/api" + + "github.com/stretchr/testify/require" +) + +func TestCreateConnectionForURL(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer srv.Close() + + tlsSrv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer tlsSrv.Close() + + srvURL, err := url.Parse(tlsSrv.URL) + require.NoError(t, err) + + // TCP + + settings := &portainer.LDAPSettings{ + URL: srvURL.Host, + } + + conn, err := createConnectionForURL(settings.URL, settings) + require.NoError(t, err) + require.NotNil(t, conn) + conn.Close() + + // TLS + + settings.TLSConfig = portainer.TLSConfiguration{ + TLS: true, + TLSSkipVerify: true, + } + + conn, err = createConnectionForURL(settings.URL, settings) + require.NoError(t, err) + require.NotNil(t, conn) + conn.Close() + + // Invalid TLS + + settings.TLSConfig = portainer.TLSConfiguration{ + TLS: true, + TLSSkipVerify: true, + TLSCertPath: "/invalid/path/cert", + TLSKeyPath: "/invalid/path/key", + } + + conn, err = createConnectionForURL(settings.URL, settings) + require.Error(t, err) + require.Nil(t, conn) + + // StartTLS + + settings.TLSConfig.TLS = false + settings.StartTLS = true + + conn, err = createConnectionForURL(settings.URL, settings) + require.Error(t, err) + require.Nil(t, conn) +} diff --git a/api/cmd/portainer/log.go b/api/logs/log.go similarity index 91% rename from api/cmd/portainer/log.go rename to api/logs/log.go index b5b4121ea..b44e6dc8c 100644 --- a/api/cmd/portainer/log.go +++ b/api/logs/log.go @@ -1,4 +1,4 @@ -package main +package logs import ( "fmt" @@ -10,7 +10,7 @@ import ( "github.com/rs/zerolog/pkgerrors" ) -func configureLogger() { +func ConfigureLogger() { zerolog.ErrorStackFieldName = "stack_trace" zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack zerolog.TimeFieldFormat = zerolog.TimeFormatUnix @@ -21,7 +21,7 @@ func configureLogger() { log.Logger = log.Logger.With().Caller().Stack().Logger() } -func setLoggingLevel(level string) { +func SetLoggingLevel(level string) { switch level { case "ERROR": zerolog.SetGlobalLevel(zerolog.ErrorLevel) @@ -34,7 +34,7 @@ func setLoggingLevel(level string) { } } -func setLoggingMode(mode string) { +func SetLoggingMode(mode string) { switch mode { case "PRETTY": log.Logger = log.Output(zerolog.ConsoleWriter{ diff --git a/api/platform/service.go b/api/platform/service.go index 628e1cf9b..e74855fb7 100644 --- a/api/platform/service.go +++ b/api/platform/service.go @@ -14,6 +14,10 @@ import ( "github.com/rs/zerolog/log" ) +var ( + ErrNoLocalEnvironment = errors.New("No local environment was detected") +) + type Service interface { GetLocalEnvironment() (*portainer.Endpoint, error) GetPlatform() (ContainerPlatform, error) @@ -35,7 +39,7 @@ func (service *service) loadEnvAndPlatform() error { return nil } - environment, platform, err := guessLocalEnvironment(service.dataStore) + environment, platform, err := detectLocalEnvironment(service.dataStore) if err != nil { return 
err } @@ -73,7 +77,7 @@ var platformToEndpointType = map[ContainerPlatform][]portainer.EndpointType{ PlatformKubernetes: {portainer.KubernetesLocalEnvironment}, } -func guessLocalEnvironment(dataStore dataservices.DataStore) (*portainer.Endpoint, ContainerPlatform, error) { +func detectLocalEnvironment(dataStore dataservices.DataStore) (*portainer.Endpoint, ContainerPlatform, error) { platform := DetermineContainerPlatform() if !slices.Contains([]ContainerPlatform{PlatformDocker, PlatformKubernetes}, platform) { @@ -113,7 +117,7 @@ func guessLocalEnvironment(dataStore dataservices.DataStore) (*portainer.Endpoin } } - return nil, "", errors.New("failed to find local environment") + return nil, "", ErrNoLocalEnvironment } func checkDockerEnvTypeForUpgrade(environment *portainer.Endpoint) ContainerPlatform { diff --git a/api/portainer.go b/api/portainer.go index ad0989437..3d9b6be4b 100644 --- a/api/portainer.go +++ b/api/portainer.go @@ -4,16 +4,21 @@ import ( "context" "fmt" "io" + "net/http" "time" + gittypes "github.com/portainer/portainer/api/git/types" + models "github.com/portainer/portainer/api/http/models/kubernetes" + "github.com/portainer/portainer/api/roar" + "github.com/portainer/portainer/pkg/featureflags" + httperror "github.com/portainer/portainer/pkg/libhttp/error" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/system" "github.com/docker/docker/api/types/volume" - gittypes "github.com/portainer/portainer/api/git/types" - models "github.com/portainer/portainer/api/http/models/kubernetes" - "github.com/portainer/portainer/pkg/featureflags" - + "github.com/segmentio/encoding/json" "golang.org/x/oauth2" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/version" @@ -106,6 +111,7 @@ type ( AdminPassword *string AdminPasswordFile *string Assets *string + CSP *bool Data *string FeatureFlags *[]string EnableEdgeComputeFeatures *bool @@ -134,6 +140,8 @@ type ( LogLevel *string LogMode *string KubectlShellImage *string + PullLimitCheckDisabled *bool + TrustedOrigins *string } // CustomTemplateVariableDefinition @@ -208,26 +216,34 @@ type ( // DockerSnapshot represents a snapshot of a specific Docker environment(endpoint) at a specific time DockerSnapshot struct { - Time int64 `json:"Time"` - DockerVersion string `json:"DockerVersion"` - Swarm bool `json:"Swarm"` - TotalCPU int `json:"TotalCPU"` - TotalMemory int64 `json:"TotalMemory"` - ContainerCount int `json:"ContainerCount"` - RunningContainerCount int `json:"RunningContainerCount"` - StoppedContainerCount int `json:"StoppedContainerCount"` - HealthyContainerCount int `json:"HealthyContainerCount"` - UnhealthyContainerCount int `json:"UnhealthyContainerCount"` - VolumeCount int `json:"VolumeCount"` - ImageCount int `json:"ImageCount"` - ServiceCount int `json:"ServiceCount"` - StackCount int `json:"StackCount"` - SnapshotRaw DockerSnapshotRaw `json:"DockerSnapshotRaw"` - NodeCount int `json:"NodeCount"` - GpuUseAll bool `json:"GpuUseAll"` - GpuUseList []string `json:"GpuUseList"` - IsPodman bool `json:"IsPodman"` - DiagnosticsData *DiagnosticsData `json:"DiagnosticsData"` + Time int64 `json:"Time"` + DockerVersion string `json:"DockerVersion"` + Swarm bool `json:"Swarm"` + TotalCPU int `json:"TotalCPU"` + TotalMemory int64 `json:"TotalMemory"` + ContainerCount int `json:"ContainerCount"` + RunningContainerCount int `json:"RunningContainerCount"` + StoppedContainerCount int `json:"StoppedContainerCount"` + 
HealthyContainerCount int `json:"HealthyContainerCount"` + UnhealthyContainerCount int `json:"UnhealthyContainerCount"` + VolumeCount int `json:"VolumeCount"` + ImageCount int `json:"ImageCount"` + ServiceCount int `json:"ServiceCount"` + StackCount int `json:"StackCount"` + SnapshotRaw DockerSnapshotRaw `json:"DockerSnapshotRaw"` + NodeCount int `json:"NodeCount"` + GpuUseAll bool `json:"GpuUseAll"` + GpuUseList []string `json:"GpuUseList"` + IsPodman bool `json:"IsPodman"` + DiagnosticsData *DiagnosticsData `json:"DiagnosticsData"` + PerformanceMetrics *PerformanceMetrics `json:"PerformanceMetrics"` + } + + // PerformanceMetrics represents the performance metrics of a Docker, Swarm, Podman, and Kubernetes environments + PerformanceMetrics struct { + CPUUsage float64 `json:"CPUUsage,omitempty"` + MemoryUsage float64 `json:"MemoryUsage,omitempty"` + NetworkUsage float64 `json:"NetworkUsage,omitempty"` } // DockerContainerSnapshot is an extent of Docker's Container struct @@ -241,7 +257,7 @@ type ( DockerSnapshotRaw struct { Containers []DockerContainerSnapshot `json:"Containers" swaggerignore:"true"` Volumes volume.ListResponse `json:"Volumes" swaggerignore:"true"` - Networks []types.NetworkResource `json:"Networks" swaggerignore:"true"` + Networks []network.Summary `json:"Networks" swaggerignore:"true"` Images []image.Summary `json:"Images" swaggerignore:"true"` Info system.Info `json:"Info" swaggerignore:"true"` Version types.Version `json:"Version" swaggerignore:"true"` @@ -250,12 +266,15 @@ type ( // EdgeGroup represents an Edge group EdgeGroup struct { // EdgeGroup Identifier - ID EdgeGroupID `json:"Id" example:"1"` - Name string `json:"Name"` - Dynamic bool `json:"Dynamic"` - TagIDs []TagID `json:"TagIds"` - Endpoints []EndpointID `json:"Endpoints"` - PartialMatch bool `json:"PartialMatch"` + ID EdgeGroupID `json:"Id" example:"1"` + Name string `json:"Name"` + Dynamic bool `json:"Dynamic"` + TagIDs []TagID `json:"TagIds"` + EndpointIDs roar.Roar[EndpointID] `json:"EndpointIds"` + PartialMatch bool `json:"PartialMatch"` + + // Deprecated: only used for API responses + Endpoints []EndpointID `json:"Endpoints"` } // EdgeGroupID represents an Edge group identifier @@ -309,7 +328,7 @@ type ( // FileVersion is the version of the stack file, used to detect changes FileVersion int `json:"FileVersion"` // ConfigHash is the commit hash of the git repository used for deploying the stack - ConfigHash string `json:"ConfigHash"` + ConfigHash string `json:"ConfigHash,omitempty"` } // EdgeStack represents an edge stack @@ -331,6 +350,15 @@ type ( UseManifestNamespaces bool } + EdgeStackStatusForEnv struct { + EndpointID EndpointID + Status []EdgeStackDeploymentStatus + // EE only feature + DeploymentInfo StackDeploymentInfo + // ReadyRePullImage is a flag to indicate whether the auto update is trigger to re-pull image + ReadyRePullImage bool `json:"ReadyRePullImage,omitempty"` + } + EdgeStackDeploymentType int // EdgeStackID represents an edge stack id @@ -353,24 +381,24 @@ type ( // EE only feature DeploymentInfo StackDeploymentInfo // ReadyRePullImage is a flag to indicate whether the auto update is trigger to re-pull image - ReadyRePullImage bool + ReadyRePullImage bool `json:"ReadyRePullImage,omitempty"` // Deprecated - Details EdgeStackStatusDetails + Details *EdgeStackStatusDetails `json:"Details,omitempty"` // Deprecated - Error string + Error string `json:"Error,omitempty"` // Deprecated - Type EdgeStackStatusType `json:"Type"` + Type EdgeStackStatusType `json:"Type,omitempty"` } // 
EdgeStackDeploymentStatus represents an edge stack deployment status EdgeStackDeploymentStatus struct { Time int64 Type EdgeStackStatusType - Error string + Error string `json:"Error,omitempty"` // EE only feature - RollbackTo *int - Version int `json:"Version,omitempty"` + RollbackTo *int `json:"RollbackTo,omitempty"` + Version int `json:"Version,omitempty"` } // EdgeStackStatusType represents an edge stack status type @@ -579,6 +607,12 @@ type ( ProjectPath string `json:"ProjectPath"` } + // GithubRegistryData represents data required for Github registry to work + GithubRegistryData struct { + UseOrganisation bool `json:"UseOrganisation"` + OrganisationName string `json:"OrganisationName"` + } + HelmUserRepositoryID int // HelmUserRepositories stores a Helm repository URL for the given user @@ -606,15 +640,16 @@ type ( JobType int K8sNamespaceInfo struct { - Id string `json:"Id"` - Name string `json:"Name"` - Status corev1.NamespaceStatus `json:"Status"` - Annotations map[string]string `json:"Annotations"` - CreationDate string `json:"CreationDate"` - NamespaceOwner string `json:"NamespaceOwner"` - IsSystem bool `json:"IsSystem"` - IsDefault bool `json:"IsDefault"` - ResourceQuota *corev1.ResourceQuota `json:"ResourceQuota"` + Id string `json:"Id"` + Name string `json:"Name"` + Status corev1.NamespaceStatus `json:"Status"` + Annotations map[string]string `json:"Annotations"` + CreationDate string `json:"CreationDate"` + UnhealthyEventCount int `json:"UnhealthyEventCount"` + NamespaceOwner string `json:"NamespaceOwner"` + IsSystem bool `json:"IsSystem"` + IsDefault bool `json:"IsDefault"` + ResourceQuota *corev1.ResourceQuota `json:"ResourceQuota"` } K8sNodeLimits struct { @@ -646,12 +681,13 @@ type ( // KubernetesSnapshot represents a snapshot of a specific Kubernetes environment(endpoint) at a specific time KubernetesSnapshot struct { - Time int64 `json:"Time"` - KubernetesVersion string `json:"KubernetesVersion"` - NodeCount int `json:"NodeCount"` - TotalCPU int64 `json:"TotalCPU"` - TotalMemory int64 `json:"TotalMemory"` - DiagnosticsData *DiagnosticsData `json:"DiagnosticsData"` + Time int64 `json:"Time"` + KubernetesVersion string `json:"KubernetesVersion"` + NodeCount int `json:"NodeCount"` + TotalCPU int64 `json:"TotalCPU"` + TotalMemory int64 `json:"TotalMemory"` + DiagnosticsData *DiagnosticsData `json:"DiagnosticsData"` + PerformanceMetrics *PerformanceMetrics `json:"PerformanceMetrics"` } // KubernetesConfiguration represents the configuration of a Kubernetes environment(endpoint) @@ -797,6 +833,7 @@ type ( Password string `json:"Password,omitempty" example:"registry_password"` ManagementConfiguration *RegistryManagementConfiguration `json:"ManagementConfiguration"` Gitlab GitlabRegistryData `json:"Gitlab"` + Github GithubRegistryData `json:"Github"` Quay QuayRegistryData `json:"Quay"` Ecr EcrData `json:"Ecr"` RegistryAccesses RegistryAccesses `json:"RegistryAccesses"` @@ -1373,6 +1410,12 @@ type ( Kubernetes *KubernetesSnapshot `json:"Kubernetes"` } + SnapshotRawMessage struct { + EndpointID EndpointID `json:"EndpointId"` + Docker json.RawMessage `json:"Docker"` + Kubernetes json.RawMessage `json:"Kubernetes"` + } + // CLIService represents a service for managing CLI CLIService interface { ParseFlags(version string) (*CLIFlags, error) @@ -1491,17 +1534,50 @@ type ( StoreSSLCertPair(cert, key []byte) (string, string, error) CopySSLCertPair(certPath, keyPath string) (string, string, error) CopySSLCACert(caCertPath string) (string, error) - StoreMTLSCertificates(cert, caCert, 
key []byte) (string, string, string, error) + StoreMTLSCertificates(caCert, cert, key []byte) (string, string, string, error) + GetMTLSCertificates() (string, string, string, error) GetDefaultChiselPrivateKeyPath() string StoreChiselPrivateKey(privateKey []byte) error } // GitService represents a service for managing Git GitService interface { - CloneRepository(destination string, repositoryURL, referenceName, username, password string, tlsSkipVerify bool) error - LatestCommitID(repositoryURL, referenceName, username, password string, tlsSkipVerify bool) (string, error) - ListRefs(repositoryURL, username, password string, hardRefresh bool, tlsSkipVerify bool) ([]string, error) - ListFiles(repositoryURL, referenceName, username, password string, dirOnly, hardRefresh bool, includeExts []string, tlsSkipVerify bool) ([]string, error) + CloneRepository( + destination string, + repositoryURL, + referenceName, + username, + password string, + authType gittypes.GitCredentialAuthType, + tlsSkipVerify bool, + ) error + LatestCommitID( + repositoryURL, + referenceName, + username, + password string, + authType gittypes.GitCredentialAuthType, + tlsSkipVerify bool, + ) (string, error) + ListRefs( + repositoryURL, + username, + password string, + authType gittypes.GitCredentialAuthType, + hardRefresh bool, + tlsSkipVerify bool, + ) ([]string, error) + ListFiles( + repositoryURL, + referenceName, + username, + password string, + authType gittypes.GitCredentialAuthType, + dirOnly, + hardRefresh bool, + includeExts []string, + tlsSkipVerify bool, + ) ([]string, error) } // OpenAMTService represents a service for managing OpenAMT @@ -1522,56 +1598,127 @@ type ( // KubeClient represents a service used to query a Kubernetes environment(endpoint) KubeClient interface { - ServerVersion() (*version.Info, error) + // Access + GetIsKubeAdmin() bool + SetIsKubeAdmin(isKubeAdmin bool) + GetClientNonAdminNamespaces() []string + SetClientNonAdminNamespaces([]string) + NamespaceAccessPoliciesDeleteNamespace(ns string) error + UpdateNamespaceAccessPolicies(accessPolicies map[string]K8sNamespaceAccessPolicy) error + GetNamespaceAccessPolicies() (map[string]K8sNamespaceAccessPolicy, error) + GetNonAdminNamespaces(userID int, teamIDs []int, isRestrictDefaultNamespace bool) ([]string, error) - SetupUserServiceAccount(userID int, teamIDs []int, restrictDefaultNamespace bool) error - IsRBACEnabled() (bool, error) - GetPortainerUserServiceAccount(tokendata *TokenData) (*corev1.ServiceAccount, error) - GetServiceAccounts(namespace string) ([]models.K8sServiceAccount, error) - DeleteServiceAccounts(reqs models.K8sServiceAccountDeleteRequests) error - GetServiceAccountBearerToken(userID int) (string, error) - CreateUserShellPod(ctx context.Context, serviceAccountName, shellPodImage string) (*KubernetesShellPod, error) + // Applications + GetApplications(namespace, nodeName string) ([]models.K8sApplication, error) + GetApplicationsResource(namespace, node string) (models.K8sApplicationResource, error) + + // ClusterRole + GetClusterRoles() ([]models.K8sClusterRole, error) + DeleteClusterRoles(req models.K8sClusterRoleDeleteRequests) error + + // ConfigMap + GetConfigMap(namespace, configMapName string) (models.K8sConfigMap, error) + CombineConfigMapWithApplications(configMap models.K8sConfigMap) (models.K8sConfigMap, error) + + // CronJob + GetCronJobs(namespace string) ([]models.K8sCronJob, error) + DeleteCronJobs(payload models.K8sCronJobDeleteRequests) error + + // Event + GetEvents(namespace string, resourceId string) 
([]models.K8sEvent, error) + + // Exec StartExecProcess(token string, useAdminToken bool, namespace, podName, containerName string, command []string, stdin io.Reader, stdout io.Writer, errChan chan error) + // ClusterRoleBinding + GetClusterRoleBindings() ([]models.K8sClusterRoleBinding, error) + DeleteClusterRoleBindings(reqs models.K8sClusterRoleBindingDeleteRequests) error + + // Dashboard + GetDashboard() (models.K8sDashboard, error) + + // Deployment HasStackName(namespace string, stackName string) (bool, error) - NamespaceAccessPoliciesDeleteNamespace(namespace string) error - CreateNamespace(info models.K8sNamespaceDetails) (*corev1.Namespace, error) - UpdateNamespace(info models.K8sNamespaceDetails) (*corev1.Namespace, error) - GetNamespaces() (map[string]K8sNamespaceInfo, error) - GetNamespace(string) (K8sNamespaceInfo, error) - DeleteNamespace(namespace string) (*corev1.Namespace, error) - GetConfigMaps(namespace string) ([]models.K8sConfigMap, error) - GetSecrets(namespace string) ([]models.K8sSecret, error) + + // Ingress GetIngressControllers() (models.K8sIngressControllers, error) - GetApplications(namespace, nodename string, withDependencies bool) ([]models.K8sApplication, error) - GetMetrics() (models.K8sMetrics, error) - GetStorage() ([]KubernetesStorageClassConfig, error) - CreateIngress(namespace string, info models.K8sIngressInfo, owner string) error - UpdateIngress(namespace string, info models.K8sIngressInfo) error + GetIngress(namespace, ingressName string) (models.K8sIngressInfo, error) GetIngresses(namespace string) ([]models.K8sIngressInfo, error) + CreateIngress(namespace string, info models.K8sIngressInfo, owner string) error DeleteIngresses(reqs models.K8sIngressDeleteRequests) error - CreateService(namespace string, service models.K8sServiceInfo) error - UpdateService(namespace string, service models.K8sServiceInfo) error - GetServices(namespace string) ([]models.K8sServiceInfo, error) - DeleteServices(reqs models.K8sServiceDeleteRequests) error + UpdateIngress(namespace string, info models.K8sIngressInfo) error + CombineIngressWithService(ingress models.K8sIngressInfo) (models.K8sIngressInfo, error) + CombineIngressesWithServices(ingresses []models.K8sIngressInfo) ([]models.K8sIngressInfo, error) + + // Job + GetJobs(namespace string, includeCronJobChildren bool) ([]models.K8sJob, error) + DeleteJobs(payload models.K8sJobDeleteRequests) error + + // Metrics + GetMetrics() (models.K8sMetrics, error) + + // Namespace + ToggleSystemState(namespaceName string, isSystem bool) error + UpdateNamespace(info models.K8sNamespaceDetails) (*corev1.Namespace, error) + GetNamespace(name string) (K8sNamespaceInfo, error) + CreateNamespace(info models.K8sNamespaceDetails) (*corev1.Namespace, error) + GetNamespaces() (map[string]K8sNamespaceInfo, error) + CombineNamespaceWithResourceQuota(namespace K8sNamespaceInfo, w http.ResponseWriter) *httperror.HandlerError + DeleteNamespace(namespaceName string) (*corev1.Namespace, error) + CombineNamespacesWithResourceQuotas(namespaces map[string]K8sNamespaceInfo, w http.ResponseWriter) *httperror.HandlerError + ConvertNamespaceMapToSlice(namespaces map[string]K8sNamespaceInfo) []K8sNamespaceInfo + + // NodeLimits GetNodesLimits() (K8sNodesLimits, error) - GetMaxResourceLimits(name string, overCommitEnabled bool, resourceOverCommitPercent int) (K8sNodeLimits, error) - GetNamespaceAccessPolicies() (map[string]K8sNamespaceAccessPolicy, error) - UpdateNamespaceAccessPolicies(accessPolicies map[string]K8sNamespaceAccessPolicy) error + 
GetMaxResourceLimits(skipNamespace string, overCommitEnabled bool, resourceOverCommitPercent int) (K8sNodeLimits, error) + + // Pod + CreateUserShellPod(ctx context.Context, serviceAccountName, shellPodImage string) (*KubernetesShellPod, error) + + // RBAC + IsRBACEnabled() (bool, error) + + // Registries DeleteRegistrySecret(registry RegistryID, namespace string) error CreateRegistrySecret(registry *Registry, namespace string) error IsRegistrySecret(namespace, secretName string) (bool, error) - ToggleSystemState(namespace string, isSystem bool) error - GetClusterRoles() ([]models.K8sClusterRole, error) - DeleteClusterRoles(models.K8sClusterRoleDeleteRequests) error - GetClusterRoleBindings() ([]models.K8sClusterRoleBinding, error) - DeleteClusterRoleBindings(models.K8sClusterRoleBindingDeleteRequests) error - - GetRoles(namespace string) ([]models.K8sRole, error) - DeleteRoles(models.K8sRoleDeleteRequests) error + // RoleBinding GetRoleBindings(namespace string) ([]models.K8sRoleBinding, error) - DeleteRoleBindings(models.K8sRoleBindingDeleteRequests) error + DeleteRoleBindings(reqs models.K8sRoleBindingDeleteRequests) error + + // Role + DeleteRoles(reqs models.K8sRoleDeleteRequests) error + + // Secret + GetSecrets(namespace string) ([]models.K8sSecret, error) + GetSecret(namespace string, secretName string) (models.K8sSecret, error) + CombineSecretWithApplications(secret models.K8sSecret) (models.K8sSecret, error) + + // ServiceAccount + GetServiceAccounts(namespace string) ([]models.K8sServiceAccount, error) + DeleteServiceAccounts(reqs models.K8sServiceAccountDeleteRequests) error + SetupUserServiceAccount(int, []int, bool) error + GetPortainerUserServiceAccount(tokendata *TokenData) (*corev1.ServiceAccount, error) + GetServiceAccountBearerToken(userID int) (string, error) + + // Service + GetServices(namespace string) ([]models.K8sServiceInfo, error) + CombineServicesWithApplications(services []models.K8sServiceInfo) ([]models.K8sServiceInfo, error) + CreateService(namespace string, info models.K8sServiceInfo) error + DeleteServices(reqs models.K8sServiceDeleteRequests) error + UpdateService(namespace string, info models.K8sServiceInfo) error + + // ServerVersion + ServerVersion() (*version.Info, error) + + // Storage + GetStorage() ([]KubernetesStorageClassConfig, error) + + // Volumes + GetVolumes(namespace string) ([]models.K8sVolumeInfo, error) + GetVolume(namespace, volumeName string) (*models.K8sVolumeInfo, error) + CombineVolumesWithApplications(volumes *[]models.K8sVolumeInfo) (*[]models.K8sVolumeInfo, error) } // KubernetesDeployer represents a service to deploy a manifest inside a Kubernetes environment(endpoint) @@ -1621,7 +1768,7 @@ type ( Start() SetSnapshotInterval(snapshotInterval string) error SnapshotEndpoint(endpoint *Endpoint) error - FillSnapshotData(endpoint *Endpoint) error + FillSnapshotData(endpoint *Endpoint, includeRaw bool) error } // SwarmStackManager represents a service to manage Swarm stacks @@ -1636,9 +1783,9 @@ type ( const ( // APIVersion is the version number of the Portainer API - APIVersion = "2.27.0-rc1" + APIVersion = "2.32.0" // Support annotation for the API version ("STS" for Short-Term Support or "LTS" for Long-Term Support) - APIVersionSupport = "LTS" + APIVersionSupport = "STS" // Edition is what this edition of Portainer is called Edition = PortainerCE // ComposeSyntaxMaxVersion is a maximum supported version of the docker compose syntax @@ -1647,8 +1794,10 @@ const ( AssetsServerURL = 
"https://portainer-io-assets.sfo2.digitaloceanspaces.com" // MessageOfTheDayURL represents the URL where Portainer MOTD message can be retrieved MessageOfTheDayURL = AssetsServerURL + "/motd.json" + // ReleasesURL represents the URL used to retrieve all releases of Portainer + ReleasesURL = "https://api.github.com/repos/portainer/portainer/releases" // VersionCheckURL represents the URL used to retrieve the latest version of Portainer - VersionCheckURL = "https://api.github.com/repos/portainer/portainer/releases/latest" + VersionCheckURL = ReleasesURL + "/latest" // PortainerAgentHeader represents the name of the header available in any agent response PortainerAgentHeader = "Portainer-Agent" // PortainerAgentEdgeIDHeader represent the name of the header containing the Edge ID associated to an agent/agent cluster @@ -1688,6 +1837,17 @@ const ( PortainerCacheHeader = "X-Portainer-Cache" // KubectlShellImageEnvVar is the environment variable used to override the default kubectl shell image KubectlShellImageEnvVar = "KUBECTL_SHELL_IMAGE" + // PullLimitCheckDisabledEnvVar is the environment variable used to disable the pull limit check + PullLimitCheckDisabledEnvVar = "PULL_LIMIT_CHECK_DISABLED" + // LicenseServerBaseURL represents the base URL of the API used to validate + // an extension license. + LicenseServerBaseURL = "https://api.portainer.io" + // URL to validate licenses along with system metadata. + LicenseCheckInURL = LicenseServerBaseURL + "/licenses/checkin" + // TrustedOriginsEnvVar is the environment variable used to set the trusted origins for CSRF protection + TrustedOriginsEnvVar = "TRUSTED_ORIGINS" + // CSPEnvVar is the environment variable used to enable/disable the Content Security Policy + CSPEnvVar = "CSP" ) // List of supported features @@ -1857,6 +2017,8 @@ const ( DockerHubRegistry // EcrRegistry represents an ECR registry EcrRegistry + // Github container registry + GithubRegistry ) const ( diff --git a/api/roar/roar.go b/api/roar/roar.go new file mode 100644 index 000000000..6edc67f75 --- /dev/null +++ b/api/roar/roar.go @@ -0,0 +1,145 @@ +package roar + +import ( + "fmt" + + "github.com/RoaringBitmap/roaring/v2" +) + +type Roar[T ~int] struct { + rb *roaring.Bitmap +} + +// Iterate iterates over the bitmap, calling the given callback with each value in the bitmap. If the callback returns +// false, the iteration is halted. +// The iteration results are undefined if the bitmap is modified (e.g., with Add or Remove). +// There is no guarantee as to what order the values will be iterated. 
+func (r *Roar[T]) Iterate(f func(T) bool) { + if r.rb == nil { + return + } + + r.rb.Iterate(func(e uint32) bool { + return f(T(e)) + }) +} + +// Len returns the number of elements contained in the bitmap +func (r *Roar[T]) Len() int { + if r.rb == nil { + return 0 + } + + return int(r.rb.GetCardinality()) +} + +// Remove removes the given element from the bitmap +func (r *Roar[T]) Remove(e T) { + if r.rb == nil { + return + } + + r.rb.Remove(uint32(e)) +} + +// Add adds the given element to the bitmap +func (r *Roar[T]) Add(e T) { + if r.rb == nil { + r.rb = roaring.New() + } + + r.rb.AddInt(int(e)) +} + +// Contains returns whether the bitmap contains the given element or not +func (r *Roar[T]) Contains(e T) bool { + if r.rb == nil { + return false + } + + return r.rb.ContainsInt(int(e)) +} + +// Union combines the elements of the given bitmap with this bitmap +func (r *Roar[T]) Union(other Roar[T]) { + if other.rb == nil { + return + } else if r.rb == nil { + r.rb = roaring.New() + } + + r.rb.Or(other.rb) +} + +// Intersection modifies this bitmap to only contain elements that are also in the other bitmap +func (r *Roar[T]) Intersection(other Roar[T]) { + if other.rb == nil { + if r.rb != nil { + r.rb.Clear() + } + + return + } + + if r.rb == nil { + r.rb = roaring.New() + } + + r.rb.And(other.rb) +} + +// ToSlice converts the bitmap to a slice of elements +func (r *Roar[T]) ToSlice() []T { + if r.rb == nil { + return make([]T, 0) + } + + slice := make([]T, 0, r.rb.GetCardinality()) + r.rb.Iterate(func(e uint32) bool { + slice = append(slice, T(e)) + + return true + }) + + return slice +} + +func (r *Roar[T]) MarshalJSON() ([]byte, error) { + if r.rb == nil { + return []byte("null"), nil + } + + r.rb.RunOptimize() + + buf, err := r.rb.ToBase64() + if err != nil { + return nil, fmt.Errorf("failed to encode roaring bitmap: %w", err) + } + + return fmt.Appendf(nil, `"%s"`, buf), nil +} + +func (r *Roar[T]) UnmarshalJSON(data []byte) error { + if len(data) == 0 || string(data) == "null" { + return nil + } + + r.rb = roaring.New() + + _, err := r.rb.FromBase64(string(data[1 : len(data)-1])) + + return err +} + +// FromSlice creates a Roar by adding all elements from the provided slices +func FromSlice[T ~int](ess ...[]T) Roar[T] { + var r Roar[T] + + for _, es := range ess { + for _, e := range es { + r.Add(e) + } + } + + return r +} diff --git a/api/roar/roar_test.go b/api/roar/roar_test.go new file mode 100644 index 000000000..ed5103ad5 --- /dev/null +++ b/api/roar/roar_test.go @@ -0,0 +1,123 @@ +package roar + +import ( + "slices" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRoar(t *testing.T) { + r := Roar[int]{} + require.Equal(t, 0, r.Len()) + + r.Add(1) + require.Equal(t, 1, r.Len()) + require.True(t, r.Contains(1)) + require.False(t, r.Contains(2)) + + r.Add(2) + require.Equal(t, 2, r.Len()) + require.True(t, r.Contains(2)) + + r.Remove(1) + require.Equal(t, 1, r.Len()) + require.False(t, r.Contains(1)) + + s := FromSlice([]int{3, 4, 5}) + require.Equal(t, 3, s.Len()) + require.True(t, s.Contains(3)) + require.True(t, s.Contains(4)) + require.True(t, s.Contains(5)) + + r.Union(s) + require.Equal(t, 4, r.Len()) + require.True(t, r.Contains(2)) + require.True(t, r.Contains(3)) + require.True(t, r.Contains(4)) + require.True(t, r.Contains(5)) + + r.Iterate(func(id int) bool { + require.True(t, slices.Contains([]int{2, 3, 4, 5}, id)) + + return true + }) + + rSlice := r.ToSlice() + require.EqualValues(t, []int{2, 3, 4, 5}, rSlice) + + 
r.Intersection(FromSlice([]int{4})) + require.Equal(t, 1, r.Len()) + require.True(t, r.Contains(4)) + require.False(t, r.Contains(2)) + require.False(t, r.Contains(3)) + require.False(t, r.Contains(5)) + + b, err := r.MarshalJSON() + require.NoError(t, err) + require.NotEqual(t, "null", string(b)) + require.True(t, strings.HasPrefix(string(b), `"`)) + require.True(t, strings.HasSuffix(string(b), `"`)) +} + +func TestNilSafety(t *testing.T) { + var r, s, u Roar[int] + + r.Iterate(func(id int) bool { + require.Fail(t, "should not iterate over nil Roar") + + return true + }) + + b, err := r.MarshalJSON() + require.NoError(t, err) + require.Equal(t, "null", string(b)) + + err = r.UnmarshalJSON([]byte("null")) + require.NoError(t, err) + require.Equal(t, 0, r.Len()) + + r.Contains(1) + r.Remove(1) + + require.Equal(t, 0, r.Len()) + require.Empty(t, r.ToSlice()) + + r.Add(1) + require.Equal(t, 1, r.Len()) + require.False(t, r.Contains(2)) + + s.Union(r) + require.Equal(t, 1, s.Len()) + require.True(t, s.Contains(1)) + + r.Union(u) + require.Equal(t, 1, r.Len()) + require.True(t, r.Contains(1)) + + s.Intersection(u) + require.Equal(t, 0, s.Len()) + + u.Intersection(r) + require.Equal(t, 0, u.Len()) +} + +func TestJSON(t *testing.T) { + var r, u Roar[int] + + r.Add(1) + r.Add(2) + r.Add(3) + + b, err := r.MarshalJSON() + require.NoError(t, err) + require.NotEqual(t, "null", string(b)) + + err = u.UnmarshalJSON(b) + require.NoError(t, err) + require.Equal(t, 3, u.Len()) + require.True(t, u.Contains(1)) + require.True(t, u.Contains(2)) + require.True(t, u.Contains(3)) +} diff --git a/api/slicesx/filter.go b/api/slicesx/filter.go new file mode 100644 index 000000000..13dc12105 --- /dev/null +++ b/api/slicesx/filter.go @@ -0,0 +1,28 @@ +package slicesx + +// Iterates over elements of collection, returning an array of all elements predicate returns truthy for. +// +// Note: Unlike `FilterInPlace`, this method returns a new array. +func Filter[T any](input []T, predicate func(T) bool) []T { + result := make([]T, 0) + for i := range input { + if predicate(input[i]) { + result = append(result, input[i]) + } + } + return result +} + +// Filter in place all elements from input that predicate returns truthy for and returns an array of the removed elements. +// +// Note: Unlike `Filter`, this method mutates input. 
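+//
+// Example (illustrative sketch only): the returned slice holds the retained
+// elements and reuses the backing array of the input, so the input's leading
+// elements are overwritten.
+//
+//	in := []int{1, 2, 3, 4}
+//	evens := FilterInPlace(in, func(x int) bool { return x%2 == 0 })
+//	// evens == []int{2, 4}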
+func FilterInPlace[T any](input []T, predicate func(T) bool) []T { + n := 0 + for _, v := range input { + if predicate(v) { + input[n] = v + n++ + } + } + return input[:n] +} diff --git a/api/slicesx/filter_test.go b/api/slicesx/filter_test.go new file mode 100644 index 000000000..36f97fa10 --- /dev/null +++ b/api/slicesx/filter_test.go @@ -0,0 +1,96 @@ +package slicesx_test + +import ( + "testing" + + "github.com/portainer/portainer/api/slicesx" +) + +func Test_Filter(t *testing.T) { + test(t, slicesx.Filter, "Filter even numbers", + []int{1, 2, 3, 4, 5, 6, 7, 8, 9}, + []int{2, 4, 6, 8}, + func(x int) bool { return x%2 == 0 }, + ) + test(t, slicesx.Filter, "Filter odd numbers", + []int{1, 2, 3, 4, 5, 6, 7, 8, 9}, + []int{1, 3, 5, 7, 9}, + func(x int) bool { return x%2 == 1 }, + ) + test(t, slicesx.Filter, "Filter strings starting with 'A'", + []string{"Apple", "Banana", "Avocado", "Grapes", "Apricot"}, + []string{"Apple", "Avocado", "Apricot"}, + func(s string) bool { return s[0] == 'A' }, + ) + test(t, slicesx.Filter, "Filter strings longer than 5 chars", + []string{"Apple", "Banana", "Avocado", "Grapes", "Apricot"}, + []string{"Banana", "Avocado", "Grapes", "Apricot"}, + func(s string) bool { return len(s) > 5 }, + ) +} + +func Test_Retain(t *testing.T) { + test(t, slicesx.FilterInPlace, "Filter even numbers", + []int{1, 2, 3, 4, 5, 6, 7, 8, 9}, + []int{2, 4, 6, 8}, + func(x int) bool { return x%2 == 0 }, + ) + test(t, slicesx.FilterInPlace, "Filter odd numbers", + []int{1, 2, 3, 4, 5, 6, 7, 8, 9}, + []int{1, 3, 5, 7, 9}, + func(x int) bool { return x%2 == 1 }, + ) + test(t, slicesx.FilterInPlace, "Filter strings starting with 'A'", + []string{"Apple", "Banana", "Avocado", "Grapes", "Apricot"}, + []string{"Apple", "Avocado", "Apricot"}, + func(s string) bool { return s[0] == 'A' }, + ) + test(t, slicesx.FilterInPlace, "Filter strings longer than 5 chars", + []string{"Apple", "Banana", "Avocado", "Grapes", "Apricot"}, + []string{"Banana", "Avocado", "Grapes", "Apricot"}, + func(s string) bool { return len(s) > 5 }, + ) +} + +func Benchmark_Filter(b *testing.B) { + n := 100000 + + source := make([]int, n) + for i := range source { + source[i] = i + } + + b.ResetTimer() + for range b.N { + e := slicesx.Filter(source, func(x int) bool { return x%2 == 0 }) + if len(e) != n/2 { + b.FailNow() + } + } +} + +func Benchmark_FilterInPlace(b *testing.B) { + n := 100000 + + source := make([]int, n) + for i := range source { + source[i] = i + } + + // Preallocate all copies before timing + // because FilterInPlace mutates the original slice + copies := make([][]int, b.N) + for i := range b.N { + buf := make([]int, len(source)) + copy(buf, source) + copies[i] = buf + } + + b.ResetTimer() + for i := range b.N { + e := slicesx.FilterInPlace(copies[i], func(x int) bool { return x%2 == 0 }) + if len(e) != n/2 { + b.FailNow() + } + } +} diff --git a/api/slicesx/flatten.go b/api/slicesx/flatten.go new file mode 100644 index 000000000..56a77f3e9 --- /dev/null +++ b/api/slicesx/flatten.go @@ -0,0 +1,7 @@ +package slicesx + +import "slices" + +func Flatten[T any](input [][]T) []T { + return slices.Concat(input...) 
+} diff --git a/api/slicesx/flatten_test.go b/api/slicesx/flatten_test.go new file mode 100644 index 000000000..6875c4e6b --- /dev/null +++ b/api/slicesx/flatten_test.go @@ -0,0 +1,19 @@ +package slicesx_test + +import ( + "testing" + + "github.com/portainer/portainer/api/slicesx" + "github.com/stretchr/testify/assert" +) + +func Test_Flatten(t *testing.T) { + t.Run("Flatten an array of arrays", func(t *testing.T) { + is := assert.New(t) + + source := [][]int{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}} + expected := []int{1, 2, 3, 4, 5, 6, 7, 8, 9} + is.ElementsMatch(slicesx.Flatten(source), expected) + + }) +} diff --git a/api/slicesx/includes.go b/api/slicesx/includes.go new file mode 100644 index 000000000..377a54215 --- /dev/null +++ b/api/slicesx/includes.go @@ -0,0 +1,17 @@ +package slicesx + +import "slices" + +// Checks if predicate returns truthy for any element of input. Iteration is stopped once predicate returns truthy. +func Some[T any](input []T, predicate func(T) bool) bool { + return slices.ContainsFunc(input, predicate) +} + +// Checks if predicate returns truthy for all elements of input. Iteration is stopped once predicate returns falsey. +// +// Note: This method returns true for empty collections because everything is true of elements of empty collections. +// https://en.wikipedia.org/wiki/Vacuous_truth +func Every[T any](input []T, predicate func(T) bool) bool { + // if the slice doesn't contain an inverted predicate then all items follow the predicate + return !slices.ContainsFunc(input, func(t T) bool { return !predicate(t) }) +} diff --git a/api/slicesx/includes_test.go b/api/slicesx/includes_test.go new file mode 100644 index 000000000..a3f074c1c --- /dev/null +++ b/api/slicesx/includes_test.go @@ -0,0 +1,76 @@ +package slicesx_test + +import ( + "testing" + + "github.com/portainer/portainer/api/slicesx" +) + +func Test_Every(t *testing.T) { + test(t, slicesx.Every, "All start with an A (ok)", + []string{"Apple", "Avocado", "Apricot"}, + true, + func(s string) bool { return s[0] == 'A' }, + ) + test(t, slicesx.Every, "All start with an A (ko = some don't start with A)", + []string{"Apple", "Avocado", "Banana"}, + false, + func(s string) bool { return s[0] == 'A' }, + ) + test(t, slicesx.Every, "All are under 5 (ok)", + []int{1, 2, 3}, + true, + func(i int) bool { return i < 5 }, + ) + test(t, slicesx.Every, "All are under 5 (ko = some above 10)", + []int{1, 2, 10}, + false, + func(i int) bool { return i < 5 }, + ) + test(t, slicesx.Every, "All are true (ok)", + []struct{ x bool }{{x: true}, {x: true}, {x: true}}, + true, + func(s struct{ x bool }) bool { return s.x }) + test(t, slicesx.Every, "All are true (ko = some are false)", + []struct{ x bool }{{x: true}, {x: true}, {x: false}}, + false, + func(s struct{ x bool }) bool { return s.x }) + test(t, slicesx.Every, "Must be true on empty slice", + []int{}, + true, + func(i int) bool { return i%2 == 0 }, + ) +} + +func Test_Some(t *testing.T) { + test(t, slicesx.Some, "Some start with an A (ok)", + []string{"Apple", "Avocado", "Banana"}, + true, + func(s string) bool { return s[0] == 'A' }, + ) + test(t, slicesx.Some, "Some start with an A (ko = all don't start with A)", + []string{"Banana", "Cherry", "Peach"}, + false, + func(s string) bool { return s[0] == 'A' }, + ) + test(t, slicesx.Some, "Some are under 5 (ok)", + []int{1, 2, 30}, + true, + func(i int) bool { return i < 5 }, + ) + test(t, slicesx.Some, "Some are under 5 (ko = all above 5)", + []int{10, 11, 12}, + false, + func(i int) bool { return i < 5 }, + ) + test(t, 
slicesx.Some, "Some are true (ok)", + []struct{ x bool }{{x: true}, {x: true}, {x: false}}, + true, + func(s struct{ x bool }) bool { return s.x }, + ) + test(t, slicesx.Some, "Some are true (ko = all are false)", + []struct{ x bool }{{x: false}, {x: false}, {x: false}}, + false, + func(s struct{ x bool }) bool { return s.x }, + ) +} diff --git a/api/slicesx/map.go b/api/slicesx/map.go new file mode 100644 index 000000000..7e24bdd0d --- /dev/null +++ b/api/slicesx/map.go @@ -0,0 +1,15 @@ +package slicesx + +// Map applies the given function to each element of the slice and returns a new slice with the results +func Map[T, U any](s []T, f func(T) U) []U { + result := make([]U, len(s)) + for i, v := range s { + result[i] = f(v) + } + return result +} + +// FlatMap applies the given function to each element of the slice and returns a new slice with the flattened results +func FlatMap[T, U any](s []T, f func(T) []U) []U { + return Flatten(Map(s, f)) +} diff --git a/api/slicesx/map_test.go b/api/slicesx/map_test.go new file mode 100644 index 000000000..a2cd2256d --- /dev/null +++ b/api/slicesx/map_test.go @@ -0,0 +1,43 @@ +package slicesx_test + +import ( + "strconv" + "testing" + + "github.com/portainer/portainer/api/slicesx" +) + +func Test_Map(t *testing.T) { + test(t, slicesx.Map, "Map integers to strings", + []int{1, 2, 3, 4, 5}, + []string{"1", "2", "3", "4", "5"}, + strconv.Itoa, + ) + test(t, slicesx.Map, "Map strings to integers", + []string{"1", "2", "3", "4", "5"}, + []int{1, 2, 3, 4, 5}, + func(s string) int { + n, _ := strconv.Atoi(s) + return n + }, + ) +} + +func Test_FlatMap(t *testing.T) { + test(t, slicesx.FlatMap, "Map integers to strings and flatten", + []int{1, 2, 3, 4, 5}, + []string{"1", "1", "2", "2", "3", "3", "4", "4", "5", "5"}, + func(i int) []string { + x := strconv.Itoa(i) + return []string{x, x} + }, + ) + test(t, slicesx.FlatMap, "Map strings to integers and flatten", + []string{"1", "2", "3", "4", "5"}, + []int{1, 1, 2, 2, 3, 3, 4, 4, 5, 5}, + func(s string) []int { + n, _ := strconv.Atoi(s) + return []int{n, n} + }, + ) +} diff --git a/api/slicesx/slices_test.go b/api/slicesx/slices_test.go deleted file mode 100644 index d75f9b559..000000000 --- a/api/slicesx/slices_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package slicesx - -import ( - "strconv" - "testing" - - "github.com/stretchr/testify/assert" -) - -type filterTestCase[T any] struct { - name string - input []T - expected []T - predicate func(T) bool -} - -func TestFilter(t *testing.T) { - intTestCases := []filterTestCase[int]{ - { - name: "Filter even numbers", - input: []int{1, 2, 3, 4, 5, 6, 7, 8, 9}, - expected: []int{2, 4, 6, 8}, - - predicate: func(n int) bool { - return n%2 == 0 - }, - }, - { - name: "Filter odd numbers", - input: []int{1, 2, 3, 4, 5, 6, 7, 8, 9}, - expected: []int{1, 3, 5, 7, 9}, - - predicate: func(n int) bool { - return n%2 != 0 - }, - }, - } - - runTestCases(t, intTestCases) - - stringTestCases := []filterTestCase[string]{ - { - name: "Filter strings starting with 'A'", - input: []string{"Apple", "Banana", "Avocado", "Grapes", "Apricot"}, - expected: []string{"Apple", "Avocado", "Apricot"}, - predicate: func(s string) bool { - return s[0] == 'A' - }, - }, - { - name: "Filter strings longer than 5 characters", - input: []string{"Apple", "Banana", "Avocado", "Grapes", "Apricot"}, - expected: []string{"Banana", "Avocado", "Grapes", "Apricot"}, - predicate: func(s string) bool { - return len(s) > 5 - }, - }, - } - - runTestCases(t, stringTestCases) -} - -func runTestCases[T any](t 
*testing.T, testCases []filterTestCase[T]) { - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - is := assert.New(t) - result := Filter(testCase.input, testCase.predicate) - - is.Equal(len(testCase.expected), len(result)) - is.ElementsMatch(testCase.expected, result) - }) - } -} - -func TestMap(t *testing.T) { - intTestCases := []struct { - name string - input []int - expected []string - mapper func(int) string - }{ - { - name: "Map integers to strings", - input: []int{1, 2, 3, 4, 5}, - expected: []string{"1", "2", "3", "4", "5"}, - mapper: strconv.Itoa, - }, - } - - runMapTestCases(t, intTestCases) - - stringTestCases := []struct { - name string - input []string - expected []int - mapper func(string) int - }{ - { - name: "Map strings to integers", - input: []string{"1", "2", "3", "4", "5"}, - expected: []int{1, 2, 3, 4, 5}, - mapper: func(s string) int { - n, _ := strconv.Atoi(s) - return n - }, - }, - } - - runMapTestCases(t, stringTestCases) -} - -func runMapTestCases[T, U any](t *testing.T, testCases []struct { - name string - input []T - expected []U - mapper func(T) U -}) { - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - is := assert.New(t) - result := Map(testCase.input, testCase.mapper) - - is.Equal(len(testCase.expected), len(result)) - is.ElementsMatch(testCase.expected, result) - }) - } -} diff --git a/api/slicesx/slicesx_test.go b/api/slicesx/slicesx_test.go new file mode 100644 index 000000000..1bb8a76fe --- /dev/null +++ b/api/slicesx/slicesx_test.go @@ -0,0 +1,29 @@ +package slicesx_test + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +type libFunc[T, U, V any] func([]T, func(T) U) V +type predicateFunc[T, U any] func(T) U + +func test[T, U, V any](t *testing.T, libFn libFunc[T, U, V], name string, input []T, expected V, predicate predicateFunc[T, U]) { + t.Helper() + + t.Run(name, func(t *testing.T) { + is := assert.New(t) + + result := libFn(input, predicate) + + switch reflect.TypeOf(result).Kind() { + case reflect.Slice, reflect.Array: + is.Equal(expected, result) + is.ElementsMatch(expected, result) + default: + is.Equal(expected, result) + } + }) +} diff --git a/api/slicesx/slices.go b/api/slicesx/unique.go similarity index 51% rename from api/slicesx/slices.go rename to api/slicesx/unique.go index b7e0aa0ef..8659b0778 100644 --- a/api/slicesx/slices.go +++ b/api/slicesx/unique.go @@ -1,27 +1,5 @@ package slicesx -// Map applies the given function to each element of the slice and returns a new slice with the results -func Map[T, U any](s []T, f func(T) U) []U { - result := make([]U, len(s)) - for i, v := range s { - result[i] = f(v) - } - return result -} - -// Filter returns a new slice containing only the elements of the slice for which the given predicate returns true -func Filter[T any](s []T, predicate func(T) bool) []T { - n := 0 - for _, v := range s { - if predicate(v) { - s[n] = v - n++ - } - } - - return s[:n] -} - func Unique[T comparable](items []T) []T { return UniqueBy(items, func(item T) T { return item diff --git a/api/slicesx/unique_test.go b/api/slicesx/unique_test.go new file mode 100644 index 000000000..8ff967ca6 --- /dev/null +++ b/api/slicesx/unique_test.go @@ -0,0 +1,46 @@ +package slicesx_test + +import ( + "testing" + + "github.com/portainer/portainer/api/slicesx" + "github.com/stretchr/testify/assert" +) + +func Test_Unique(t *testing.T) { + is := assert.New(t) + t.Run("Should extract unique numbers", func(t *testing.T) { + + source := 
[]int{1, 1, 2, 3, 4, 4, 5, 4, 6, 7, 8, 9, 1} + result := slicesx.Unique(source) + expected := []int{1, 2, 3, 4, 5, 6, 7, 8, 9} + + is.ElementsMatch(result, expected) + }) + + t.Run("Should return empty array", func(t *testing.T) { + source := []int{} + result := slicesx.Unique(source) + expected := []int{} + is.ElementsMatch(result, expected) + }) +} + +func Test_UniqueBy(t *testing.T) { + is := assert.New(t) + t.Run("Should extract unique numbers by property", func(t *testing.T) { + + source := []struct{ int }{{1}, {1}, {2}, {3}, {4}, {4}, {5}, {4}, {6}, {7}, {8}, {9}, {1}} + result := slicesx.UniqueBy(source, func(item struct{ int }) int { return item.int }) + expected := []struct{ int }{{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}} + + is.ElementsMatch(result, expected) + }) + + t.Run("Should return empty array", func(t *testing.T) { + source := []int{} + result := slicesx.UniqueBy(source, func(x int) int { return x }) + expected := []int{} + is.ElementsMatch(result, expected) + }) +} diff --git a/api/stacks/deployments/deploy.go b/api/stacks/deployments/deploy.go index d963e8686..627c3012a 100644 --- a/api/stacks/deployments/deploy.go +++ b/api/stacks/deployments/deploy.go @@ -2,7 +2,6 @@ package deployments import ( "cmp" - "crypto/tls" "fmt" "strconv" "time" @@ -215,13 +214,9 @@ func isEnvironmentOnline(endpoint *portainer.Endpoint) bool { return true } - var err error - var tlsConfig *tls.Config - if endpoint.TLSConfig.TLS { - tlsConfig, err = crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig.TLSCACertPath, endpoint.TLSConfig.TLSCertPath, endpoint.TLSConfig.TLSKeyPath, endpoint.TLSConfig.TLSSkipVerify) - if err != nil { - return false - } + tlsConfig, err := crypto.CreateTLSConfigurationFromDisk(endpoint.TLSConfig) + if err != nil { + return false } _, _, err = agent.GetAgentVersionAndPlatform(endpoint.URL, tlsConfig) diff --git a/api/stacks/deployments/deploy_test.go b/api/stacks/deployments/deploy_test.go index d45313cb9..cda695b4a 100644 --- a/api/stacks/deployments/deploy_test.go +++ b/api/stacks/deployments/deploy_test.go @@ -10,6 +10,7 @@ import ( "testing" portainer "github.com/portainer/portainer/api" + "github.com/portainer/portainer/api/crypto" "github.com/portainer/portainer/api/datastore" gittypes "github.com/portainer/portainer/api/git/types" "github.com/portainer/portainer/api/internal/testhelpers" @@ -127,9 +128,8 @@ func agentServer(t *testing.T) string { cert, err := tls.X509KeyPair([]byte(localhostCert), []byte(localhostKey)) require.NoError(t, err) - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{cert}, - } + tlsConfig := crypto.CreateTLSConfiguration(false) + tlsConfig.Certificates = []tls.Certificate{cert} l, err := tls.Listen("tcp", "127.0.0.1:0", tlsConfig) require.NoError(t, err) diff --git a/api/stacks/stackutils/gitops.go b/api/stacks/stackutils/gitops.go index d035b783d..566a2a2e2 100644 --- a/api/stacks/stackutils/gitops.go +++ b/api/stacks/stackutils/gitops.go @@ -19,13 +19,23 @@ var ( func DownloadGitRepository(config gittypes.RepoConfig, gitService portainer.GitService, getProjectPath func() string) (string, error) { username := "" password := "" + authType := gittypes.GitCredentialAuthType_Basic if config.Authentication != nil { username = config.Authentication.Username password = config.Authentication.Password + authType = config.Authentication.AuthorizationType } projectPath := getProjectPath() - err := gitService.CloneRepository(projectPath, config.URL, config.ReferenceName, username, password, config.TLSSkipVerify) + err := 
gitService.CloneRepository( + projectPath, + config.URL, + config.ReferenceName, + username, + password, + authType, + config.TLSSkipVerify, + ) if err != nil { if errors.Is(err, gittypes.ErrAuthenticationFailure) { newErr := git.ErrInvalidGitCredential @@ -36,7 +46,14 @@ func DownloadGitRepository(config gittypes.RepoConfig, gitService portainer.GitS return "", newErr } - commitID, err := gitService.LatestCommitID(config.URL, config.ReferenceName, username, password, config.TLSSkipVerify) + commitID, err := gitService.LatestCommitID( + config.URL, + config.ReferenceName, + username, + password, + authType, + config.TLSSkipVerify, + ) if err != nil { newErr := fmt.Errorf("unable to fetch git repository id: %w", err) return "", newErr diff --git a/api/stacks/stackutils/util.go b/api/stacks/stackutils/util.go index 5dd5bf4c5..9a4daeae3 100644 --- a/api/stacks/stackutils/util.go +++ b/api/stacks/stackutils/util.go @@ -45,11 +45,6 @@ func SanitizeLabel(value string) string { return strings.Trim(onlyAllowedCharacterString, ".-_") } -// IsGitStack checks if the stack is a git stack or not -func IsGitStack(stack *portainer.Stack) bool { - return stack.GitConfig != nil && len(stack.GitConfig.URL) != 0 -} - // IsRelativePathStack checks if the stack is a git stack or not func IsRelativePathStack(stack *portainer.Stack) bool { // Always return false in CE diff --git a/api/swagger.yaml b/api/swagger.yaml deleted file mode 100644 index e5d6eabea..000000000 --- a/api/swagger.yaml +++ /dev/null @@ -1,6114 +0,0 @@ -basePath: /api -definitions: - auth.authenticatePayload: - properties: - password: - description: Password - example: mypassword - type: string - username: - description: Username - example: admin - type: string - required: - - password - - username - type: object - auth.authenticateResponse: - properties: - jwt: - description: JWT token used to authenticate against the API - example: abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyzAB - type: string - type: object - auth.oauthPayload: - properties: - code: - description: OAuth code returned from OAuth Provided - type: string - type: object - customtemplates.customTemplateFromFileContentPayload: - properties: - description: - description: Description of the template - example: High performance web server - type: string - fileContent: - description: Content of stack file - type: string - logo: - description: URL of the template's logo - example: https://cloudinovasi.id/assets/img/logos/nginx.png - type: string - note: - description: A note that will be displayed in the UI. Supports HTML content - example: This is my custom template - type: string - platform: - description: |- - Platform associated to the template. 
- Valid values are: 1 - 'linux', 2 - 'windows' - enum: - - 1 - - 2 - example: 1 - type: integer - title: - description: Title of the template - example: Nginx - type: string - type: - description: Type of created stack (1 - swarm, 2 - compose) - enum: - - 1 - - 2 - example: 1 - type: integer - required: - - description - - fileContent - - platform - - title - - type - type: object - customtemplates.customTemplateFromGitRepositoryPayload: - properties: - composeFilePathInRepository: - default: docker-compose.yml - description: Path to the Stack file inside the Git repository - example: docker-compose.yml - type: string - description: - description: Description of the template - example: High performance web server - type: string - logo: - description: URL of the template's logo - example: https://cloudinovasi.id/assets/img/logos/nginx.png - type: string - note: - description: A note that will be displayed in the UI. Supports HTML content - example: This is my custom template - type: string - platform: - description: |- - Platform associated to the template. - Valid values are: 1 - 'linux', 2 - 'windows' - enum: - - 1 - - 2 - example: 1 - type: integer - repositoryAuthentication: - description: Use basic authentication to clone the Git repository - example: true - type: boolean - repositoryPassword: - description: Password used in basic authentication. Required when RepositoryAuthentication - is true. - example: myGitPassword - type: string - repositoryReferenceName: - description: Reference name of a Git repository hosting the Stack file - example: refs/heads/master - type: string - repositoryURL: - description: URL of a Git repository hosting the Stack file - example: https://github.com/openfaas/faas - type: string - repositoryUsername: - description: Username used in basic authentication. Required when RepositoryAuthentication - is true. - example: myGitUsername - type: string - title: - description: Title of the template - example: Nginx - type: string - type: - description: Type of created stack (1 - swarm, 2 - compose) - enum: - - 1 - - 2 - example: 1 - type: integer - required: - - description - - platform - - repositoryURL - - title - - type - type: object - customtemplates.customTemplateUpdatePayload: - properties: - description: - description: Description of the template - example: High performance web server - type: string - fileContent: - description: Content of stack file - type: string - logo: - description: URL of the template's logo - example: https://cloudinovasi.id/assets/img/logos/nginx.png - type: string - note: - description: A note that will be displayed in the UI. Supports HTML content - example: This is my custom template - type: string - platform: - description: |- - Platform associated to the template. 
- Valid values are: 1 - 'linux', 2 - 'windows' - enum: - - 1 - - 2 - example: 1 - type: integer - title: - description: Title of the template - example: Nginx - type: string - type: - description: Type of created stack (1 - swarm, 2 - compose) - enum: - - 1 - - 2 - example: 1 - type: integer - required: - - description - - fileContent - - platform - - title - - type - type: object - customtemplates.fileResponse: - properties: - fileContent: - type: string - type: object - dockerhub.dockerhubUpdatePayload: - properties: - authentication: - description: Enable authentication against DockerHub - example: false - type: boolean - password: - description: Password used to authenticate against the DockerHub - example: hub_password - type: string - username: - description: Username used to authenticate against the DockerHub - example: hub_user - type: string - required: - - authentication - - password - - username - type: object - edgegroups.edgeGroupCreatePayload: - properties: - dynamic: - type: boolean - endpoints: - items: - type: integer - type: array - name: - type: string - partialMatch: - type: boolean - tagIDs: - items: - description: Tag identifier - example: 1 - type: integer - type: array - type: object - edgegroups.edgeGroupUpdatePayload: - properties: - dynamic: - type: boolean - endpoints: - items: - type: integer - type: array - name: - type: string - partialMatch: - type: boolean - tagIDs: - items: - description: Tag identifier - example: 1 - type: integer - type: array - type: object - edgejobs.edgeJobCreateFromFileContentPayload: - properties: - cronExpression: - type: string - endpoints: - items: - type: integer - type: array - fileContent: - type: string - name: - type: string - recurring: - type: boolean - type: object - edgejobs.edgeJobCreateFromFilePayload: - properties: - cronExpression: - type: string - endpoints: - items: - type: integer - type: array - file: - items: - type: integer - type: array - name: - type: string - recurring: - type: boolean - type: object - edgejobs.edgeJobFileResponse: - properties: - FileContent: - type: string - type: object - edgejobs.edgeJobUpdatePayload: - properties: - cronExpression: - type: string - endpoints: - items: - type: integer - type: array - fileContent: - type: string - name: - type: string - recurring: - type: boolean - type: object - edgejobs.fileResponse: - properties: - FileContent: - type: string - type: object - edgejobs.taskContainer: - properties: - EndpointId: - type: integer - Id: - type: string - LogsStatus: - type: integer - type: object - edgestacks.stackFileResponse: - properties: - StackFileContent: - type: string - type: object - edgestacks.swarmStackFromFileContentPayload: - properties: - edgeGroups: - items: - description: EdgeGroup Identifier - example: 1 - type: integer - type: array - name: - type: string - stackFileContent: - type: string - type: object - edgestacks.swarmStackFromFileUploadPayload: - properties: - edgeGroups: - items: - description: EdgeGroup Identifier - example: 1 - type: integer - type: array - name: - type: string - stackFileContent: - items: - type: integer - type: array - type: object - edgestacks.swarmStackFromGitRepositoryPayload: - properties: - composeFilePathInRepository: - type: string - edgeGroups: - items: - description: EdgeGroup Identifier - example: 1 - type: integer - type: array - name: - type: string - repositoryAuthentication: - type: boolean - repositoryPassword: - type: string - repositoryReferenceName: - type: string - repositoryURL: - type: string - 
repositoryUsername: - type: string - type: object - edgestacks.updateEdgeStackPayload: - properties: - edgeGroups: - items: - description: EdgeGroup Identifier - example: 1 - type: integer - type: array - prune: - type: boolean - stackFileContent: - type: string - version: - type: integer - type: object - endpointedge.configResponse: - properties: - name: - type: string - prune: - type: boolean - stackFileContent: - type: string - type: object - endpointgroups.endpointGroupCreatePayload: - properties: - associatedEndpoints: - description: List of endpoint identifiers that will be part of this group - example: - - 1 - - 3 - items: - type: integer - type: array - description: - description: Endpoint group description - example: description - type: string - name: - description: Endpoint group name - example: my-endpoint-group - type: string - tagIDs: - description: List of tag identifiers to which this endpoint group is associated - example: - - 1 - - 2 - items: - description: Tag identifier - example: 1 - type: integer - type: array - required: - - name - type: object - endpointgroups.endpointGroupUpdatePayload: - properties: - description: - description: Endpoint group description - example: description - type: string - name: - description: Endpoint group name - example: my-endpoint-group - type: string - tagIDs: - description: List of tag identifiers associated to the endpoint group - example: - - 3 - - 4 - items: - description: Tag identifier - example: 1 - type: integer - type: array - teamAccessPolicies: - $ref: '#/definitions/portainer.TeamAccessPolicies' - userAccessPolicies: - $ref: '#/definitions/portainer.UserAccessPolicies' - type: object - endpoints.edgeJobResponse: - properties: - CollectLogs: - description: Whether to collect logs - example: true - type: boolean - CronExpression: - description: A cron expression to schedule this job - example: '* * * * *' - type: string - Id: - description: EdgeJob Identifier - example: 2 - type: integer - Script: - description: Script to run - example: echo hello - type: string - Version: - description: Version of this EdgeJob - example: 2 - type: integer - type: object - endpoints.endpointEdgeStatusInspectResponse: - properties: - checkin: - description: The current value of CheckinInterval - example: 5 - type: integer - credentials: - type: string - port: - description: The tunnel port - example: 8732 - type: integer - schedules: - description: List of requests for jobs to run on the endpoint - items: - $ref: '#/definitions/endpoints.edgeJobResponse' - type: array - stacks: - description: List of stacks to be deployed on the endpoints - items: - $ref: '#/definitions/endpoints.stackStatusResponse' - type: array - status: - description: Status represents the endpoint status - example: REQUIRED - type: string - type: object - endpoints.endpointUpdatePayload: - properties: - azureApplicationID: - description: Azure application ID - example: eag7cdo9-o09l-9i83-9dO9-f0b23oe78db4 - type: string - azureAuthenticationKey: - description: Azure authentication key - example: cOrXoK/1D35w8YQ8nH1/8ZGwzz45JIYD5jxHKXEQknk= - type: string - azureTenantID: - description: Azure tenant ID - example: 34ddc78d-4fel-2358-8cc1-df84c8o839f5 - type: string - edgeCheckinInterval: - description: The check in interval for edge agent (in seconds) - example: 5 - type: integer - groupID: - description: Group identifier - example: 1 - type: integer - kubernetes: - $ref: '#/definitions/portainer.KubernetesData' - description: Associated Kubernetes data - name: - description: 
Name that will be used to identify this endpoint - example: my-endpoint - type: string - publicURL: - description: |- - URL or IP address where exposed containers will be reachable.\ - Defaults to URL if not specified - example: docker.mydomain.tld:2375 - type: string - status: - description: The status of the endpoint (1 - up, 2 - down) - example: 1 - type: integer - tagIDs: - description: List of tag identifiers to which this endpoint is associated - example: - - 1 - - 2 - items: - description: Tag identifier - example: 1 - type: integer - type: array - teamAccessPolicies: - $ref: '#/definitions/portainer.TeamAccessPolicies' - tls: - description: Require TLS to connect against this endpoint - example: true - type: boolean - tlsskipClientVerify: - description: Skip client verification when using TLS - example: false - type: boolean - tlsskipVerify: - description: Skip server verification when using TLS - example: false - type: boolean - url: - description: URL or IP address of a Docker host - example: docker.mydomain.tld:2375 - type: string - userAccessPolicies: - $ref: '#/definitions/portainer.UserAccessPolicies' - type: object - endpoints.stackStatusResponse: - properties: - id: - description: EdgeStack Identifier - example: 1 - type: integer - version: - description: Version of this stack - example: 3 - type: integer - type: object - motd.motdResponse: - properties: - ContentLayout: - additionalProperties: - type: string - type: object - Hash: - items: - type: integer - type: array - Message: - type: string - Style: - type: string - Title: - type: string - type: object - portainer.AccessPolicy: - properties: - RoleId: - description: Role identifier. Reference the role that will be associated to - this access policy - example: 1 - type: integer - type: object - portainer.Authorizations: - additionalProperties: - type: boolean - type: object - portainer.AzureCredentials: - properties: - ApplicationID: - description: Azure application ID - example: eag7cdo9-o09l-9i83-9dO9-f0b23oe78db4 - type: string - AuthenticationKey: - description: Azure authentication key - example: cOrXoK/1D35w8YQ8nH1/8ZGwzz45JIYD5jxHKXEQknk= - type: string - TenantID: - description: Azure tenant ID - example: 34ddc78d-4fel-2358-8cc1-df84c8o839f5 - type: string - type: object - portainer.CustomTemplate: - properties: - CreatedByUserId: - description: User identifier who created this template - example: 3 - type: integer - Description: - description: Description of the template - example: High performance web server - type: string - EntryPoint: - description: Path to the Stack file - example: docker-compose.yml - type: string - Id: - description: CustomTemplate Identifier - example: 1 - type: integer - Logo: - description: URL of the template's logo - example: https://cloudinovasi.id/assets/img/logos/nginx.png - type: string - Note: - description: A note that will be displayed in the UI. Supports HTML content - example: This is my custom template - type: string - Platform: - description: |- - Platform associated to the template. 
- Valid values are: 1 - 'linux', 2 - 'windows' - enum: - - 1 - - 2 - example: 1 - type: integer - ProjectPath: - description: Path on disk to the repository hosting the Stack file - example: /data/custom_template/3 - type: string - ResourceControl: - $ref: '#/definitions/portainer.ResourceControl' - Title: - description: Title of the template - example: Nginx - type: string - Type: - description: Type of created stack (1 - swarm, 2 - compose) - example: 1 - type: integer - type: object - portainer.DockerHub: - properties: - Authentication: - description: Is authentication against DockerHub enabled - example: true - type: boolean - Password: - description: Password used to authenticate against the DockerHub - example: passwd - type: string - Username: - description: Username used to authenticate against the DockerHub - example: user - type: string - type: object - portainer.DockerSnapshot: - properties: - DockerSnapshotRaw: - $ref: '#/definitions/portainer.DockerSnapshotRaw' - DockerVersion: - type: string - GpuUseAll: - type: boolean - GpuUseList: - items: - type: string - type: array - HealthyContainerCount: - type: integer - ImageCount: - type: integer - RunningContainerCount: - type: integer - ServiceCount: - type: integer - StackCount: - type: integer - StoppedContainerCount: - type: integer - Swarm: - type: boolean - Time: - type: integer - TotalCPU: - type: integer - TotalMemory: - type: integer - UnhealthyContainerCount: - type: integer - VolumeCount: - type: integer - type: object - portainer.DockerSnapshotRaw: - properties: - Containers: - type: object - Images: - type: object - Info: - type: object - Networks: - type: object - Version: - type: object - Volumes: - type: object - type: object - portainer.EdgeGroup: - properties: - Dynamic: - type: boolean - Endpoints: - items: - type: integer - type: array - Id: - description: EdgeGroup Identifier - example: 1 - type: integer - Name: - type: string - PartialMatch: - type: boolean - TagIds: - items: - description: Tag identifier - example: 1 - type: integer - type: array - type: object - portainer.EdgeJob: - properties: - Created: - type: integer - CronExpression: - type: string - Endpoints: - additionalProperties: - $ref: '#/definitions/portainer.EdgeJobEndpointMeta' - type: object - Id: - description: EdgeJob Identifier - example: 1 - type: integer - Name: - type: string - Recurring: - type: boolean - ScriptPath: - type: string - Version: - type: integer - type: object - portainer.EdgeJobEndpointMeta: - properties: - collectLogs: - type: boolean - logsStatus: - type: integer - type: object - portainer.EdgeStack: - properties: - CreationDate: - type: integer - EdgeGroups: - items: - description: EdgeGroup Identifier - example: 1 - type: integer - type: array - EntryPoint: - type: string - Id: - description: EdgeStack Identifier - example: 1 - type: integer - Name: - type: string - ProjectPath: - type: string - Prune: - type: boolean - Status: - additionalProperties: - $ref: '#/definitions/portainer.EdgeStackStatus' - type: object - Version: - type: integer - type: object - portainer.EdgeStackStatus: - properties: - EndpointID: - type: integer - Error: - type: string - Type: - type: integer - type: object - portainer.Endpoint: - properties: - AuthorizedTeams: - items: - type: integer - type: array - AuthorizedUsers: - description: Deprecated in DBVersion == 18 - items: - description: User identifier who created this template - example: 3 - type: integer - type: array - AzureCredentials: - $ref: 
'#/definitions/portainer.AzureCredentials' - EdgeCheckinInterval: - description: The check in interval for edge agent (in seconds) - example: 5 - type: integer - EdgeID: - description: The identifier of the edge agent associated with this endpoint - type: string - EdgeKey: - description: The key which is used to map the agent to Portainer - type: string - Gpus: - description: Endpoint Gpus information - items: - $ref: '#/definitions/portainer.Pair' - type: array - GroupId: - description: Endpoint group identifier - example: 1 - type: integer - Id: - description: Endpoint Identifier - example: 1 - type: integer - Kubernetes: - $ref: '#/definitions/portainer.KubernetesData' - description: Associated Kubernetes data - Name: - description: Endpoint name - example: my-endpoint - type: string - PublicURL: - description: URL or IP address where exposed containers will be reachable - example: docker.mydomain.tld:2375 - type: string - Snapshots: - description: List of snapshots - items: - $ref: '#/definitions/portainer.DockerSnapshot' - type: array - Status: - description: The status of the endpoint (1 - up, 2 - down) - example: 1 - type: integer - TLS: - description: |- - Deprecated fields - Deprecated in DBVersion == 4 - type: boolean - TLSCACert: - type: string - TLSCert: - type: string - TLSConfig: - $ref: '#/definitions/portainer.TLSConfiguration' - TLSKey: - type: string - TagIds: - description: List of tag identifiers to which this endpoint is associated - items: - description: Tag identifier - example: 1 - type: integer - type: array - Tags: - description: Deprecated in DBVersion == 22 - items: - type: string - type: array - TeamAccessPolicies: - $ref: '#/definitions/portainer.TeamAccessPolicies' - description: List of team identifiers authorized to connect to this endpoint - Type: - description: Endpoint environment type. 1 for a Docker environment, 2 for - an agent on Docker environment or 3 for an Azure environment. 
- example: 1 - type: integer - URL: - description: URL or IP address of the Docker host associated to this endpoint - example: docker.mydomain.tld:2375 - type: string - UserAccessPolicies: - $ref: '#/definitions/portainer.UserAccessPolicies' - description: List of user identifiers authorized to connect to this endpoint - type: object - portainer.EndpointAuthorizations: - additionalProperties: - $ref: '#/definitions/portainer.Authorizations' - type: object - portainer.EndpointGroup: - properties: - AuthorizedTeams: - items: - type: integer - type: array - AuthorizedUsers: - description: Deprecated in DBVersion == 18 - items: - description: User identifier who created this template - example: 3 - type: integer - type: array - Description: - description: Description associated to the endpoint group - example: Endpoint group description - type: string - Id: - description: Endpoint group Identifier - example: 1 - type: integer - Labels: - description: Deprecated fields - items: - $ref: '#/definitions/portainer.Pair' - type: array - Name: - description: Endpoint group name - example: my-endpoint-group - type: string - TagIds: - description: List of tags associated to this endpoint group - items: - description: Tag identifier - example: 1 - type: integer - type: array - Tags: - description: Deprecated in DBVersion == 22 - items: - type: string - type: array - TeamAccessPolicies: - $ref: '#/definitions/portainer.TeamAccessPolicies' - UserAccessPolicies: - $ref: '#/definitions/portainer.UserAccessPolicies' - type: object - portainer.GitlabRegistryData: - properties: - InstanceURL: - type: string - ProjectId: - type: integer - ProjectPath: - type: string - type: object - portainer.KubernetesConfiguration: - properties: - IngressClasses: - items: - $ref: '#/definitions/portainer.KubernetesIngressClassConfig' - type: array - StorageClasses: - items: - $ref: '#/definitions/portainer.KubernetesStorageClassConfig' - type: array - UseLoadBalancer: - type: boolean - UseServerMetrics: - type: boolean - RestrictDefaultNamespace: - type: boolean - type: object - portainer.KubernetesData: - properties: - Configuration: - $ref: '#/definitions/portainer.KubernetesConfiguration' - Snapshots: - items: - $ref: '#/definitions/portainer.KubernetesSnapshot' - type: array - type: object - portainer.KubernetesIngressClassConfig: - properties: - Name: - type: string - Type: - type: string - type: object - portainer.KubernetesSnapshot: - properties: - KubernetesVersion: - type: string - NodeCount: - type: integer - Time: - type: integer - TotalCPU: - type: integer - TotalMemory: - type: integer - type: object - portainer.KubernetesStorageClassConfig: - properties: - AccessModes: - items: - type: string - type: array - AllowVolumeExpansion: - type: boolean - Name: - type: string - Provisioner: - type: string - type: object - portainer.InternalAuthSettings: - properties: - RequiredPasswordLength: - description: The minimum character length a user can set their password - example: 12 - type: integer - portainer.LDAPGroupSearchSettings: - properties: - GroupAttribute: - description: LDAP attribute which denotes the group membership - example: member - type: string - GroupBaseDN: - description: The distinguished name of the element from which the LDAP server - will search for groups - example: dc=ldap,dc=domain,dc=tld - type: string - GroupFilter: - description: The LDAP search filter used to select group elements, optional - example: (objectClass=account - type: string - type: object - portainer.LDAPSearchSettings: - 
properties: - BaseDN: - description: The distinguished name of the element from which the LDAP server - will search for users - example: dc=ldap,dc=domain,dc=tld - type: string - Filter: - description: Optional LDAP search filter used to select user elements - example: (objectClass=account) - type: string - UserNameAttribute: - description: LDAP attribute which denotes the username - example: uid - type: string - type: object - portainer.LDAPSettings: - properties: - AnonymousMode: - description: Enable this option if the server is configured for Anonymous - access. When enabled, ReaderDN and Password will not be used - example: true - type: boolean - AutoCreateUsers: - description: Automatically provision users and assign them to matching LDAP - group names - example: true - type: boolean - GroupSearchSettings: - items: - $ref: '#/definitions/portainer.LDAPGroupSearchSettings' - type: array - Password: - description: Password of the account that will be used to search users - example: readonly-password - type: string - ReaderDN: - description: Account that will be used to search for users - example: cn=readonly-account,dc=ldap,dc=domain,dc=tld - type: string - SearchSettings: - items: - $ref: '#/definitions/portainer.LDAPSearchSettings' - type: array - StartTLS: - description: Whether LDAP connection should use StartTLS - example: true - type: boolean - TLSConfig: - $ref: '#/definitions/portainer.TLSConfiguration' - URL: - description: URL or IP address of the LDAP server - example: myldap.domain.tld:389 - type: string - type: object - portainer.OAuthSettings: - properties: - AccessTokenURI: - type: string - AuthorizationURI: - type: string - ClientID: - type: string - ClientSecret: - type: string - DefaultTeamID: - type: integer - OAuthAutoCreateUsers: - type: boolean - RedirectURI: - type: string - ResourceURI: - type: string - Scopes: - type: string - UserIdentifier: - type: string - type: object - portainer.Pair: - properties: - name: - example: name - type: string - value: - example: value - type: string - type: object - portainer.Registry: - properties: - Authentication: - description: Is authentication against this registry enabled - example: true - type: boolean - AuthorizedTeams: - items: - type: integer - type: array - AuthorizedUsers: - description: |- - Deprecated fields - Deprecated in DBVersion == 18 - items: - description: User identifier who created this template - example: 3 - type: integer - type: array - Gitlab: - $ref: '#/definitions/portainer.GitlabRegistryData' - Id: - description: Registry Identifier - example: 1 - type: integer - ManagementConfiguration: - $ref: '#/definitions/portainer.RegistryManagementConfiguration' - Name: - description: Registry Name - example: my-registry - type: string - Password: - description: Password used to authenticate against this registry - example: registry_password - type: string - TeamAccessPolicies: - $ref: '#/definitions/portainer.TeamAccessPolicies' - Type: - description: Registry Type (1 - Quay, 2 - Azure, 3 - Custom, 4 - Gitlab, 5 - ProGet) - enum: - - 1 - - 2 - - 3 - - 4 - - 5 - type: integer - URL: - description: URL or IP address of the Docker registry - example: registry.mydomain.tld:2375 - type: string - BaseURL: - description: Base URL or IP address of the ProGet registry - example: registry.mydomain.tld:2375 - type: string - UserAccessPolicies: - $ref: '#/definitions/portainer.UserAccessPolicies' - Username: - description: Username used to authenticate against this registry - example: registry user - type: string - 
type: object - portainer.RegistryManagementConfiguration: - properties: - Authentication: - type: boolean - Password: - type: string - TLSConfig: - $ref: '#/definitions/portainer.TLSConfiguration' - Type: - type: integer - Username: - type: string - type: object - portainer.ResourceControl: - properties: - AccessLevel: - type: integer - AdministratorsOnly: - description: Permit access to resource only to admins - example: true - type: boolean - Id: - description: ResourceControl Identifier - example: 1 - type: integer - OwnerId: - description: |- - Deprecated fields - Deprecated in DBVersion == 2 - type: integer - Public: - description: Permit access to the associated resource to any user - example: true - type: boolean - ResourceId: - description: |- - Docker resource identifier on which access control will be applied.\ - In the case of a resource control applied to a stack, use the stack name as identifier - example: 617c5f22bb9b023d6daab7cba43a57576f83492867bc767d1c59416b065e5f08 - type: string - SubResourceIds: - description: List of Docker resources that will inherit this access control - example: - - 617c5f22bb9b023d6daab7cba43a57576f83492867bc767d1c59416b065e5f08 - items: - type: string - type: array - System: - type: boolean - TeamAccesses: - items: - $ref: '#/definitions/portainer.TeamResourceAccess' - type: array - Type: - description: |- - Type of Docker resource. Valid values are: 1- container, 2 -service - 3 - volume, 4 - secret, 5 - stack, 6 - config or 7 - custom template - example: 1 - type: integer - UserAccesses: - items: - $ref: '#/definitions/portainer.UserResourceAccess' - type: array - type: object - portainer.Role: - properties: - Authorizations: - $ref: '#/definitions/portainer.Authorizations' - description: Authorizations associated to a role - Description: - description: Role description - example: Read-only access of all resources in an endpoint - type: string - Id: - description: Role Identifier - example: 1 - type: integer - Name: - description: Role name - example: HelpDesk - type: string - Priority: - type: integer - type: object - portainer.Settings: - properties: - AllowBindMountsForRegularUsers: - description: Whether non-administrator should be able to use bind mounts when - creating containers - example: false - type: boolean - AllowContainerCapabilitiesForRegularUsers: - description: Whether non-administrator should be able to use container capabilities - type: boolean - AllowDeviceMappingForRegularUsers: - description: Whether non-administrator should be able to use device mapping - type: boolean - AllowHostNamespaceForRegularUsers: - description: Whether non-administrator should be able to use the host pid - type: boolean - AllowPrivilegedModeForRegularUsers: - description: Whether non-administrator should be able to use privileged mode - when creating containers - example: false - type: boolean - AllowStackManagementForRegularUsers: - description: Whether non-administrator should be able to manage stacks - type: boolean - AllowVolumeBrowserForRegularUsers: - description: Whether non-administrator should be able to browse volumes - type: boolean - AuthenticationMethod: - description: 'Active authentication method for the Portainer instance. 
Valid - values are: 1 for internal, 2 for LDAP, or 3 for oauth' - example: 1 - type: integer - BlackListedLabels: - description: A list of label name & value that will be used to hide containers - when querying containers - items: - $ref: '#/definitions/portainer.Pair' - type: array - EdgeAgentCheckinInterval: - description: The default check in interval for edge agent (in seconds) - example: 5 - type: integer - EnableEdgeComputeFeatures: - description: Whether edge compute features are enabled - type: boolean - EnableHostManagementFeatures: - description: Whether host management features are enabled - type: boolean - EnableTelemetry: - description: Whether telemetry is enabled - example: false - type: boolean - InternalAuthSettings: - $ref: '#/definitions/portainer.InternalAuthSettings' - LDAPSettings: - $ref: '#/definitions/portainer.LDAPSettings' - LogoURL: - description: URL to a logo that will be displayed on the login page as well - as on top of the sidebar. Will use default Portainer logo when value is - empty string - example: https://mycompany.mydomain.tld/logo.png - type: string - OAuthSettings: - $ref: '#/definitions/portainer.OAuthSettings' - SnapshotInterval: - description: The interval in which endpoint snapshots are created - example: 5m - type: string - TemplatesURL: - description: URL to the templates that will be displayed in the UI when navigating - to App Templates - example: https://raw.githubusercontent.com/portainer/templates/master/templates.json - type: string - UserSessionTimeout: - description: The duration of a user session - example: 5m - type: string - displayDonationHeader: - description: Deprecated fields - type: boolean - displayExternalContributors: - type: boolean - type: object - portainer.Stack: - properties: - EndpointId: - description: Endpoint identifier. Reference the endpoint that will be used - for deployment - example: 1 - type: integer - EntryPoint: - description: Path to the Stack file - example: docker-compose.yml - type: string - Env: - description: A list of environment variables used during stack deployment - items: - $ref: '#/definitions/portainer.Pair' - type: array - Id: - description: Stack Identifier - example: 1 - type: integer - Name: - description: Stack name - example: myStack - type: string - ResourceControl: - $ref: '#/definitions/portainer.ResourceControl' - Status: - description: Stack status (1 - active, 2 - inactive) - example: 1 - type: integer - SwarmId: - description: Cluster identifier of the Swarm cluster where the stack is deployed - example: jpofkc0i9uo9wtx1zesuk649w - type: string - Type: - description: Stack type. 
1 for a Swarm stack, 2 for a Compose stack - example: 2 - type: integer - createdBy: - description: The username which created this stack - example: admin - type: string - creationDate: - description: The date in unix time when stack was created - example: 1587399600 - type: integer - projectPath: - description: Path on disk to the repository hosting the Stack file - example: /data/compose/myStack_jpofkc0i9uo9wtx1zesuk649w - type: string - updateDate: - description: The date in unix time when stack was last updated - example: 1587399600 - type: integer - updatedBy: - description: The username which last updated this stack - example: bob - type: string - type: object - portainer.Status: - properties: - Version: - description: Portainer API version - example: 2.0.0 - type: string - type: object - portainer.TLSConfiguration: - properties: - TLS: - description: Use TLS - example: true - type: boolean - TLSCACert: - description: Path to the TLS CA certificate file - example: /data/tls/ca.pem - type: string - TLSCert: - description: Path to the TLS client certificate file - example: /data/tls/cert.pem - type: string - TLSKey: - description: Path to the TLS client key file - example: /data/tls/key.pem - type: string - TLSSkipVerify: - description: Skip the verification of the server TLS certificate - example: false - type: boolean - type: object - portainer.Tag: - properties: - EndpointGroups: - additionalProperties: - type: boolean - description: A set of endpoint group ids that have this tag - type: object - Endpoints: - additionalProperties: - type: boolean - description: A set of endpoint ids that have this tag - type: object - Name: - description: Tag name - example: org/acme - type: string - id: - description: Tag identifier - example: 1 - type: integer - type: object - portainer.Team: - properties: - Id: - description: Team Identifier - example: 1 - type: integer - Name: - description: Team name - example: developers - type: string - type: object - portainer.TeamAccessPolicies: - additionalProperties: - $ref: '#/definitions/portainer.AccessPolicy' - type: object - portainer.TeamMembership: - properties: - Id: - description: Membership Identifier - example: 1 - type: integer - Role: - description: Team role (1 for team leader and 2 for team member) - example: 1 - type: integer - TeamID: - description: Team identifier - example: 1 - type: integer - UserID: - description: User identifier - example: 1 - type: integer - type: object - portainer.TeamResourceAccess: - properties: - AccessLevel: - type: integer - TeamId: - type: integer - type: object - portainer.Template: - properties: - Id: - description: |- - Mandatory container/stack fields - Template Identifier - example: 1 - type: integer - administrator_only: - description: Whether the template should be available to administrators only - example: true - type: boolean - categories: - description: A list of categories associated to the template - example: - - database - items: - type: string - type: array - command: - description: The command that will be executed in a container template - example: ls -lah - type: string - description: - description: Description of the template - example: High performance web server - type: string - env: - description: A list of environment variables used during the template deployment - items: - $ref: '#/definitions/portainer.TemplateEnv' - type: array - hostname: - description: Container hostname - example: mycontainer - type: string - image: - description: |- - Mandatory container fields - Image 
associated to a container template. Mandatory for a container template - example: nginx:latest - type: string - interactive: - description: |- - Whether the container should be started in - interactive mode (-i -t equivalent on the CLI) - example: true - type: boolean - labels: - description: Container labels - items: - $ref: '#/definitions/portainer.Pair' - type: array - logo: - description: URL of the template's logo - example: https://cloudinovasi.id/assets/img/logos/nginx.png - type: string - name: - description: |- - Optional stack/container fields - Default name for the stack/container to be used on deployment - example: mystackname - type: string - network: - description: Name of a network that will be used on container deployment if - it exists inside the environment - example: mynet - type: string - note: - description: A note that will be displayed in the UI. Supports HTML content - example: This is my custom template - type: string - platform: - description: |- - Platform associated to the template. - Valid values are: 'linux', 'windows' or leave empty for multi-platform - example: linux - type: string - ports: - description: A list of ports exposed by the container - example: - - 8080:80/tcp - items: - type: string - type: array - privileged: - description: Whether the container should be started in privileged mode - example: true - type: boolean - registry: - description: |- - Optional container fields - The URL of a registry associated to the image for a container template - example: quay.io - type: string - repository: - $ref: '#/definitions/portainer.TemplateRepository' - description: Mandatory stack fields - restart_policy: - description: Container restart policy - example: on-failure - type: string - stackFile: - description: |- - Mandatory Edge stack fields - Stack file used for this template - type: string - title: - description: Title of the template - example: Nginx - type: string - type: - description: 'Template type. 
Valid values are: 1 (container), 2 (Swarm stack) - or 3 (Compose stack)' - example: 1 - type: integer - volumes: - description: A list of volumes used during the container template deployment - items: - $ref: '#/definitions/portainer.TemplateVolume' - type: array - type: object - portainer.TemplateEnv: - properties: - default: - description: Default value that will be set for the variable - example: default_value - type: string - description: - description: Content of the tooltip that will be generated in the UI - example: MySQL root account password - type: string - label: - description: Text for the label that will be generated in the UI - example: Root password - type: string - name: - description: name of the environment variable - example: MYSQL_ROOT_PASSWORD - type: string - preset: - description: If set to true, will not generate any input for this variable - in the UI - example: false - type: boolean - select: - description: A list of name/value that will be used to generate a dropdown - in the UI - items: - $ref: '#/definitions/portainer.TemplateEnvSelect' - type: array - type: object - portainer.TemplateEnvSelect: - properties: - default: - description: Will set this choice as the default choice - example: false - type: boolean - text: - description: Some text that will displayed as a choice - example: text value - type: string - value: - description: A value that will be associated to the choice - example: value - type: string - type: object - portainer.TemplateRepository: - properties: - stackfile: - description: Path to the stack file inside the git repository - example: ./subfolder/docker-compose.yml - type: string - url: - description: URL of a git repository used to deploy a stack template. Mandatory - for a Swarm/Compose stack template - example: https://github.com/portainer/portainer-compose - type: string - type: object - portainer.TemplateVolume: - properties: - bind: - description: Path on the host - example: /tmp - type: string - container: - description: Path inside the container - example: /data - type: string - readonly: - description: Whether the volume used should be readonly - example: true - type: boolean - type: object - portainer.User: - properties: - EndpointAuthorizations: - $ref: '#/definitions/portainer.EndpointAuthorizations' - Id: - description: User Identifier - example: 1 - type: integer - Password: - example: passwd - type: string - PortainerAuthorizations: - $ref: '#/definitions/portainer.Authorizations' - description: |- - Deprecated fields - Deprecated in DBVersion == 25 - Role: - description: User role (1 for administrator account and 2 for regular account) - example: 1 - type: integer - Username: - example: bob - type: string - type: object - portainer.UserAccessPolicies: - additionalProperties: - $ref: '#/definitions/portainer.AccessPolicy' - type: object - portainer.UserResourceAccess: - properties: - AccessLevel: - type: integer - UserId: - type: integer - type: object - portainer.Webhook: - properties: - EndpointId: - type: integer - Id: - description: Webhook Identifier - example: 1 - type: integer - ResourceId: - type: string - Token: - type: string - Type: - type: integer - type: object - registries.registryConfigurePayload: - properties: - authentication: - description: Is authentication against this registry enabled - example: false - type: boolean - password: - description: Password used to authenticate against this registry. 
required - when Authentication is true - example: registry_password - type: string - tls: - description: Use TLS - example: true - type: boolean - tlscacertFile: - description: The TLS CA certificate file - items: - type: integer - type: array - tlscertFile: - description: The TLS client certificate file - items: - type: integer - type: array - tlskeyFile: - description: The TLS client key file - items: - type: integer - type: array - tlsskipVerify: - description: Skip the verification of the server TLS certificate - example: false - type: boolean - username: - description: Username used to authenticate against this registry. Required - when Authentication is true - example: registry_user - type: string - required: - - authentication - type: object - registries.registryCreatePayload: - properties: - authentication: - description: Is authentication against this registry enabled - example: false - type: boolean - gitlab: - $ref: '#/definitions/portainer.GitlabRegistryData' - description: Gitlab specific details, required when type = 4 - name: - description: Name that will be used to identify this registry - example: my-registry - type: string - password: - description: Password used to authenticate against this registry. required - when Authentication is true - example: registry_password - type: string - type: - description: 'Registry Type. Valid values are: 1 (Quay.io), 2 (Azure container - registry), 3 (custom registry), 4 (Gitlab registry) or 5 (ProGet registry)' - enum: - - 1 - - 2 - - 3 - - 4 - - 5 - example: 1 - type: integer - url: - description: URL or IP address of the Docker registry - example: registry.mydomain.tld:2375 - type: string - baseUrl: - description: Base URL or IP address of the ProGet registry - example: registry.mydomain.tld:2375 - type: string - username: - description: Username used to authenticate against this registry. Required - when Authentication is true - example: registry_user - type: string - required: - - authentication - - name - - type - - url - type: object - registries.registryUpdatePayload: - properties: - authentication: - description: Is authentication against this registry enabled - example: false - type: boolean - name: - description: Name that will be used to identify this registry - example: my-registry - type: string - password: - description: Password used to authenticate against this registry. required - when Authentication is true - example: registry_password - type: string - teamAccessPolicies: - $ref: '#/definitions/portainer.TeamAccessPolicies' - url: - description: URL or IP address of the Docker registry - example: registry.mydomain.tld:2375 - type: string - baseUrl: - description: Base URL or IP address of the ProGet registry - example: registry.mydomain.tld:2375 - type: string - userAccessPolicies: - $ref: '#/definitions/portainer.UserAccessPolicies' - username: - description: Username used to authenticate against this registry. 
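The registries.registryCreatePayload schema above is easiest to read as a concrete request body. Below is a minimal Go sketch that builds one for a custom registry (type 3) with authentication enabled; the struct and JSON field names mirror the schema, while the route that accepts this body is documented elsewhere in the spec and is not assumed here.

```go
// Sketch only: constructs a registries.registryCreatePayload body and prints it.
package main

import (
	"encoding/json"
	"fmt"
)

type registryCreatePayload struct {
	Name           string `json:"name"`
	Type           int    `json:"type"` // 1 Quay, 2 Azure, 3 custom, 4 Gitlab, 5 ProGet
	URL            string `json:"url"`
	BaseURL        string `json:"baseUrl,omitempty"`  // only meaningful for ProGet registries
	Authentication bool   `json:"authentication"`
	Username       string `json:"username,omitempty"` // required when Authentication is true
	Password       string `json:"password,omitempty"` // required when Authentication is true
}

func main() {
	payload := registryCreatePayload{
		Name:           "my-registry",
		Type:           3, // custom registry
		URL:            "registry.mydomain.tld:2375",
		Authentication: true,
		Username:       "registry_user",
		Password:       "registry_password",
	}
	body, _ := json.MarshalIndent(payload, "", "  ")
	fmt.Println(string(body))
}
```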
Required - when Authentication is true - example: registry_user - type: string - required: - - authentication - - name - - url - type: object - resourcecontrols.resourceControlCreatePayload: - properties: - administratorsOnly: - description: Permit access to resource only to admins - example: true - type: boolean - public: - description: Permit access to the associated resource to any user - example: true - type: boolean - resourceID: - example: 617c5f22bb9b023d6daab7cba43a57576f83492867bc767d1c59416b065e5f08 - type: string - subResourceIDs: - description: List of Docker resources that will inherit this access control - example: - - 617c5f22bb9b023d6daab7cba43a57576f83492867bc767d1c59416b065e5f08 - items: - type: string - type: array - teams: - description: List of team identifiers with access to the associated resource - example: - - 56 - - 7 - items: - type: integer - type: array - type: - description: |- - Type of Docker resource. Valid values are: container, volume\ - service, secret, config or stack - example: container - type: string - users: - description: List of user identifiers with access to the associated resource - example: - - 1 - - 4 - items: - type: integer - type: array - required: - - resourceID - - type - type: object - resourcecontrols.resourceControlUpdatePayload: - properties: - administratorsOnly: - description: Permit access to resource only to admins - example: true - type: boolean - public: - description: Permit access to the associated resource to any user - example: true - type: boolean - teams: - description: List of team identifiers with access to the associated resource - example: - - 7 - items: - type: integer - type: array - users: - description: List of user identifiers with access to the associated resource - example: - - 4 - items: - type: integer - type: array - type: object - settings.publicSettingsResponse: - properties: - AllowBindMountsForRegularUsers: - description: Whether non-administrator should be able to use bind mounts when - creating containers - example: true - type: boolean - AllowContainerCapabilitiesForRegularUsers: - description: Whether non-administrator should be able to use container capabilities - example: true - type: boolean - AllowDeviceMappingForRegularUsers: - description: Whether non-administrator should be able to use device mapping - example: true - type: boolean - AllowHostNamespaceForRegularUsers: - description: Whether non-administrator should be able to use the host pid - example: true - type: boolean - AllowPrivilegedModeForRegularUsers: - description: Whether non-administrator should be able to use privileged mode - when creating containers - example: true - type: boolean - AllowStackManagementForRegularUsers: - description: Whether non-administrator should be able to manage stacks - example: true - type: boolean - AllowVolumeBrowserForRegularUsers: - description: Whether non-administrator should be able to browse volumes - example: true - type: boolean - AuthenticationMethod: - description: 'Active authentication method for the Portainer instance. 
Valid - values are: 1 for internal, 2 for LDAP, or 3 for oauth' - example: 1 - type: integer - EnableEdgeComputeFeatures: - description: Whether edge compute features are enabled - example: true - type: boolean - EnableHostManagementFeatures: - description: Whether host management features are enabled - example: true - type: boolean - EnableTelemetry: - description: Whether telemetry is enabled - example: true - type: boolean - LogoURL: - description: URL to a logo that will be displayed on the login page as well - as on top of the sidebar. Will use default Portainer logo when value is - empty string - example: https://mycompany.mydomain.tld/logo.png - type: string - OAuthLoginURI: - description: The URL used for oauth login - example: https://gitlab.com/oauth - type: string - type: object - settings.settingsLDAPCheckPayload: - properties: - ldapsettings: - $ref: '#/definitions/portainer.LDAPSettings' - type: object - settings.settingsUpdatePayload: - properties: - allowBindMountsForRegularUsers: - description: Whether non-administrator should be able to use bind mounts when - creating containers - example: false - type: boolean - allowContainerCapabilitiesForRegularUsers: - description: Whether non-administrator should be able to use container capabilities - example: true - type: boolean - allowDeviceMappingForRegularUsers: - description: Whether non-administrator should be able to use device mapping - example: true - type: boolean - allowHostNamespaceForRegularUsers: - description: Whether non-administrator should be able to use the host pid - example: true - type: boolean - allowPrivilegedModeForRegularUsers: - description: Whether non-administrator should be able to use privileged mode - when creating containers - example: false - type: boolean - allowStackManagementForRegularUsers: - description: Whether non-administrator should be able to manage stacks - example: true - type: boolean - allowVolumeBrowserForRegularUsers: - description: Whether non-administrator should be able to browse volumes - example: true - type: boolean - authenticationMethod: - description: 'Active authentication method for the Portainer instance. Valid - values are: 1 for internal, 2 for LDAP, or 3 for oauth' - example: 1 - type: integer - blackListedLabels: - description: A list of label name & value that will be used to hide containers - when querying containers - items: - $ref: '#/definitions/portainer.Pair' - type: array - edgeAgentCheckinInterval: - description: The default check in interval for edge agent (in seconds) - example: 5 - type: integer - enableEdgeComputeFeatures: - description: Whether edge compute features are enabled - example: true - type: boolean - enableHostManagementFeatures: - description: Whether host management features are enabled - example: true - type: boolean - enableTelemetry: - description: Whether telemetry is enabled - example: false - type: boolean - internalAuthSettings: - $ref: '#/definitions/portainer.InternalAuthSettings' - ldapsettings: - $ref: '#/definitions/portainer.LDAPSettings' - logoURL: - description: URL to a logo that will be displayed on the login page as well - as on top of the sidebar. 
Will use default Portainer logo when value is - empty string - example: https://mycompany.mydomain.tld/logo.png - type: string - oauthSettings: - $ref: '#/definitions/portainer.OAuthSettings' - snapshotInterval: - description: The interval in which endpoint snapshots are created - example: 5m - type: string - templatesURL: - description: URL to the templates that will be displayed in the UI when navigating - to App Templates - example: https://raw.githubusercontent.com/portainer/templates/master/templates.json - type: string - userSessionTimeout: - description: The duration of a user session - example: 5m - type: string - type: object - stacks.composeStackFromFileContentPayload: - properties: - env: - description: A list of environment variables used during stack deployment - items: - $ref: '#/definitions/portainer.Pair' - type: array - name: - description: Name of the stack - example: myStack - type: string - stackFileContent: - description: Content of the Stack file - example: |- - version: 3 - services: - web: - image:nginx - type: string - required: - - name - - stackFileContent - type: object - stacks.composeStackFromGitRepositoryPayload: - properties: - composeFilePathInRepository: - default: docker-compose.yml - description: Path to the Stack file inside the Git repository - example: docker-compose.yml - type: string - env: - description: A list of environment variables used during stack deployment - items: - $ref: '#/definitions/portainer.Pair' - type: array - name: - description: Name of the stack - example: myStack - type: string - repositoryAuthentication: - description: Use basic authentication to clone the Git repository - example: true - type: boolean - repositoryPassword: - description: Password used in basic authentication. Required when RepositoryAuthentication - is true. - example: myGitPassword - type: string - repositoryReferenceName: - description: Reference name of a Git repository hosting the Stack file - example: refs/heads/master - type: string - repositoryURL: - description: URL of a Git repository hosting the Stack file - example: https://github.com/openfaas/faas - type: string - repositoryUsername: - description: Username used in basic authentication. Required when RepositoryAuthentication - is true. 
- example: myGitUsername - type: string - required: - - name - - repositoryURL - type: object - stacks.stackFileResponse: - properties: - StackFileContent: - description: Content of the Stack file - example: |- - version: 3 - services: - web: - image:nginx - type: string - type: object - stacks.stackMigratePayload: - properties: - endpointID: - description: Endpoint identifier of the target endpoint where the stack will - be relocated - example: 2 - type: integer - name: - description: If provided will rename the migrated stack - example: new-stack - type: string - swarmID: - description: Swarm cluster identifier, must match the identifier of the cluster - where the stack will be relocated - example: jpofkc0i9uo9wtx1zesuk649w - type: string - required: - - endpointID - type: object - stacks.swarmStackFromFileContentPayload: - properties: - env: - description: A list of environment variables used during stack deployment - items: - $ref: '#/definitions/portainer.Pair' - type: array - name: - description: Name of the stack - example: myStack - type: string - stackFileContent: - description: Content of the Stack file - example: |- - version: 3 - services: - web: - image:nginx - type: string - swarmID: - description: Swarm cluster identifier - example: jpofkc0i9uo9wtx1zesuk649w - type: string - required: - - name - - stackFileContent - - swarmID - type: object - stacks.swarmStackFromGitRepositoryPayload: - properties: - composeFilePathInRepository: - default: docker-compose.yml - description: Path to the Stack file inside the Git repository - example: docker-compose.yml - type: string - env: - description: A list of environment variables used during stack deployment - items: - $ref: '#/definitions/portainer.Pair' - type: array - name: - description: Name of the stack - example: myStack - type: string - repositoryAuthentication: - description: Use basic authentication to clone the Git repository - example: true - type: boolean - repositoryPassword: - description: Password used in basic authentication. Required when RepositoryAuthentication - is true. - example: myGitPassword - type: string - repositoryReferenceName: - description: Reference name of a Git repository hosting the Stack file - example: refs/heads/master - type: string - repositoryURL: - description: URL of a Git repository hosting the Stack file - example: https://github.com/openfaas/faas - type: string - repositoryUsername: - description: Username used in basic authentication. Required when RepositoryAuthentication - is true. 
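For the Git-backed stack payloads above, here is a hedged Go sketch that assembles a stacks.composeStackFromGitRepositoryPayload body. Only the payload is built; the /stacks deployment route and its query parameters are not part of this excerpt, so they are left out rather than guessed.

```go
// Sketch only: builds the JSON body for deploying a Compose stack from a Git repository.
package main

import (
	"encoding/json"
	"fmt"
)

// pair mirrors portainer.Pair (name/value environment variables).
type pair struct {
	Name  string `json:"name"`
	Value string `json:"value"`
}

type composeStackFromGitRepositoryPayload struct {
	Name                        string `json:"name"`
	RepositoryURL               string `json:"repositoryURL"`
	RepositoryReferenceName     string `json:"repositoryReferenceName,omitempty"`
	ComposeFilePathInRepository string `json:"composeFilePathInRepository,omitempty"` // defaults to docker-compose.yml
	RepositoryAuthentication    bool   `json:"repositoryAuthentication"`
	RepositoryUsername          string `json:"repositoryUsername,omitempty"` // required when authentication is true
	RepositoryPassword          string `json:"repositoryPassword,omitempty"` // required when authentication is true
	Env                         []pair `json:"env,omitempty"`
}

func main() {
	payload := composeStackFromGitRepositoryPayload{
		Name:                        "myStack",
		RepositoryURL:               "https://github.com/openfaas/faas",
		RepositoryReferenceName:     "refs/heads/master",
		ComposeFilePathInRepository: "docker-compose.yml",
		Env:                         []pair{{Name: "MYSQL_ROOT_PASSWORD", Value: "changeme"}},
	}
	body, _ := json.MarshalIndent(payload, "", "  ")
	fmt.Println(string(body))
}
```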
- example: myGitUsername - type: string - swarmID: - description: Swarm cluster identifier - example: jpofkc0i9uo9wtx1zesuk649w - type: string - required: - - name - - repositoryURL - - swarmID - type: object - stacks.updateSwarmStackPayload: - properties: - env: - description: A list of environment variables used during stack deployment - items: - $ref: '#/definitions/portainer.Pair' - type: array - prune: - description: Prune services that are no longer referenced (only available - for Swarm stacks) - example: true - type: boolean - stackFileContent: - description: New content of the Stack file - example: |- - version: 3 - services: - web: - image:nginx - type: string - type: object - status.inspectVersionResponse: - properties: - LatestVersion: - description: The latest version available - example: 2.0.0 - type: string - UpdateAvailable: - description: Whether portainer has an update available - example: false - type: boolean - type: object - tags.tagCreatePayload: - properties: - name: - description: Name - example: org/acme - type: string - required: - - name - type: object - teammemberships.teamMembershipCreatePayload: - properties: - role: - description: Role for the user inside the team (1 for leader and 2 for regular - member) - enum: - - 1 - - 2 - example: 1 - type: integer - teamID: - description: Team identifier - example: 1 - type: integer - userID: - description: User identifier - example: 1 - type: integer - required: - - role - - teamID - - userID - type: object - teammemberships.teamMembershipUpdatePayload: - properties: - role: - description: Role for the user inside the team (1 for leader and 2 for regular - member) - enum: - - 1 - - 2 - example: 1 - type: integer - teamID: - description: Team identifier - example: 1 - type: integer - userID: - description: User identifier - example: 1 - type: integer - required: - - role - - teamID - - userID - type: object - teams.teamCreatePayload: - properties: - name: - description: Name - example: developers - type: string - required: - - name - type: object - teams.teamUpdatePayload: - properties: - name: - description: Name - example: developers - type: string - type: object - templates.filePayload: - properties: - composeFilePathInRepository: - description: Path to the file inside the git repository - example: ./subfolder/docker-compose.yml - type: string - repositoryURL: - description: URL of a git repository where the file is stored - example: https://github.com/portainer/portainer-compose - type: string - required: - - composeFilePathInRepository - - repositoryURL - type: object - templates.fileResponse: - properties: - fileContent: - description: The requested file content - type: string - type: object - templates.listResponse: - properties: - templates: - items: - $ref: '#/definitions/portainer.Template' - type: array - version: - type: string - type: object - users.adminInitPayload: - properties: - password: - description: Password for the admin user - example: admin-password - type: string - username: - description: Username for the admin user - example: admin - type: string - required: - - password - - username - type: object - users.userCreatePayload: - properties: - password: - example: cg9Wgky3 - type: string - role: - description: User role (1 for administrator account and 2 for regular account) - enum: - - 1 - - 2 - example: 2 - type: integer - username: - example: bob - type: string - required: - - password - - role - - username - type: object - users.userUpdatePasswordPayload: - properties: - newPassword: - 
description: New Password - example: new_passwd - type: string - password: - description: Current Password - example: passwd - type: string - required: - - newPassword - - password - type: object - users.userUpdatePayload: - properties: - password: - example: cg9Wgky3 - type: string - role: - description: User role (1 for administrator account and 2 for regular account) - enum: - - 1 - - 2 - example: 2 - type: integer - username: - example: bob - type: string - required: - - password - - role - - username - type: object - webhooks.webhookCreatePayload: - properties: - endpointID: - type: integer - resourceID: - type: string - webhookType: - type: integer - type: object -info: - contact: - email: info@portainer.io - description: | - Portainer API is an HTTP API served by Portainer. It is used by the Portainer UI and everything you can do with the UI can be done using the HTTP API. - Examples are available at https://documentation.portainer.io/api/api-examples/ - You can find out more about Portainer at [http://portainer.io](http://portainer.io) and get some support on [Slack](http://portainer.io/slack/). - - # Authentication - - Most of the API endpoints require to be authenticated as well as some level of authorization to be used. - Portainer API uses JSON Web Token to manage authentication and thus requires you to provide a token in the **Authorization** header of each request - with the **Bearer** authentication mechanism. - - Example: - - ``` - Bearer abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890abcdefghijklmnopqrstuvwxyzAB - ``` - - # Security - - Each API endpoint has an associated access policy, it is documented in the description of each endpoint. - - Different access policies are available: - - - Public access - - Authenticated access - - Restricted access - - Administrator access - - ### Public access - - No authentication is required to access the endpoints with this access policy. - - ### Authenticated access - - Authentication is required to access the endpoints with this access policy. - - ### Restricted access - - Authentication is required to access the endpoints with this access policy. - Extra-checks might be added to ensure access to the resource is granted. Returned data might also be filtered. - - ### Administrator access - - Authentication as well as an administrator role are required to access the endpoints with this access policy. - - # Execute Docker requests - - Portainer **DO NOT** expose specific endpoints to manage your Docker resources (create a container, remove a volume, etc...). - - Instead, it acts as a reverse-proxy to the Docker HTTP API. This means that you can execute Docker requests **via** the Portainer HTTP API. - - To do so, you can use the `/endpoints/{id}/docker` Portainer API endpoint (which is not documented below due to Swagger limitations). This endpoint has a restricted access policy so you still need to be authenticated to be able to query this endpoint. Any query on this endpoint will be proxied to the Docker API of the associated endpoint (requests and responses objects are the same as documented in the Docker API). - - **NOTE**: You can find more information on how to query the Docker API in the [Docker official documentation](https://docs.docker.com/engine/api/v1.30/) as well as in [this Portainer example](https://documentation.portainer.io/api/api-examples/). 
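The authentication and Docker-proxy notes above translate into a short client flow: obtain a JWT from POST /auth, then send it as a Bearer token to /endpoints/{id}/docker/..., which Portainer forwards to the Docker Engine API of that endpoint. The Go sketch below assumes a local instance at http://localhost:9000/api and uses username/password and jwt field names for the auth.authenticatePayload and auth.authenticateResponse schemas defined earlier in the spec; treat those names as illustrative rather than authoritative.

```go
// Sketch only: authenticate, then list containers on endpoint 1 through the Docker proxy.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

const portainerURL = "http://localhost:9000/api" // assumption: local Portainer instance

func main() {
	// 1. Authenticate (field names assumed from the auth.* schemas defined earlier).
	creds, _ := json.Marshal(map[string]string{"username": "admin", "password": "admin-password"})
	resp, err := http.Post(portainerURL+"/auth", "application/json", bytes.NewReader(creds))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var auth struct {
		JWT string `json:"jwt"` // assumption: token field name
	}
	if err := json.NewDecoder(resp.Body).Decode(&auth); err != nil {
		panic(err)
	}

	// 2. Call the Docker proxy: everything after /docker is forwarded to the
	// Docker Engine API of endpoint 1 (GET /containers/json lists containers).
	req, _ := http.NewRequest(http.MethodGet, portainerURL+"/endpoints/1/docker/containers/json", nil)
	req.Header.Set("Authorization", "Bearer "+auth.JWT)

	proxied, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer proxied.Body.Close()

	var containers []map[string]any
	if err := json.NewDecoder(proxied.Body).Decode(&containers); err != nil {
		panic(err)
	}
	fmt.Printf("endpoint 1 is running %d container(s)\n", len(containers))
}
```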
- license: {} - title: PortainerCE API - version: 2.0.0 -paths: - /auth: - post: - consumes: - - application/json - description: Use this endpoint to authenticate against Portainer using a username - and password. - operationId: AuthenticateUser - parameters: - - description: Credentials used for authentication - in: body - name: body - required: true - schema: - $ref: '#/definitions/auth.authenticatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/auth.authenticateResponse' - '400': - description: Invalid request - '422': - description: Invalid Credentials - '500': - description: Server error - summary: Authenticate - tags: - - auth - /auth/logout: - post: - consumes: - - application/json - operationId: logout - produces: - - application/json - responses: - '204': - description: '' - security: - - jwt: [] - summary: Logout - tags: - - auth - /auth/oauth/validate: - post: - consumes: - - application/json - operationId: authenticate_oauth - parameters: - - description: OAuth Credentials used for authentication - in: body - name: body - required: true - schema: - $ref: '#/definitions/auth.oauthPayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/auth.authenticateResponse' - '400': - description: Invalid request - '422': - description: Invalid Credentials - '500': - description: Server error - summary: Authenticate with OAuth - tags: - - auth - /custom_templates: - get: - description: |- - List available custom templates. - **Access policy**: authenticated - operationId: CustomTemplateList - produces: - - application/json - responses: - '200': - description: Success - schema: - items: - $ref: '#/definitions/portainer.CustomTemplate' - type: array - '500': - description: Server error - security: - - jwt: [] - summary: List available custom templates - tags: - - custom_templates - post: - consumes: - - application/json - - ' multipart/form-data' - description: |- - Create a custom template. - **Access policy**: authenticated - operationId: CustomTemplateCreate - parameters: - - description: method for creating template - enum: - - string - - file - - repository - in: query - name: method - required: true - type: string - - description: Required when using method=string - in: body - name: body_string - schema: - $ref: '#/definitions/customtemplates.customTemplateFromFileContentPayload' - - description: Required when using method=repository - in: body - name: body_repository - schema: - $ref: '#/definitions/customtemplates.customTemplateFromGitRepositoryPayload' - - description: Title of the template. required when method is file - in: formData - name: Title - type: string - - description: Description of the template. required when method is file - in: formData - name: Description - type: string - - description: A note that will be displayed in the UI. Supports HTML content - in: formData - name: Note - type: string - - description: Platform associated to the template (1 - 'linux', 2 - 'windows'). 
- required when method is file - enum: - - 1 - - 2 - in: formData - name: Platform - type: integer - - description: Type of created stack (1 - swarm, 2 - compose), required when - method is file - enum: - - 1 - - 2 - in: formData - name: Type - type: integer - - description: required when method is file - in: formData - name: file - type: file - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/portainer.CustomTemplate' - '400': - description: Invalid request - '500': - description: Server error - security: - - jwt: [] - summary: Create a custom template - tags: - - custom_templates - /custom_templates/{id}: - delete: - description: |- - Remove a template. - **Access policy**: authorized - operationId: CustomTemplateDelete - parameters: - - description: Template identifier - in: path - name: id - required: true - type: integer - responses: - '204': - description: Success - '400': - description: Invalid request - '403': - description: Access denied to resource - '404': - description: Template not found - '500': - description: Server error - security: - - jwt: [] - summary: Remove a template - tags: - - custom_templates - get: - consumes: - - application/json - description: |- - Retrieve details about a template. - **Access policy**: authenticated - operationId: CustomTemplateInspect - parameters: - - description: Template identifier - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.CustomTemplate' - '400': - description: Invalid request - '404': - description: Template not found - '500': - description: Server error - security: - - jwt: [] - summary: Inspect a custom template - tags: - - custom_templates - put: - consumes: - - application/json - description: |- - Update a template. - **Access policy**: authenticated - operationId: CustomTemplateUpdate - parameters: - - description: Template identifier - in: path - name: id - required: true - type: integer - - description: Template details - in: body - name: body - required: true - schema: - $ref: '#/definitions/customtemplates.customTemplateUpdatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.CustomTemplate' - '400': - description: Invalid request - '403': - description: Permission denied to access template - '404': - description: Template not found - '500': - description: Server error - security: - - jwt: [] - summary: Update a template - tags: - - custom_templates - /custom_templates/{id}/file: - get: - description: |- - Retrieve the content of the Stack file for the specified custom template - **Access policy**: authorized - operationId: CustomTemplateFile - parameters: - - description: Template identifier - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/customtemplates.fileResponse' - '400': - description: Invalid request - '404': - description: Custom template not found - '500': - description: Server error - security: - - jwt: [] - summary: Get Template stack file content. 
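As an example of the JWT-protected GET operations above, the sketch below fetches the stack file behind a custom template via GET /custom_templates/{id}/file. The base URL and token are placeholders, and the fileContent field name is borrowed from the templates.fileResponse schema elsewhere in this document; the exact shape of customtemplates.fileResponse is defined earlier in the spec.

```go
// Sketch only: retrieve the stack file content of custom template 1.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	const (
		portainerURL = "http://localhost:9000/api" // assumption: local instance
		token        = "<jwt obtained from POST /auth>"
		templateID   = 1
	)

	req, _ := http.NewRequest(http.MethodGet,
		fmt.Sprintf("%s/custom_templates/%d/file", portainerURL, templateID), nil)
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var file struct {
		FileContent string `json:"fileContent"` // assumption: field name as in templates.fileResponse
	}
	if err := json.NewDecoder(resp.Body).Decode(&file); err != nil {
		panic(err)
	}
	fmt.Println(file.FileContent)
}
```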
- tags: - - custom_templates - /dockerhub: - get: - description: |- - Use this endpoint to retrieve the information used to connect to the DockerHub - **Access policy**: authenticated - operationId: DockerHubInspect - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/portainer.DockerHub' - '500': - description: Server error - security: - - jwt: [] - summary: Retrieve DockerHub information - tags: - - dockerhub - put: - consumes: - - application/json - description: |- - Use this endpoint to update the information used to connect to the DockerHub - **Access policy**: administrator - operationId: DockerHubUpdate - parameters: - - description: DockerHub information - in: body - name: body - required: true - schema: - $ref: '#/definitions/dockerhub.dockerhubUpdatePayload' - produces: - - application/json - responses: - '204': - description: Success - '400': - description: Invalid request - '500': - description: Server error - security: - - jwt: [] - summary: Update DockerHub information - tags: - - dockerhub - /edge_groups: - get: - consumes: - - application/json - produces: - - application/json - responses: - '200': - description: EdgeGroups - schema: - items: - allOf: - - $ref: '#/definitions/portainer.EdgeGroup' - - properties: - HasEdgeStack: - type: boolean - type: object - type: array - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: list EdgeGroups - tags: - - edge_groups - post: - consumes: - - application/json - parameters: - - description: EdgeGroup data - in: body - name: body - required: true - schema: - $ref: '#/definitions/edgegroups.edgeGroupCreatePayload' - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/portainer.EdgeGroup' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Create an EdgeGroup - tags: - - edge_groups - /edge_groups/{id}: - delete: - consumes: - - application/json - parameters: - - description: EdgeGroup Id - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '204': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Deletes an EdgeGroup - tags: - - edge_groups - get: - consumes: - - application/json - parameters: - - description: EdgeGroup Id - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/portainer.EdgeGroup' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Inspects an EdgeGroup - tags: - - edge_groups - put: - consumes: - - application/json - parameters: - - description: EdgeGroup Id - in: path - name: id - required: true - type: integer - - description: EdgeGroup data - in: body - name: body - required: true - schema: - $ref: '#/definitions/edgegroups.edgeGroupUpdatePayload' - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/portainer.EdgeGroup' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Updates an EdgeGroup - tags: - - edge_groups - /edge_jobs: - get: - consumes: - - application/json - produces: - - application/json - 
responses: - '200': - description: OK - schema: - items: - $ref: '#/definitions/portainer.EdgeJob' - type: array - '400': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Fetch EdgeJobs list - tags: - - edge_jobs - post: - consumes: - - application/json - parameters: - - description: Creation Method - enum: - - file - - string - in: query - name: method - required: true - type: string - - description: EdgeGroup data when method is string - in: body - name: body - required: true - schema: - $ref: '#/definitions/edgejobs.edgeJobCreateFromFileContentPayload' - - description: EdgeGroup data when method is file - in: body - name: body - required: true - schema: - $ref: '#/definitions/edgejobs.edgeJobCreateFromFilePayload' - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/portainer.EdgeGroup' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Create an EdgeJob - tags: - - edge_jobs - /edge_jobs/{id}: - delete: - consumes: - - application/json - parameters: - - description: EdgeJob Id - in: path - name: id - required: true - type: string - produces: - - application/json - responses: - '204': - description: '' - '400': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Delete an EdgeJob - tags: - - edge_jobs - get: - consumes: - - application/json - parameters: - - description: EdgeJob Id - in: path - name: id - required: true - type: string - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/portainer.EdgeJob' - '400': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Inspect an EdgeJob - tags: - - edge_jobs - post: - consumes: - - application/json - parameters: - - description: EdgeJob Id - in: path - name: id - required: true - type: string - - description: EdgeGroup data - in: body - name: body - required: true - schema: - $ref: '#/definitions/edgejobs.edgeJobUpdatePayload' - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/portainer.EdgeJob' - '400': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Update an EdgeJob - tags: - - edge_jobs - /edge_jobs/{id}/file: - get: - consumes: - - application/json - parameters: - - description: EdgeJob Id - in: path - name: id - required: true - type: string - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/edgejobs.edgeJobFileResponse' - '400': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Fetch a file of an EdgeJob - tags: - - edge_jobs - /edge_jobs/{id}/tasks: - get: - consumes: - - application/json - parameters: - - description: EdgeJob Id - in: path - name: id - required: true - type: string - produces: - - application/json - responses: - '200': - description: OK - schema: - items: - $ref: '#/definitions/edgejobs.taskContainer' - type: array - '400': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - 
jwt: [] - summary: Fetch the list of tasks on an EdgeJob - tags: - - edge_jobs - /edge_jobs/{id}/tasks/{taskID}/logs: - delete: - consumes: - - application/json - parameters: - - description: EdgeJob Id - in: path - name: id - required: true - type: string - - description: Task Id - in: path - name: taskID - required: true - type: string - produces: - - application/json - responses: - '204': - description: '' - '400': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Clear the log for a specifc task on an EdgeJob - tags: - - edge_jobs - get: - consumes: - - application/json - parameters: - - description: EdgeJob Id - in: path - name: id - required: true - type: string - - description: Task Id - in: path - name: taskID - required: true - type: string - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/edgejobs.fileResponse' - '400': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Fetch the log for a specifc task on an EdgeJob - tags: - - edge_jobs - post: - consumes: - - application/json - parameters: - - description: EdgeJob Id - in: path - name: id - required: true - type: string - - description: Task Id - in: path - name: taskID - required: true - type: string - produces: - - application/json - responses: - '204': - description: '' - '400': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Collect the log for a specifc task on an EdgeJob - tags: - - edge_jobs - /edge_stacks: - get: - consumes: - - application/json - produces: - - application/json - responses: - '200': - description: OK - schema: - items: - $ref: '#/definitions/portainer.EdgeStack' - type: array - '400': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Fetches the list of EdgeStacks - tags: - - edge_stacks - post: - consumes: - - application/json - parameters: - - description: Creation Method - enum: - - file - - string - - repository - in: query - name: method - required: true - type: string - - description: Required when using method=string - in: body - name: body_string - required: true - schema: - $ref: '#/definitions/edgestacks.swarmStackFromFileContentPayload' - - description: Required when using method=file - in: body - name: body_file - required: true - schema: - $ref: '#/definitions/edgestacks.swarmStackFromFileUploadPayload' - - description: Required when using method=repository - in: body - name: body_repository - required: true - schema: - $ref: '#/definitions/edgestacks.swarmStackFromGitRepositoryPayload' - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/portainer.EdgeStack' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Create an EdgeStack - tags: - - edge_stacks - /edge_stacks/{id}: - delete: - consumes: - - application/json - parameters: - - description: EdgeStack Id - in: path - name: id - required: true - type: string - produces: - - application/json - responses: - '204': - description: '' - '400': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] 
- summary: Delete an EdgeStack - tags: - - edge_stacks - get: - consumes: - - application/json - parameters: - - description: EdgeStack Id - in: path - name: id - required: true - type: string - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/portainer.EdgeStack' - '400': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Inspect an EdgeStack - tags: - - edge_stacks - put: - consumes: - - application/json - parameters: - - description: EdgeStack Id - in: path - name: id - required: true - type: string - - description: EdgeStack data - in: body - name: body - required: true - schema: - $ref: '#/definitions/edgestacks.updateEdgeStackPayload' - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/portainer.EdgeStack' - '400': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Update an EdgeStack - tags: - - edge_stacks - /edge_stacks/{id}/file: - get: - consumes: - - application/json - parameters: - - description: EdgeStack Id - in: path - name: id - required: true - type: string - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/edgestacks.stackFileResponse' - '400': - description: '' - '500': - description: '' - '503': - description: Service Unavailable - schema: - type: Edge - security: - - jwt: [] - summary: Fetches the stack file for an EdgeStack - tags: - - edge_stacks - /edge_stacks/{id}/status: - put: - consumes: - - application/json - description: Authorized only if the request is done by an Edge Endpoint - parameters: - - description: EdgeStack Id - in: path - name: id - required: true - type: string - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/portainer.EdgeStack' - '400': - description: '' - '403': - description: '' - '404': - description: '' - '500': - description: '' - summary: Update an EdgeStack status - tags: - - edge_stacks - /edge_templates: - get: - consumes: - - application/json - produces: - - application/json - responses: - '200': - description: OK - schema: - items: - $ref: '#/definitions/portainer.Template' - type: array - '500': - description: '' - security: - - jwt: [] - summary: Fetches the list of Edge Templates - tags: - - edge_templates - /endpoint_groups: - get: - description: |- - List all endpoint groups based on the current user authorizations. Will - return all endpoint groups if using an administrator account otherwise it will - only return authorized endpoint groups. - **Access policy**: restricted - operationId: EndpointGroupList - produces: - - application/json - responses: - '200': - description: Endpoint group - schema: - items: - $ref: '#/definitions/portainer.EndpointGroup' - type: array - '500': - description: Server error - security: - - jwt: [] - summary: List Endpoint groups - tags: - - endpoint_groups - post: - consumes: - - application/json - description: |- - Create a new endpoint group. 
- **Access policy**: administrator - parameters: - - description: Endpoint Group details - in: body - name: body - required: true - schema: - $ref: '#/definitions/endpointgroups.endpointGroupCreatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.EndpointGroup' - '400': - description: Invalid request - '500': - description: Server error - security: - - jwt: [] - summary: Create an Endpoint Group - tags: - - endpoint_groups - /endpoint_groups/:id: - get: - consumes: - - application/json - description: |- - Retrieve details abont an endpoint group. - **Access policy**: administrator - parameters: - - description: Endpoint group identifier - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.EndpointGroup' - '400': - description: Invalid request - '404': - description: EndpointGroup not found - '500': - description: Server error - security: - - jwt: [] - summary: Inspect an Endpoint group - tags: - - endpoint_groups - put: - consumes: - - application/json - description: |- - Update an endpoint group. - **Access policy**: administrator - operationId: EndpointGroupUpdate - parameters: - - description: EndpointGroup identifier - in: path - name: id - required: true - type: integer - - description: EndpointGroup details - in: body - name: body - required: true - schema: - $ref: '#/definitions/endpointgroups.endpointGroupUpdatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.EndpointGroup' - '400': - description: Invalid request - '404': - description: EndpointGroup not found - '500': - description: Server error - security: - - jwt: [] - summary: Update an endpoint group - tags: - - endpoint_groups - /endpoint_groups/{id}: - delete: - consumes: - - application/json - description: |- - Remove an endpoint group. 
- **Access policy**: administrator - operationId: EndpointGroupDelete - parameters: - - description: EndpointGroup identifier - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '204': - description: Success - '400': - description: Invalid request - '404': - description: EndpointGroup not found - '500': - description: Server error - security: - - jwt: [] - summary: Remove an endpoint group - tags: - - endpoint_groups - /endpoint_groups/{id}/endpoints/{endpointId}: - delete: - description: '**Access policy**: administrator' - operationId: EndpointGroupDeleteEndpoint - parameters: - - description: EndpointGroup identifier - in: path - name: id - required: true - type: integer - - description: Endpoint identifier - in: path - name: endpointId - required: true - type: integer - responses: - '204': - description: Success - '400': - description: Invalid request - '404': - description: EndpointGroup not found - '500': - description: Server error - security: - - jwt: [] - summary: Removes endpoint from an endpoint group - tags: - - endpoint_groups - put: - description: |- - Add an endpoint to an endpoint group - **Access policy**: administrator - operationId: EndpointGroupAddEndpoint - parameters: - - description: EndpointGroup identifier - in: path - name: id - required: true - type: integer - - description: Endpoint identifier - in: path - name: endpointId - required: true - type: integer - responses: - '204': - description: Success - '400': - description: Invalid request - '404': - description: EndpointGroup not found - '500': - description: Server error - security: - - jwt: [] - summary: Add an endpoint to an endpoint group - tags: - - endpoint_groups - /endpoints: - get: - description: |- - List all endpoints based on the current user authorizations. Will - return all endpoints if using an administrator account otherwise it will - only return authorized endpoints. - **Access policy**: restricted - operationId: EndpointList - parameters: - - description: Start searching from - in: query - name: start - type: integer - - description: Search query - in: query - name: search - type: string - - description: List endpoints of this group - in: query - name: groupId - type: integer - - description: Limit results to this value - in: query - name: limit - type: integer - - description: List endpoints of this type - in: query - name: type - type: integer - - description: search endpoints with these tags (depends on tagsPartialMatch) - in: query - items: - type: integer - name: tagIds - type: array - - description: If true, will return endpoint which has one of tagIds, if false - (or missing) will return only endpoints that has all the tags - in: query - name: tagsPartialMatch - type: boolean - - description: will return only these endpoints - in: query - items: - type: integer - name: endpointIds - type: array - produces: - - application/json - responses: - '200': - description: Endpoints - schema: - items: - $ref: '#/definitions/portainer.Endpoint' - type: array - '500': - description: Internal Server Error - schema: - type: Server - security: - - jwt: [] - summary: List endpoints - tags: - - endpoints - post: - consumes: - - multipart/form-data - description: |- - Create a new endpoint that will be used to manage an environment. 
- **Access policy**: administrator - operationId: EndpointCreate - parameters: - - description: 'Name that will be used to identify this endpoint (example: my-endpoint)' - in: formData - name: Name - required: true - type: string - - description: 'Environment type. Value must be one of: 1 (Local Docker environment), - 2 (Agent environment), 3 (Azure environment), 4 (Edge agent environment) - or 5 (Local Kubernetes Environment' - in: formData - name: EndpointType - required: true - type: integer - - description: 'URL or IP address of a Docker host (example: docker.mydomain.tld:2375). - Defaults to local if not specified (Linux: /var/run/docker.sock, Windows: - //./pipe/docker_engine)' - in: formData - name: URL - type: string - - description: 'URL or IP address where exposed containers will be reachable. - Defaults to URL if not specified (example: docker.mydomain.tld:2375)' - in: formData - name: PublicURL - type: string - - description: Endpoint group identifier. If not specified will default to 1 - (unassigned). - in: formData - name: GroupID - type: integer - - description: Require TLS to connect against this endpoint - in: formData - name: TLS - type: boolean - - description: Skip server verification when using TLS - in: formData - name: TLSSkipVerify - type: boolean - - description: Skip client verification when using TLS - in: formData - name: TLSSkipClientVerify - type: boolean - - description: TLS CA certificate file - in: formData - name: TLSCACertFile - type: file - - description: TLS client certificate file - in: formData - name: TLSCertFile - type: file - - description: TLS client key file - in: formData - name: TLSKeyFile - type: file - - description: Azure application ID. Required if endpoint type is set to 3 - in: formData - name: AzureApplicationID - type: string - - description: Azure tenant ID. Required if endpoint type is set to 3 - in: formData - name: AzureTenantID - type: string - - description: Azure authentication key. Required if endpoint type is set to - 3 - in: formData - name: AzureAuthenticationKey - type: string - - description: List of tag identifiers to which this endpoint is associated - in: formData - items: - type: integer - name: TagIDs - type: array - - description: The check in interval for edge agent (in seconds) - in: formData - name: EdgeCheckinInterval - type: integer - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Endpoint' - '400': - description: Invalid request - '500': - description: Server error - security: - - jwt: [] - summary: Create a new endpoint - tags: - - endpoints - /endpoints/{id}: - delete: - description: |- - Remove an endpoint. - **Access policy**: administrator - operationId: EndpointDelete - parameters: - - description: Endpoint identifier - in: path - name: id - required: true - type: integer - responses: - '204': - description: Success - '400': - description: Invalid request - '404': - description: Endpoint not found - '500': - description: Server error - security: - - jwt: [] - summary: Remove an endpoint - tags: - - endpoints - get: - description: |- - Retrieve details about an endpoint. 
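The `EndpointCreate` operation above takes `multipart/form-data`; a minimal TypeScript sketch of creating a local Docker endpoint follows, using only the documented form fields (`Name`, `EndpointType`, optionally `URL`). The Bearer-token auth and base URL are assumptions carried over from the previous sketch.

```ts
// Sketch only: create a local Docker endpoint (EndpointType 1) via multipart/form-data.
async function createLocalDockerEndpoint(baseUrl: string, jwt: string, name: string): Promise<unknown> {
  const form = new FormData();
  form.set('Name', name);        // e.g. "my-endpoint"
  form.set('EndpointType', '1'); // 1 = Local Docker environment
  // URL is optional for type 1; it defaults to the local Docker socket.
  const res = await fetch(`${baseUrl}/endpoints`, {
    method: 'POST',
    headers: { Authorization: `Bearer ${jwt}` }, // fetch sets the multipart boundary itself
    body: form,
  });
  if (!res.ok) throw new Error(`POST /endpoints failed: ${res.status}`);
  return res.json();
}
```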
- **Access policy**: restricted - operationId: EndpointInspect - parameters: - - description: Endpoint identifier - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Endpoint' - '400': - description: Invalid request - '404': - description: Endpoint not found - '500': - description: Server error - security: - - jwt: [] - summary: Inspect an endpoint - tags: - - endpoints - put: - consumes: - - application/json - description: |- - Update an endpoint. - **Access policy**: administrator - operationId: EndpointUpdate - parameters: - - description: Endpoint identifier - in: path - name: id - required: true - type: integer - - description: Endpoint details - in: body - name: body - required: true - schema: - $ref: '#/definitions/endpoints.endpointUpdatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Endpoint' - '400': - description: Invalid request - '404': - description: Endpoint not found - '500': - description: Server error - security: - - jwt: [] - summary: Update an endpoint - tags: - - endpoints - /endpoints/{id}/edge/jobs/{jobID}/logs: - post: - consumes: - - application/json - parameters: - - description: Endpoint Id - in: path - name: id - required: true - type: string - - description: Job Id - in: path - name: jobID - required: true - type: string - produces: - - application/json - responses: - '200': - description: '' - '400': - description: '' - '500': - description: '' - summary: Inspect an EdgeJob Log - tags: - - edge - - endpoints - /endpoints/{id}/edge/stacks/{stackId}: - get: - consumes: - - application/json - parameters: - - description: Endpoint Id - in: path - name: id - required: true - type: string - - description: EdgeStack Id - in: path - name: stackID - required: true - type: string - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/endpointedge.configResponse' - '400': - description: '' - '404': - description: '' - '500': - description: '' - summary: Inspect an Edge Stack for an Endpoint - tags: - - edge - - endpoints - - edge_stacks - /endpoints/{id}/snapshot: - post: - description: |- - Snapshots an endpoint - **Access policy**: restricted - operationId: EndpointSnapshot - parameters: - - description: Endpoint identifier - in: path - name: id - required: true - type: integer - responses: - '204': - description: Success - '400': - description: Invalid request - '404': - description: Endpoint not found - '500': - description: Server error - security: - - jwt: [] - summary: Snapshots an endpoint - tags: - - endpoints - /endpoints/{id}/edge/status: - get: - description: |- - Endpoint for edge agent to check status of environment - **Access policy**: restricted only to Edge endpoints - operationId: EndpointEdgeStatusInspect - parameters: - - description: Endpoint identifier - in: path - name: id - required: true - type: integer - responses: - '200': - description: Success - schema: - $ref: '#/definitions/endpoints.endpointEdgeStatusInspectResponse' - '400': - description: Invalid request - '403': - description: Permission denied to access endpoint - '404': - description: Endpoint not found - '500': - description: Server error - security: - - jwt: [] - summary: Get endpoint status - tags: - - endpoints - /endpoints/snapshot: - post: - description: |- - Snapshot all endpoints - **Access policy**: administrator - 
operationId: EndpointSnapshots - responses: - '204': - description: Success - '500': - description: Server Error - security: - - jwt: [] - summary: Snapshot all endpoints - tags: - - endpoints - /motd: - get: - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/motd.motdResponse' - security: - - jwt: [] - summary: fetches the message of the day - tags: - - motd - /registries: - get: - description: |- - List all registries based on the current user authorizations. - Will return all registries if using an administrator account otherwise it - will only return authorized registries. - **Access policy**: restricted - operationId: RegistryList - produces: - - application/json - responses: - '200': - description: Success - schema: - items: - $ref: '#/definitions/portainer.Registry' - type: array - '500': - description: Server error - security: - - jwt: [] - summary: List Registries - tags: - - registries - post: - consumes: - - application/json - description: |- - Create a new registry. - **Access policy**: administrator - operationId: RegistryCreate - parameters: - - description: Registry details - in: body - name: body - required: true - schema: - $ref: '#/definitions/registries.registryCreatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Registry' - '400': - description: Invalid request - '500': - description: Server error - security: - - jwt: [] - summary: Create a new registry - tags: - - registries - /registries/{id}: - delete: - description: |- - Remove a registry - **Access policy**: administrator - operationId: RegistryDelete - parameters: - - description: Registry identifier - in: path - name: id - required: true - type: integer - responses: - '204': - description: Success - '400': - description: Invalid request - '404': - description: Registry not found - '500': - description: Server error - security: - - jwt: [] - summary: Remove a registry - tags: - - registries - get: - description: |- - Retrieve details about a registry. 
- **Access policy**: administrator - operationId: RegistryInspect - parameters: - - description: Registry identifier - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Registry' - '400': - description: Invalid request - '403': - description: Permission denied to access registry - '404': - description: Registry not found - '500': - description: Server error - security: - - jwt: [] - summary: Inspect a registry - tags: - - registries - put: - consumes: - - application/json - description: |- - Update a registry - **Access policy**: administrator - operationId: RegistryUpdate - parameters: - - description: Registry identifier - in: path - name: id - required: true - type: integer - - description: Registry details - in: body - name: body - required: true - schema: - $ref: '#/definitions/registries.registryUpdatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Registry' - '400': - description: Invalid request - '404': - description: Registry not found - '409': - description: Another registry with the same URL already exists - '500': - description: Server error - security: - - jwt: [] - summary: Update a registry - tags: - - registries - /registries/{id}/configure: - post: - consumes: - - application/json - description: |- - Configures a registry. - **Access policy**: admin - operationId: RegistryConfigure - parameters: - - description: Registry identifier - in: path - name: id - required: true - type: integer - - description: Registry configuration - in: body - name: body - required: true - schema: - $ref: '#/definitions/registries.registryConfigurePayload' - produces: - - application/json - responses: - '204': - description: Success - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: Registry not found - '500': - description: Server error - security: - - jwt: [] - summary: Configures a registry - tags: - - registries - /resource_controls: - post: - consumes: - - application/json - description: |- - Create a new resource control to restrict access to a Docker resource. - **Access policy**: administrator - operationId: ResourceControlCreate - parameters: - - description: Resource control details - in: body - name: body - required: true - schema: - $ref: '#/definitions/resourcecontrols.resourceControlCreatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.ResourceControl' - '400': - description: Invalid request - '409': - description: Resource control already exists - '500': - description: Server error - security: - - jwt: [] - summary: Create a new resource control - tags: - - resource_controls - /resource_controls/{id}: - delete: - description: |- - Remove a resource control. 
- **Access policy**: administrator - parameters: - - description: Resource control identifier - in: path - name: id - required: true - type: integer - responses: - '204': - description: Success - '400': - description: Invalid request - '404': - description: Resource control not found - '500': - description: Server error - security: - - jwt: [] - summary: Remove a resource control - tags: - - resource_controls - put: - consumes: - - application/json - description: |- - Update a resource control - **Access policy**: restricted - operationId: ResourceControlUpdate - parameters: - - description: Resource control identifier - in: path - name: id - required: true - type: integer - - description: Resource control details - in: body - name: body - required: true - schema: - $ref: '#/definitions/resourcecontrols.resourceControlUpdatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.ResourceControl' - '400': - description: Invalid request - '403': - description: Unauthorized - '404': - description: Resource control not found - '500': - description: Server error - security: - - jwt: [] - summary: Update a resource control - tags: - - resource_controls - /roles: - get: - description: |- - List all roles available for use - **Access policy**: administrator - operationId: RoleList - produces: - - application/json - responses: - '200': - description: Success - schema: - items: - $ref: '#/definitions/portainer.Role' - type: array - '500': - description: Server error - security: - - jwt: [] - summary: List roles - tags: - - roles - /settings: - get: - description: |- - Retrieve Portainer settings. - **Access policy**: administrator - operationId: SettingsInspect - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Settings' - '500': - description: Server error - security: - - jwt: [] - summary: Retrieve Portainer settings - tags: - - settings - put: - consumes: - - application/json - description: |- - Update Portainer settings. - **Access policy**: administrator - operationId: SettingsUpdate - parameters: - - description: New settings - in: body - name: body - required: true - schema: - $ref: '#/definitions/settings.settingsUpdatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Settings' - '400': - description: Invalid request - '500': - description: Server error - security: - - jwt: [] - summary: Update Portainer settings - tags: - - settings - /settings/ldap/check: - put: - consumes: - - application/json - description: |- - Test LDAP connectivity using LDAP details - **Access policy**: administrator - operationId: SettingsLDAPCheck - parameters: - - description: details - in: body - name: body - required: true - schema: - $ref: '#/definitions/settings.settingsLDAPCheckPayload' - responses: - '204': - description: Success - '400': - description: Invalid request - '500': - description: Server error - security: - - jwt: [] - summary: Test LDAP connectivity - tags: - - settings - /settings/public: - get: - description: |- - Retrieve public settings. Returns a small set of settings that are not reserved to administrators only. 
- **Access policy**: public - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/settings.publicSettingsResponse' - '500': - description: Server error - summary: Retrieve Portainer public settings - tags: - - settings - /stacks: - get: - description: |- - List all stacks based on the current user authorizations. - Will return all stacks if using an administrator account otherwise it - will only return the list of stacks the user have access to. - **Access policy**: restricted - operationId: StackList - parameters: - - description: Filters to process on the stack list. Encoded as JSON (a map[string]string). - For example, { - in: query - name: filters - type: string - responses: - '200': - description: Success - schema: - items: - $ref: '#/definitions/portainer.Stack' - type: array - '204': - description: Success - '400': - description: Invalid request - '500': - description: Server error - security: - - jwt: [] - summary: List stacks - tags: - - stacks - post: - consumes: - - application/json - - ' multipart/form-data' - description: |- - Deploy a new stack into a Docker environment specified via the endpoint identifier. - **Access policy**: restricted - operationId: StackCreate - parameters: - - description: 'Stack deployment type. Possible values: 1 (Swarm stack) or 2 - (Compose stack).' - enum: - - 1 - - 2 - in: query - name: type - required: true - type: integer - - description: 'Stack deployment method. Possible values: file, string or repository.' - enum: - - string - - file - - repository - in: query - name: method - required: true - type: string - - description: Identifier of the endpoint that will be used to deploy the stack - in: query - name: endpointId - required: true - type: integer - - description: Required when using method=string and type=1 - in: body - name: body_swarm_string - schema: - $ref: '#/definitions/stacks.swarmStackFromFileContentPayload' - - description: Required when using method=repository and type=1 - in: body - name: body_swarm_repository - schema: - $ref: '#/definitions/stacks.swarmStackFromGitRepositoryPayload' - - description: Required when using method=string and type=2 - in: body - name: body_compose_string - schema: - $ref: '#/definitions/stacks.composeStackFromFileContentPayload' - - description: Required when using method=repository and type=2 - in: body - name: body_compose_repository - schema: - $ref: '#/definitions/stacks.composeStackFromGitRepositoryPayload' - - description: Name of the stack. required when method is file - in: formData - name: Name - type: string - - description: Swarm cluster identifier. Required when method equals file and - type equals 1. required when method is file - in: formData - name: SwarmID - type: string - - description: "Environment variables passed during deployment, represented - as a JSON array [{'name': 'name', 'value': 'value'}]. Optional, - used when method equals file and type equals 1." - in: formData - name: Env - type: string - - description: Stack file. required when method is file - in: formData - name: file - type: file - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/portainer.CustomTemplate' - '400': - description: Invalid request - '500': - description: Server error - security: - - jwt: [] - summary: Deploy a new stack - tags: - - stacks - /stacks/{id}: - delete: - description: |- - Remove a stack. 
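To make the `StackCreate` query parameters above concrete, here is a hedged TypeScript sketch of deploying a Compose stack from an inline file (`type=2`, `method=string`). The `Name`/`StackFileContent` body fields are assumptions; the exact shape lives in `stacks.composeStackFromFileContentPayload`, which this diff does not expand.

```ts
// Sketch only: deploy a Compose stack from inline file content.
async function deployComposeStack(
  baseUrl: string,
  jwt: string,
  endpointId: number,
  name: string,
  composeFile: string
): Promise<unknown> {
  const qs = new URLSearchParams({ type: '2', method: 'string', endpointId: String(endpointId) });
  const res = await fetch(`${baseUrl}/stacks?${qs}`, {
    method: 'POST',
    headers: { Authorization: `Bearer ${jwt}`, 'Content-Type': 'application/json' },
    body: JSON.stringify({ Name: name, StackFileContent: composeFile }), // assumed field names
  });
  if (!res.ok) throw new Error(`POST /stacks failed: ${res.status}`);
  return res.json();
}
```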
- **Access policy**: restricted - operationId: StackDelete - parameters: - - description: Stack identifier - in: path - name: id - required: true - type: integer - - description: Set to true to delete an external stack. Only external Swarm - stacks are supported - in: query - name: external - type: boolean - - description: Endpoint identifier used to remove an external stack (required - when external is set to true) - in: query - name: endpointId - type: integer - responses: - '204': - description: Success - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: ' not found' - '500': - description: Server error - security: - - jwt: [] - summary: Remove a stack - tags: - - stacks - get: - description: |- - Retrieve details about a stack. - **Access policy**: restricted - operationId: StackInspect - parameters: - - description: Stack identifier - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Stack' - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: Stack not found - '500': - description: Server error - security: - - jwt: [] - summary: Inspect a stack - tags: - - stacks - put: - consumes: - - application/json - description: |- - Update a stack. - **Access policy**: restricted - operationId: StackUpdate - parameters: - - description: Stack identifier - in: path - name: id - required: true - type: integer - - description: Stacks created before version 1.18.0 might not have an associated - endpoint identifier. Use this optional parameter to set the endpoint identifier - used by the stack. - in: query - name: endpointId - type: integer - - description: Stack details - in: body - name: body - required: true - schema: - $ref: '#/definitions/stacks.updateSwarmStackPayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Stack' - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: ' not found' - '500': - description: Server error - security: - - jwt: [] - summary: Update a stack - tags: - - stacks - /stacks/{id}/file: - get: - description: |- - Get Stack file content. - **Access policy**: restricted - operationId: StackFileInspect - parameters: - - description: Stack identifier - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/stacks.stackFileResponse' - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: Stack not found - '500': - description: Server error - security: - - jwt: [] - summary: Retrieve the content of the Stack file for the specified stack - tags: - - stacks - /stacks/{id}/migrate: - post: - description: |- - Migrate a stack from an endpoint to another endpoint. It will re-create the stack inside the target endpoint before removing the original stack. - **Access policy**: restricted - operationId: StackMigrate - parameters: - - description: Stack identifier - in: path - name: id - required: true - type: integer - - description: Stacks created before version 1.18.0 might not have an associated - endpoint identifier. Use this optional parameter to set the endpoint identifier - used by the stack. 
- in: query - name: endpointId - type: integer - - description: Stack migration details - in: body - name: body - required: true - schema: - $ref: '#/definitions/stacks.stackMigratePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Stack' - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: Stack not found - '500': - description: Server error - security: - - jwt: [] - summary: Migrate a stack to another endpoint - tags: - - stacks - /stacks/{id}/start: - post: - description: |- - Starts a stopped Stack. - **Access policy**: restricted - operationId: StackStart - parameters: - - description: Stack identifier - in: path - name: id - required: true - type: integer - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Stack' - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: ' not found' - '500': - description: Server error - security: - - jwt: [] - summary: Starts a stopped Stack - tags: - - stacks - /stacks/{id}/stop: - post: - description: |- - Stops a stopped Stack. - **Access policy**: restricted - operationId: StackStop - parameters: - - description: Stack identifier - in: path - name: id - required: true - type: integer - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Stack' - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: ' not found' - '500': - description: Server error - security: - - jwt: [] - summary: Stops a stopped Stack - tags: - - stacks - /status: - get: - description: |- - Retrieve Portainer status - **Access policy**: public - operationId: StatusInspect - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Status' - summary: Check Portainer status - tags: - - status - /status/version: - get: - description: |- - Check if portainer has an update available - **Access policy**: authenticated - operationId: StatusInspectVersion - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/status.inspectVersionResponse' - security: - - jwt: [] - summary: Check for portainer updates - tags: - - status - /tags: - get: - description: |- - List tags. - **Access policy**: administrator - operationId: TagList - produces: - - application/json - responses: - '200': - description: Success - schema: - items: - $ref: '#/definitions/portainer.Tag' - type: array - '500': - description: Server error - security: - - jwt: [] - summary: List tags - tags: - - tags - post: - description: |- - Create a new tag. - **Access policy**: administrator - operationId: TagCreate - parameters: - - description: Tag details - in: body - name: body - required: true - schema: - $ref: '#/definitions/tags.tagCreatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Tag' - '409': - description: Tag name exists - '500': - description: Server error - security: - - jwt: [] - summary: Create a new tag - tags: - - tags - /tags/{id}: - delete: - consumes: - - application/json - description: |- - Remove a tag. 
- **Access policy**: administrator - operationId: TagDelete - parameters: - - description: Tag identifier - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '204': - description: Success - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: Tag not found - '500': - description: Server error - security: - - jwt: [] - summary: Remove a tag - tags: - - tags - /team: - post: - consumes: - - application/json - description: |- - Create a new team. - **Access policy**: administrator - operationId: TeamCreate - parameters: - - description: details - in: body - name: body - required: true - schema: - $ref: '#/definitions/teams.teamCreatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Team' - '400': - description: Invalid request - '409': - description: Team already exists - '500': - description: Server error - security: - - jwt: [] - summary: Create a new team - tags: - - teams - /team/{id}: - put: - consumes: - - application/json - description: |- - Update a team. - **Access policy**: administrator - operationId: TeamUpdate - parameters: - - description: Team identifier - in: path - name: id - required: true - type: integer - - description: Team details - in: body - name: body - required: true - schema: - $ref: '#/definitions/teams.teamUpdatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Team' - '204': - description: Success - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: Team not found - '500': - description: Server error - security: - - jwt: [] - summary: Update a team - tags: - - '' - /team_memberships: - get: - description: |- - List team memberships. Access is only available to administrators and team leaders. - **Access policy**: admin - operationId: TeamMembershipList - produces: - - application/json - responses: - '200': - description: Success - schema: - items: - $ref: '#/definitions/portainer.TeamMembership' - type: array - '400': - description: Invalid request - '403': - description: Permission denied - '500': - description: Server error - security: - - jwt: [] - summary: List team memberships - tags: - - team_memberships - post: - consumes: - - application/json - description: |- - Create a new team memberships. Access is only available to administrators leaders of the associated team. - **Access policy**: admin - operationId: TeamMembershipCreate - parameters: - - description: Team membership details - in: body - name: body - required: true - schema: - $ref: '#/definitions/teammemberships.teamMembershipCreatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.TeamMembership' - '204': - description: Success - '400': - description: Invalid request - '403': - description: Permission denied to manage memberships - '409': - description: Team membership already registered - '500': - description: Server error - security: - - jwt: [] - summary: Create a new team membership - tags: - - team_memberships - /team_memberships/{id}: - delete: - description: |- - Remove a team membership. Access is only available to administrators leaders of the associated team. 
- **Access policy**: restricted - operationId: TeamMembershipDelete - parameters: - - description: TeamMembership identifier - in: path - name: id - required: true - type: integer - responses: - '204': - description: Success - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: TeamMembership not found - '500': - description: Server error - security: - - jwt: [] - summary: Remove a team membership - tags: - - team_memberships - put: - consumes: - - application/json - description: |- - Update a team membership. Access is only available to administrators leaders of the associated team. - **Access policy**: restricted - operationId: TeamMembershipUpdate - parameters: - - description: Team membership identifier - in: path - name: id - required: true - type: integer - - description: Team membership details - in: body - name: body - required: true - schema: - $ref: '#/definitions/teammemberships.teamMembershipUpdatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.TeamMembership' - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: TeamMembership not found - '500': - description: Server error - security: - - jwt: [] - summary: Update a team membership - tags: - - team_memberships - /teams: - get: - description: |- - List teams. For non-administrator users, will only list the teams they are member of. - **Access policy**: restricted - operationId: TeamList - produces: - - application/json - responses: - '200': - description: Success - schema: - items: - $ref: '#/definitions/portainer.Team' - type: array - '500': - description: Server error - security: - - jwt: [] - summary: List teams - tags: - - teams - /teams/{id}: - delete: - description: |- - Remove a team. - **Access policy**: administrator - operationId: TeamDelete - responses: - '204': - description: Success - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: Team not found - '500': - description: Server error - security: - - jwt: [] - summary: Remove a team - tags: - - teams - get: - description: |- - Retrieve details about a team. Access is only available for administrator and leaders of that team. - **Access policy**: restricted - operationId: TeamInspect - parameters: - - description: Team identifier - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.Team' - '204': - description: Success - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: Team not found - '500': - description: Server error - security: - - jwt: [] - summary: Inspect a team - tags: - - teams - /teams/{id}/memberships: - get: - description: |- - List team memberships. Access is only available to administrators and team leaders. - **Access policy**: restricted - operationId: TeamMemberships - produces: - - application/json - responses: - '200': - description: Success - schema: - items: - $ref: '#/definitions/portainer.TeamMembership' - type: array - '400': - description: Invalid request - '403': - description: Permission denied - '500': - description: Server error - security: - - jwt: [] - summary: List team memberships - tags: - - team_memberships - /templates: - get: - description: |- - List available templates. 
- **Access policy**: restricted - operationId: TemplateList - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/templates.listResponse' - '500': - description: Server error - security: - - jwt: [] - summary: List available templates - tags: - - templates - /templates/file: - post: - consumes: - - application/json - description: |- - Get a template's file - **Access policy**: restricted - operationId: TemplateFile - parameters: - - description: File details - in: body - name: body - required: true - schema: - $ref: '#/definitions/templates.filePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/templates.fileResponse' - '400': - description: Invalid request - '500': - description: Server error - security: - - jwt: [] - summary: Get a template's file - tags: - - templates - /upload/tls/{certificate}: - post: - consumes: - - multipart/form-data - description: |- - Use this endpoint to upload TLS files. - **Access policy**: administrator - operationId: UploadTLS - parameters: - - description: TLS file type. Valid values are 'ca', 'cert' or 'key'. - enum: - - ca - - cert - - key - in: path - name: certificate - required: true - type: string - - description: Folder where the TLS file will be stored. Will be created if - not existing - in: formData - name: folder - required: true - type: string - - description: The file to upload - in: formData - name: file - required: true - type: file - produces: - - application/json - responses: - '204': - description: Success - '400': - description: Invalid request - '500': - description: Server error - security: - - jwt: [] - summary: Upload TLS files - tags: - - upload - /users: - get: - description: |- - List Portainer users. - Non-administrator users will only be able to list other non-administrator user accounts. - **Access policy**: restricted - operationId: UserList - produces: - - application/json - responses: - '200': - description: Success - schema: - items: - $ref: '#/definitions/portainer.User' - type: array - '400': - description: Invalid request - '500': - description: Server error - security: - - jwt: [] - summary: List users - tags: - - users - post: - consumes: - - application/json - description: |- - Create a new Portainer user. - Only team leaders and administrators can create users. - Only administrators can create an administrator user account. - **Access policy**: restricted - operationId: UserCreate - parameters: - - description: User details - in: body - name: body - required: true - schema: - $ref: '#/definitions/users.userCreatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.User' - '400': - description: Invalid request - '403': - description: Permission denied - '409': - description: User already exists - '500': - description: Server error - security: - - jwt: [] - summary: Create a new user - tags: - - users - /users/{id}: - delete: - consumes: - - application/json - description: |- - Remove a user. 
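The `UploadTLS` operation above expects a `{certificate}` path segment of `ca`, `cert` or `key` plus `folder` and `file` form fields; a short TypeScript sketch of uploading a CA certificate follows. Bearer-token auth is an assumption, as in the earlier sketches.

```ts
// Sketch only: upload a TLS CA certificate for a given folder.
import { readFile } from 'node:fs/promises';

async function uploadTLSCA(baseUrl: string, jwt: string, folder: string, caPath: string): Promise<void> {
  const form = new FormData();
  form.set('folder', folder);                                  // created if it does not exist
  form.set('file', new Blob([await readFile(caPath)]), 'ca.pem');
  const res = await fetch(`${baseUrl}/upload/tls/ca`, {
    method: 'POST',
    headers: { Authorization: `Bearer ${jwt}` },
    body: form,
  });
  if (!res.ok) throw new Error(`POST /upload/tls/ca failed: ${res.status}`);
}
```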
- **Access policy**: administrator - operationId: UserDelete - parameters: - - description: User identifier - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '204': - description: Success - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: User not found - '500': - description: Server error - security: - - jwt: [] - summary: Remove a user - tags: - - users - get: - description: |- - Retrieve details about a user. - **Access policy**: administrator - operationId: UserInspect - parameters: - - description: User identifier - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.User' - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: User not found - '500': - description: Server error - security: - - jwt: [] - summary: Inspect a user - tags: - - users - put: - consumes: - - application/json - description: |- - Update user details. A regular user account can only update his details. - **Access policy**: authenticated - operationId: UserUpdate - parameters: - - description: User identifier - in: path - name: id - required: true - type: integer - - description: User details - in: body - name: body - required: true - schema: - $ref: '#/definitions/users.userUpdatePayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.User' - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: User not found - '409': - description: Username already exist - '500': - description: Server error - security: - - jwt: [] - summary: Update a user - tags: - - users - /users/{id}/memberships: - get: - description: |- - Inspect a user memberships. - **Access policy**: authenticated - operationId: UserMembershipsInspect - parameters: - - description: User identifier - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.TeamMembership' - '400': - description: Invalid request - '403': - description: Permission denied - '500': - description: Server error - security: - - jwt: [] - summary: Inspect a user memberships - tags: - - users - /users/{id}/passwd: - put: - consumes: - - application/json - description: |- - Update password for the specified user. - **Access policy**: authenticated - operationId: UserUpdatePassword - parameters: - - description: identifier - in: path - name: id - required: true - type: integer - - description: details - in: body - name: body - required: true - schema: - $ref: '#/definitions/users.userUpdatePasswordPayload' - produces: - - application/json - responses: - '204': - description: Success - '400': - description: Invalid request - '403': - description: Permission denied - '404': - description: User not found - '500': - description: Server error - security: - - jwt: [] - summary: Update password for a user - tags: - - users - /users/admin/check: - get: - description: |- - Check if an administrator account exists in the database. 
- **Access policy**: public - operationId: UserAdminCheck - responses: - '204': - description: Success - '404': - description: User not found - summary: Check administrator account existence - tags: - - users - /users/admin/init: - post: - consumes: - - application/json - description: |- - Initialize the 'admin' user account. - **Access policy**: public - operationId: UserAdminInit - parameters: - - description: User details - in: body - name: body - required: true - schema: - $ref: '#/definitions/users.adminInitPayload' - produces: - - application/json - responses: - '200': - description: Success - schema: - $ref: '#/definitions/portainer.User' - '400': - description: Invalid request - '409': - description: Admin user already initialized - '500': - description: Server error - summary: Initialize administrator account - tags: - - '' - /webhooks: - get: - consumes: - - application/json - parameters: - - description: Webhook data - in: body - name: body - required: true - schema: - $ref: '#/definitions/webhooks.webhookCreatePayload' - - in: query - name: EndpointID - type: integer - - in: query - name: ResourceID - type: string - produces: - - application/json - responses: - '200': - description: OK - schema: - items: - $ref: '#/definitions/portainer.Webhook' - type: array - '400': - description: '' - '500': - description: '' - security: - - jwt: [] - summary: List webhooks - tags: - - webhooks - post: - consumes: - - application/json - parameters: - - description: Webhook data - in: body - name: body - required: true - schema: - $ref: '#/definitions/webhooks.webhookCreatePayload' - produces: - - application/json - responses: - '200': - description: OK - schema: - $ref: '#/definitions/portainer.Webhook' - '400': - description: '' - '409': - description: '' - '500': - description: '' - security: - - jwt: [] - summary: Create a webhook - tags: - - webhooks - /webhooks/{id}: - delete: - consumes: - - application/json - parameters: - - description: Webhook id - in: path - name: id - required: true - type: integer - produces: - - application/json - responses: - '202': - description: Webhook deleted - '400': - description: '' - '500': - description: '' - security: - - jwt: [] - summary: Delete a webhook - tags: - - webhooks - /webhooks/{token}: - post: - consumes: - - application/json - description: Acts on a passed in token UUID to restart the docker service - parameters: - - description: Webhook token - in: path - name: token - required: true - type: string - produces: - - application/json - responses: - '202': - description: Webhook executed - '400': - description: '' - '500': - description: '' - summary: Execute a webhook - tags: - - webhooks - /websocket/attach: - get: - consumes: - - application/json - description: |- - If the nodeName query parameter is present, the request will be proxied to the underlying agent endpoint. - If the nodeName query parameter is not specified, the request will be upgraded to the websocket protocol and - an AttachStart operation HTTP request will be created and hijacked. - Authentication and access is controlled via the mandatory token query parameter. 
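The webhook routes above split into an authenticated management API and an unauthenticated trigger URL keyed by token. The sketch below illustrates both; the `ResourceID`/`EndpointID`/`WebhookType` payload fields and the token field on the returned webhook are assumptions, since `webhooks.webhookCreatePayload` and `portainer.Webhook` are not expanded here.

```ts
// Sketch only: create a webhook, then trigger it by its token (no JWT needed).
async function createWebhook(baseUrl: string, jwt: string, endpointId: number, resourceId: string): Promise<unknown> {
  const res = await fetch(`${baseUrl}/webhooks`, {
    method: 'POST',
    headers: { Authorization: `Bearer ${jwt}`, 'Content-Type': 'application/json' },
    body: JSON.stringify({ EndpointID: endpointId, ResourceID: resourceId, WebhookType: 1 }), // assumed fields
  });
  if (!res.ok) throw new Error(`POST /webhooks failed: ${res.status}`);
  return res.json(); // expected to carry the token used in the trigger URL below
}

async function triggerWebhook(baseUrl: string, token: string): Promise<void> {
  const res = await fetch(`${baseUrl}/webhooks/${token}`, { method: 'POST' });
  if (res.status !== 202) throw new Error(`webhook not executed: ${res.status}`);
}
```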
- parameters: - - description: endpoint ID of the endpoint where the resource is located - in: query - name: endpointId - required: true - type: integer - - description: node name - in: query - name: nodeName - type: string - - description: JWT token used for authentication against this endpoint - in: query - name: token - required: true - type: string - produces: - - application/json - responses: - '200': - description: '' - '400': - description: '' - '403': - description: '' - '404': - description: '' - '500': - description: '' - security: - - jwt: [] - summary: Attach a websocket - tags: - - websocket - /websocket/exec: - get: - consumes: - - application/json - description: |- - If the nodeName query parameter is present, the request will be proxied to the underlying agent endpoint. - If the nodeName query parameter is not specified, the request will be upgraded to the websocket protocol and - an ExecStart operation HTTP request will be created and hijacked. - Authentication and access is controlled via the mandatory token query parameter. - parameters: - - description: endpoint ID of the endpoint where the resource is located - in: query - name: endpointId - required: true - type: integer - - description: node name - in: query - name: nodeName - type: string - - description: JWT token used for authentication against this endpoint - in: query - name: token - required: true - type: string - produces: - - application/json - responses: - '200': - description: '' - '400': - description: '' - '409': - description: '' - '500': - description: '' - security: - - jwt: [] - summary: Execute a websocket - tags: - - websocket - /websocket/pod: - get: - consumes: - - application/json - description: |- - The request will be upgraded to the websocket protocol. - Authentication and access is controlled via the mandatory token query parameter. 
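Since the websocket routes above authenticate via the mandatory `token` query parameter rather than a header, a brief TypeScript sketch of opening `/websocket/exec` may help; it uses the standard `WebSocket` API (browsers, or a compatible runtime) and treats the websocket base URL as caller-supplied.

```ts
// Sketch only: open an exec websocket; auth travels in the `token` query parameter.
function openExecSocket(baseWsUrl: string, jwt: string, endpointId: number, nodeName?: string): WebSocket {
  const qs = new URLSearchParams({ endpointId: String(endpointId), token: jwt });
  if (nodeName) qs.set('nodeName', nodeName); // proxied to the agent node when present
  const socket = new WebSocket(`${baseWsUrl}/websocket/exec?${qs}`);
  socket.onmessage = (event) => console.log('exec output:', event.data);
  return socket;
}
```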
- parameters: - - description: endpoint ID of the endpoint where the resource is located - in: query - name: endpointId - required: true - type: integer - - description: namespace where the container is located - in: query - name: namespace - required: true - type: string - - description: name of the pod containing the container - in: query - name: podName - required: true - type: string - - description: name of the container - in: query - name: containerName - required: true - type: string - - description: command to execute in the container - in: query - name: command - required: true - type: string - - description: JWT token used for authentication against this endpoint - in: query - name: token - required: true - type: string - produces: - - application/json - responses: - '200': - description: '' - '400': - description: '' - '403': - description: '' - '404': - description: '' - '500': - description: '' - security: - - jwt: [] - summary: Execute a websocket on pod - tags: - - websocket -schemes: - - http - - https -securityDefinitions: - jwt: - in: header - name: Authorization - type: apiKey -swagger: '2.0' -tags: - - description: Authenticate against Portainer HTTP API - name: auth - - description: Manage Custom Templates - name: custom_templates - - description: Manage how Portainer connects to the DockerHub - name: dockerhub - - description: Manage Edge Groups - name: edge_groups - - description: Manage Edge Jobs - name: edge_jobs - - description: Manage Edge Stacks - name: edge_stacks - - description: Manage Edge Templates - name: edge_templates - - description: Manage Edge related endpoint settings - name: edge - - description: Manage Docker environments - name: endpoints - - description: Manage endpoint groups - name: endpoint_groups - - description: Fetch the message of the day - name: motd - - description: Manage Docker registries - name: registries - - description: Manage access control on Docker resources - name: resource_controls - - description: Manage roles - name: roles - - description: Manage Portainer settings - name: settings - - description: Information about the Portainer instance - name: status - - description: Manage Docker stacks - name: stacks - - description: Manage users - name: users - - description: Manage tags - name: tags - - description: Manage teams - name: teams - - description: Manage team memberships - name: team_memberships - - description: Manage App Templates - name: templates - - description: Manage stacks - name: stacks - - description: Upload files - name: upload - - description: Manage webhooks - name: webhooks - - description: Create exec sessions using websockets - name: websocket diff --git a/app/assets/css/app.css b/app/assets/css/app.css index afcf49470..85d8fbca5 100644 --- a/app/assets/css/app.css +++ b/app/assets/css/app.css @@ -32,6 +32,10 @@ body { color: var(--text-body-color) !important; } +.bg-widget-color { + background: var(--bg-widget-color); +} + html, body, #page-wrapper, @@ -224,7 +228,7 @@ input[type='checkbox'] { .blocklist-item--selected { background-color: var(--bg-blocklist-item-selected-color); - border: 2px solid var(--border-blocklist-item-selected-color); + border-color: var(--border-blocklist-item-selected-color); color: var(--text-blocklist-item-selected-color); } diff --git a/app/assets/css/bootstrap-override.css b/app/assets/css/bootstrap-override.css index 19e63397d..cbded5622 100644 --- a/app/assets/css/bootstrap-override.css +++ b/app/assets/css/bootstrap-override.css @@ -20,9 +20,7 @@ } .vertical-center { - 
display: inline-flex; - align-items: center; - gap: 5px; + @apply inline-flex items-center gap-1; } .flex-center { diff --git a/app/assets/css/button.css b/app/assets/css/button.css index dc3f82460..547d9fdc0 100644 --- a/app/assets/css/button.css +++ b/app/assets/css/button.css @@ -24,44 +24,84 @@ fieldset[disabled] .btn { box-shadow: none; } +.btn-icon { + @apply !border-none !bg-transparent p-0; +} + .btn.btn-primary { - @apply border-blue-8 bg-blue-8 text-white; - @apply hover:border-blue-9 hover:bg-blue-9 hover:text-white; - @apply th-dark:hover:border-blue-7 th-dark:hover:bg-blue-7; + @apply border-graphite-700 bg-graphite-700 text-mist-100; + @apply hover:border-graphite-700/90 hover:bg-graphite-700/90 hover:text-mist-100; + @apply focus:border-blue-5 focus:shadow-graphite-700/80 focus:text-mist-100; + + @apply th-dark:border-mist-100 th-dark:bg-mist-100 th-dark:text-graphite-700; + @apply th-dark:hover:border-mist-100/90 th-dark:hover:bg-mist-100/90 th-dark:hover:text-graphite-700; + @apply th-dark:focus:border-blue-5 th-dark:focus:shadow-white/80 th-dark:focus:text-graphite-700; + + @apply th-highcontrast:border-mist-100 th-highcontrast:bg-mist-100 th-highcontrast:text-graphite-700; + @apply th-highcontrast:hover:border-mist-100/90 th-highcontrast:hover:bg-mist-100/90 th-highcontrast:hover:text-graphite-700; + @apply th-highcontrast:focus:border-blue-5 th-highcontrast:focus:shadow-white/80 th-highcontrast:focus:text-graphite-700; +} + +/* Sidebar background is always dark, so we need to override the primary button styles */ +.btn.btn-primary.sidebar { + @apply border-mist-100 bg-mist-100 text-graphite-700; + @apply hover:border-mist-100/90 hover:bg-mist-100/90 hover:text-graphite-700; + @apply focus:border-blue-5 focus:shadow-white/80 focus:text-graphite-700; } .btn.btn-primary:active, .btn.btn-primary.active, .open > .dropdown-toggle.btn-primary { - @apply border-blue-5 bg-blue-9; + @apply border-graphite-700/80 bg-graphite-700 text-mist-100; + @apply th-dark:border-white/80 th-dark:bg-mist-100 th-dark:text-graphite-700; + @apply th-highcontrast:border-white/80 th-highcontrast:bg-mist-100 th-highcontrast:text-graphite-700; } .nav-pills > li.active > a, .nav-pills > li.active > a:hover, .nav-pills > li.active > a:focus { - @apply bg-blue-8; + @apply bg-graphite-700 text-mist-100; + @apply th-dark:bg-mist-100 th-dark:text-graphite-700; + @apply th-highcontrast:bg-mist-100 th-highcontrast:text-graphite-700; } /* Button Secondary */ .btn.btn-secondary { @apply border border-solid; - @apply border-blue-8 bg-blue-2 text-blue-9; - @apply hover:bg-blue-3; + @apply border-graphite-700 bg-mist-100 text-graphite-700; + @apply hover:border-graphite-700 hover:bg-graphite-700/10 hover:text-graphite-700; + @apply focus:border-blue-5 focus:shadow-graphite-700/20 focus:text-graphite-700; - @apply th-dark:border-blue-7 th-dark:bg-gray-10 th-dark:text-blue-3; - @apply th-dark:hover:bg-blue-11; + @apply th-dark:border-mist-100 th-dark:bg-graphite-700 th-dark:text-mist-100; + @apply th-dark:hover:border-mist-100 th-dark:hover:bg-mist-100/20 th-dark:hover:text-mist-100; + @apply th-dark:focus:border-blue-5 th-dark:focus:shadow-white/80 th-dark:focus:text-mist-100; + + @apply th-highcontrast:border-mist-100 th-highcontrast:bg-graphite-700 th-highcontrast:text-mist-100; + @apply th-highcontrast:hover:border-mist-100 th-highcontrast:hover:bg-mist-100/20 th-highcontrast:hover:text-mist-100; + @apply th-highcontrast:focus:border-blue-5 th-highcontrast:focus:shadow-white/80 
th-highcontrast:focus:text-mist-100; +} + +.btn.btn-secondary:active, +.btn.btn-secondary.active, +.open > .dropdown-toggle.btn-secondary { + @apply border-graphite-700 bg-graphite-700/10 text-graphite-700; + @apply th-dark:border-mist-100 th-dark:bg-mist-100/20 th-dark:text-mist-100; + @apply th-highcontrast:border-mist-100 th-highcontrast:bg-mist-100/20 th-highcontrast:text-mist-100; } .btn.btn-danger { @apply border-error-8 bg-error-8; @apply hover:border-error-7 hover:bg-error-7 hover:text-white; + @apply focus:border-blue-5 focus:shadow-error-8/20 focus:text-white; + @apply th-dark:focus:border-blue-5 th-dark:focus:shadow-white/80 th-dark:focus:text-white; + @apply th-highcontrast:focus:border-blue-5 th-highcontrast:focus:shadow-white/80 th-highcontrast:focus:text-white; } .btn.btn-danger:active, .btn.btn-danger.active, .open > .dropdown-toggle.btn-danger { - @apply border-blue-5 bg-error-8 text-white; + @apply border-error-5 bg-error-8 text-white; } .btn.btn-dangerlight { @@ -70,6 +110,13 @@ fieldset[disabled] .btn { @apply hover:bg-error-2 th-dark:hover:bg-error-11; @apply border-error-5 th-highcontrast:border-error-7 th-dark:border-error-7; @apply border border-solid; + + @apply focus:border-blue-5 focus:shadow-error-8/20 focus:text-error-9; + @apply th-dark:focus:border-blue-5 th-dark:focus:shadow-white/80 th-dark:focus:text-white; + @apply th-highcontrast:focus:border-blue-5 th-highcontrast:focus:shadow-white/80; +} +.btn.btn-icon.btn-dangerlight { + @apply hover:text-error-11 th-dark:hover:text-error-7; } .btn.btn-success { @@ -83,15 +130,18 @@ fieldset[disabled] .btn { /* secondary-grey */ .btn.btn-default, .btn.btn-light { - @apply border-gray-5 bg-white text-gray-9; + @apply border-gray-5 bg-white text-gray-8; @apply hover:border-gray-5 hover:bg-gray-3 hover:text-gray-10; + @apply focus:border-blue-5 focus:shadow-graphite-700/20 focus:text-gray-8; /* dark mode */ @apply th-dark:border-gray-warm-7 th-dark:bg-gray-iron-10 th-dark:text-gray-warm-4; @apply th-dark:hover:border-gray-6 th-dark:hover:bg-gray-iron-9 th-dark:hover:text-gray-warm-4; + @apply th-dark:focus:border-blue-5 th-dark:focus:shadow-white/80 th-dark:focus:text-gray-warm-4; @apply th-highcontrast:border-gray-2 th-highcontrast:bg-black th-highcontrast:text-white; @apply th-highcontrast:hover:border-gray-6 th-highcontrast:hover:bg-gray-9 th-highcontrast:hover:text-gray-warm-4; + @apply th-highcontrast:focus:border-blue-5 th-highcontrast:focus:shadow-white/80 th-highcontrast:focus:text-white; } .btn.btn-light:active, @@ -112,38 +162,17 @@ fieldset[disabled] .btn { .input-group-btn .btn.active, .btn-group .btn.active { - @apply border-blue-5 bg-blue-2 text-blue-10; - @apply th-dark:border-blue-9 th-dark:bg-blue-11 th-dark:text-blue-2; + @apply border-graphite-700/80 bg-graphite-700 text-mist-100; + @apply th-dark:border-white/80 th-dark:bg-mist-100 th-dark:text-graphite-700; + @apply th-highcontrast:border-white/80 th-highcontrast:bg-mist-100 th-highcontrast:text-graphite-700; } -/* focus */ - -.btn.btn-primary:focus, -.btn.btn-secondary:focus, -.btn.btn-light:focus { - @apply border-blue-5; +.btn.btn-icon:focus { + box-shadow: none !important; } -.btn.btn-danger:focus, -.btn.btn-dangerlight:focus { - @apply border-blue-6; -} - -.btn.btn-primary:focus, -.btn.btn-secondary:focus, -.btn.btn-light:focus, -.btn.btn-danger:focus, -.btn.btn-dangerlight:focus { - --btn-focus-color: var(--ui-blue-3); - box-shadow: 0px 0px 0px 4px var(--btn-focus-color); -} - -[theme='dark'] .btn.btn-primary:focus, -[theme='dark'] 
.btn.btn-secondary:focus, -[theme='dark'] .btn.btn-light:focus, -[theme='dark'] .btn.btn-danger:focus, -[theme='dark'] .btn.btn-dangerlight:focus { - --btn-focus-color: var(--ui-blue-11); +.btn:focus { + box-shadow: 0px 0px 0px 2px var(--tw-shadow-color); } a.no-link, diff --git a/app/assets/css/colors.json b/app/assets/css/colors.json index 55f2922e5..94d3c2015 100644 --- a/app/assets/css/colors.json +++ b/app/assets/css/colors.json @@ -1,6 +1,31 @@ { "black": "#000000", "white": "#ffffff", + "graphite": { + "10": "#f5f5f6", + "50": "#e5e6e8", + "100": "#ced0d3", + "200": "#abafb5", + "300": "#7b8089", + "400": "#5c6066", + "500": "#484a4e", + "600": "#3a3b3f", + "700": "#2e2f33", + "800": "#222326", + "900": "#161719" + }, + "mist": { + "50": "#fcfbfa", + "100": "#f7f6f3", + "200": "#f0f0ec", + "300": "#e8e7e2", + "400": "#e2e1db", + "500": "#d9d8d2", + "600": "#ceccc4", + "700": "#bebcb4", + "800": "#a7a6a0", + "900": "#8b8983" + }, "gray": { "1": "#fcfcfd", "2": "#f9fafb", diff --git a/app/assets/css/react-datetime-picker-override.css b/app/assets/css/react-datetime-picker-override.css index acd26fb58..dbbea4766 100644 --- a/app/assets/css/react-datetime-picker-override.css +++ b/app/assets/css/react-datetime-picker-override.css @@ -12,35 +12,40 @@ /* Extending Calendar.css from react-daterange-picker__calendar */ -.react-daterange-picker__calendar .react-calendar { +.react-calendar { background: var(--bg-calendar-color); color: var(--text-main-color); + @apply th-dark:bg-gray-iron-10; } /* calendar nav buttons */ -.react-daterange-picker__calendar .react-calendar__navigation button:disabled { +.react-calendar__navigation button:disabled { background: var(--bg-calendar-color); @apply opacity-60; @apply brightness-95 th-dark:brightness-110; + @apply th-dark:bg-gray-iron-7; } -.react-daterange-picker__calendar .react-calendar__navigation button:enabled:hover, -.react-daterange-picker__calendar .react-calendar__navigation button:enabled:focus { +.react-calendar__navigation button:enabled:hover, +.react-calendar__navigation button:enabled:focus { background: var(--bg-daterangepicker-color); + @apply th-dark:bg-gray-iron-7; } /* date tile */ -.react-daterange-picker__calendar .react-calendar__tile:disabled { - background: var(--bg-calendar-color); +.react-calendar__tile:disabled { @apply opacity-60; @apply brightness-95 th-dark:brightness-110; + @apply th-dark:bg-gray-iron-7; } -.react-daterange-picker__calendar .react-calendar__tile:enabled:hover, -.react-daterange-picker__calendar .react-calendar__tile:enabled:focus { + +.react-calendar__tile:enabled:hover, +.react-calendar__tile:enabled:focus { background: var(--bg-daterangepicker-hover); + @apply th-dark:bg-gray-iron-7; } /* today's date tile */ -.react-daterange-picker__calendar .react-calendar__tile--now { +.react-calendar__tile--now { @apply th-highcontrast:text-[color:var(--bg-calendar-color)] th-dark:text-[color:var(--bg-calendar-color)]; border-radius: 0.25rem !important; } @@ -48,23 +53,27 @@ .react-daterange-picker__calendar .react-calendar__tile--now:enabled:focus { background: var(--bg-daterangepicker-hover); color: var(--text-daterangepicker-hover); + @apply th-dark:bg-gray-iron-7; } /* probably date tile in range */ -.react-daterange-picker__calendar .react-calendar__tile--hasActive { +.react-calendar__tile--hasActive { background: var(--bg-daterangepicker-end-date); color: var(--text-daterangepicker-end-date); + @apply th-dark:bg-gray-iron-7; } -.react-daterange-picker__calendar 
.react-calendar__tile--hasActive:enabled:hover, -.react-daterange-picker__calendar .react-calendar__tile--hasActive:enabled:focus { +.react-calendar__tile--hasActive:enabled:hover, +.react-calendar__tile--hasActive:enabled:focus { background: var(--bg-daterangepicker-hover); color: var(--text-daterangepicker-hover); + @apply th-dark:bg-gray-iron-7; } -.react-daterange-picker__calendar .react-calendar__tile--active:enabled:hover, -.react-daterange-picker__calendar .react-calendar__tile--active:enabled:focus { +.react-calendar__tile--active:enabled:hover, +.react-calendar__tile--active:enabled:focus { background: var(--bg-daterangepicker-hover); color: var(--text-daterangepicker-hover); + @apply th-dark:bg-gray-iron-7; } .react-daterange-picker__calendar @@ -75,9 +84,10 @@ } /* on range select hover */ -.react-daterange-picker__calendar .react-calendar--selectRange .react-calendar__tile--hover { +.react-calendar--selectRange .react-calendar__tile--hover { background: var(--bg-daterangepicker-in-range); color: var(--text-daterangepicker-in-range); + @apply th-dark:bg-gray-iron-7; } /* @@ -111,4 +121,5 @@ .react-calendar__tile--active.react-calendar__month-view__days__day--weekend { color: var(--text-daterangepicker-active); + @apply th-dark:bg-gray-iron-7; } diff --git a/app/assets/css/theme.css b/app/assets/css/theme.css index f104f3e7b..318e0d9e4 100644 --- a/app/assets/css/theme.css +++ b/app/assets/css/theme.css @@ -3,6 +3,16 @@ --black-color: var(--ui-black); --white-color: var(--ui-white); + --graphite-600: #3a3b3f; + --graphite-700: #2e2f33; + --graphite-800: #222326; + --graphite-900: #161719; + + --mist-50: #fcfbfa; + --mist-100: #f7f6f3; + --mist-200: #f0f0ec; + --mist-300: #e8e7e2; + --grey-1: #212121; --grey-2: #181818; --grey-3: #383838; @@ -58,6 +68,8 @@ --grey-58: #ebf4f8; --grey-59: #e6e6e6; --grey-61: rgb(231, 231, 231); + --grey-62: #fdfdfd; + --grey-63: #121212; --blue-1: #219; --blue-2: #337ab7; @@ -99,17 +111,16 @@ /* Default Theme */ --bg-card-color: var(--white-color); --bg-main-color: var(--white-color); - --bg-body-color: var(--grey-9); + --bg-body-color: var(--grey-62); --bg-checkbox-border-color: var(--grey-49); - --bg-sidebar-color: var(--ui-blue-10); - --bg-sidebar-nav-color: var(--ui-blue-11); + --bg-sidebar-color: var(--graphite-700); + --bg-sidebar-nav-color: var(--graphite-600); --bg-widget-color: var(--white-color); --bg-widget-header-color: var(--grey-10); --bg-widget-table-color: var(--ui-gray-3); --bg-header-color: var(--white-color); --bg-hover-table-color: var(--grey-14); --bg-input-group-addon-color: var(--ui-gray-3); - --bg-btn-default-color: var(--ui-blue-10); --bg-blocklist-hover-color: var(--ui-blue-2); --bg-table-color: var(--white-color); --bg-md-checkbox-color: var(--grey-12); @@ -128,7 +139,8 @@ --border-pagination-color: var(--ui-white); --bg-pagination-span-color: var(--white-color); --bg-pagination-hover-color: var(--ui-blue-3); - --bg-motd-body-color: var(--grey-20); + --bg-motd-body-color: var(--mist-50); + --bg-motd-btn-color: var(--graphite-700); --bg-item-highlighted-color: var(--grey-21); --bg-item-highlighted-null-color: var(--grey-14); --bg-panel-body-color: var(--white-color); @@ -144,8 +156,6 @@ --bg-daterangepicker-in-range: var(--grey-58); --bg-daterangepicker-active: var(--blue-14); --bg-input-autofill-color: var(--bg-inputbox); - --bg-btn-default-hover-color: var(--ui-blue-9); - --bg-btn-focus: var(--grey-59); --bg-small-select-color: var(--white-color); --bg-stepper-item-active: var(--white-color); --bg-stepper-item-counter: 
var(--grey-61); @@ -177,7 +187,6 @@ --text-navtabs-color: var(--grey-7); --text-navtabs-hover-color: var(--grey-6); --text-nav-tab-active-color: var(--grey-25); - --text-dropdown-menu-color: var(--grey-6); --text-log-viewer-color: var(--black-color); --text-json-tree-color: var(--blue-3); @@ -189,6 +198,8 @@ --text-pagination-color: var(--grey-26); --text-pagination-span-color: var(--grey-3); --text-pagination-span-hover-color: var(--grey-3); + --text-motd-body-color: var(--black-color); + --text-motd-btn-color: var(--mist-100); --text-summary-color: var(--black-color); --text-tooltip-color: var(--white-color); --text-rzslider-color: var(--grey-36); @@ -203,6 +214,7 @@ --text-button-group-color: var(--ui-gray-9); --text-button-dangerlight-color: var(--ui-error-5); --text-stepper-active-color: var(--ui-blue-8); + --border-color: var(--grey-42); --border-widget-color: var(--grey-43); --border-sidebar-color: var(--ui-blue-9); @@ -218,7 +230,8 @@ --border-pre-color: var(--grey-43); --border-pagination-span-color: var(--ui-white); --border-pagination-hover-color: var(--ui-white); - --border-panel-color: var(--white-color); + --border-motd-body-color: var(--mist-300); + --border-panel-color: var(--mist-300); --border-input-sm-color: var(--grey-47); --border-daterangepicker-color: var(--grey-19); --border-calendar-table: var(--white-color); @@ -265,17 +278,14 @@ --text-log-viewer-color-json-red: var(--text-log-viewer-color); --text-log-viewer-color-json-blue: var(--text-log-viewer-color); - --bg-body-color: var(--grey-2); - --bg-btn-default-color: var(--grey-3); + --bg-body-color: var(--grey-63); --bg-blocklist-hover-color: var(--ui-gray-iron-10); - --bg-blocklist-item-selected-color: var(--grey-3); + --bg-blocklist-item-selected-color: var(--ui-gray-iron-10); --bg-card-color: var(--grey-1); --bg-checkbox-border-color: var(--grey-8); --bg-code-color: var(--grey-2); --bg-dropdown-menu-color: var(--ui-gray-warm-8); --bg-main-color: var(--grey-2); - --bg-sidebar-color: var(--grey-1); - --bg-sidebar-nav-color: var(--grey-2); --bg-widget-color: var(--grey-1); --bg-widget-header-color: var(--grey-3); --bg-widget-table-color: var(--grey-3); @@ -296,7 +306,8 @@ --bg-pagination-color: var(--grey-3); --bg-pagination-span-color: var(--grey-1); --bg-pagination-hover-color: var(--grey-3); - --bg-motd-body-color: var(--grey-1); + --bg-motd-body-color: var(--graphite-800); + --bg-motd-btn-color: var(--mist-100); --bg-item-highlighted-color: var(--grey-2); --bg-item-highlighted-null-color: var(--grey-2); --bg-panel-body-color: var(--grey-1); @@ -316,8 +327,6 @@ --bg-daterangepicker-in-range: var(--ui-gray-warm-11); --bg-daterangepicker-active: var(--blue-14); --bg-input-autofill-color: var(--bg-inputbox); - --bg-btn-default-hover-color: var(--grey-4); - --bg-btn-focus: var(--grey-3); --bg-small-select-color: var(--grey-2); --bg-stepper-item-active: var(--grey-1); --bg-stepper-item-counter: var(--grey-7); @@ -348,7 +357,6 @@ --text-navtabs-color: var(--grey-8); --text-navtabs-hover-color: var(--grey-9); --text-nav-tab-active-color: var(--white-color); - --text-dropdown-menu-color: var(--white-color); --text-log-viewer-color: var(--white-color); --text-json-tree-color: var(--grey-40); @@ -360,6 +368,8 @@ --text-pagination-color: var(--white-color); --text-pagination-span-color: var(--ui-white); --text-pagination-span-hover-color: var(--ui-white); + --text-motd-body-color: var(--mist-100); + --text-motd-btn-color: var(--graphite-700); --text-summary-color: var(--white-color); --text-tooltip-color: 
var(--white-color); --text-rzslider-color: var(--white-color); @@ -374,6 +384,7 @@ --text-button-group-color: var(--ui-white); --text-button-dangerlight-color: var(--ui-error-7); --text-stepper-active-color: var(--ui-white); + --border-color: var(--grey-3); --border-widget-color: var(--grey-1); --border-sidebar-color: var(--ui-gray-8); @@ -388,9 +399,10 @@ --border-navtabs-color: var(--grey-38); --border-pre-color: var(--grey-3); --border-blocklist: var(--ui-gray-9); - --border-blocklist-item-selected-color: var(--grey-38); + --border-blocklist-item-selected-color: var(--grey-31); --border-pagination-span-color: var(--grey-1); --border-pagination-hover-color: var(--grey-3); + --border-motd-body-color: var(--graphite-800); --border-panel-color: var(--grey-2); --border-input-sm-color: var(--grey-3); --border-daterangepicker-color: var(--grey-3); @@ -450,6 +462,7 @@ --bg-panel-body-color: var(--black-color); --bg-dropdown-menu-color: var(--ui-gray-warm-8); --bg-motd-body-color: var(--black-color); + --bg-motd-btn-color: var(--white-color); --bg-blocklist-hover-color: var(--black-color); --bg-blocklist-item-selected-color: var(--black-color); --bg-input-group-addon-color: var(--grey-3); @@ -481,11 +494,8 @@ --bg-navtabs-hover-color: var(--grey-3); --bg-nav-tab-active-color: var(--ui-black); - --bg-btn-default-color: var(--black-color); --bg-input-autofill-color: var(--bg-inputbox); --bg-code-color: var(--ui-black); - --bg-btn-default-hover-color: var(--grey-4); - --bg-btn-focus: var(--black-color); --bg-small-select-color: var(--black-color); --bg-stepper-item-active: var(--black-color); --bg-stepper-item-counter: var(--grey-3); @@ -523,6 +533,8 @@ --text-daterangepicker-end-date: var(--ui-white); --text-daterangepicker-in-range: var(--white-color); --text-daterangepicker-active: var(--white-color); + --text-motd-body-color: var(--white-color); + --text-motd-btn-color: var(--black-color); --text-json-tree-color: var(--white-color); --text-json-tree-leaf-color: var(--white-color); --text-json-tree-branch-preview-color: var(--white-color); @@ -553,6 +565,7 @@ --border-input-sm-color: var(--white-color); --border-pagination-color: var(--grey-1); --border-pagination-span-color: var(--grey-1); + --border-motd-body-color: var(--white-color); --border-daterangepicker-color: var(--white-color); --border-calendar-table: var(--black-color); --border-daterangepicker: var(--black-color); diff --git a/app/assets/css/vendor-override.css b/app/assets/css/vendor-override.css index 74fa94d4e..12e0fc947 100644 --- a/app/assets/css/vendor-override.css +++ b/app/assets/css/vendor-override.css @@ -201,8 +201,18 @@ pre { background-color: var(--bg-progress-color); } -.motd-body { - background-color: var(--bg-motd-body-color) !important; +.widget-body.motd-body { + border: 1px solid var(--border-motd-body-color); + color: var(--text-motd-body-color); + background: var(--bg-motd-body-color) url(../images/purple-gradient.svg) top right / 40% no-repeat; +} + +.widget-body.motd-body .btn.btn-link, +.widget-body.motd-body .btn.btn-link:hover { + padding: 0 5px 0 4px; + border-radius: 4px; + background-color: var(--bg-motd-btn-color); + color: var(--text-motd-btn-color); } .panel-body { @@ -408,14 +418,10 @@ input:-webkit-autofill { } .sidebar.tippy-box[data-placement^='right'] > .tippy-arrow:before { - border-right: 8px solid var(--ui-blue-9); + border-right: 8px solid var(--graphite-600); border-width: 6px 8px 6px 0; } -[theme='dark'] .sidebar.tippy-box[data-placement^='right'] > .tippy-arrow:before { - border-right: 
8px solid var(--ui-gray-true-9); -} - [theme='highcontrast'] .sidebar.tippy-box[data-placement^='right'] > .tippy-arrow:before { border-right: 8px solid var(--ui-white); } diff --git a/app/assets/ico/android-chrome-192x192.png b/app/assets/ico/android-chrome-192x192.png index 8f31e405a..236db0e2b 100644 Binary files a/app/assets/ico/android-chrome-192x192.png and b/app/assets/ico/android-chrome-192x192.png differ diff --git a/app/assets/ico/android-chrome-256x256.png b/app/assets/ico/android-chrome-256x256.png index cc95d0044..52848e019 100644 Binary files a/app/assets/ico/android-chrome-256x256.png and b/app/assets/ico/android-chrome-256x256.png differ diff --git a/app/assets/ico/apple-touch-icon.png b/app/assets/ico/apple-touch-icon.png index aeea31ce8..f05e9c161 100644 Binary files a/app/assets/ico/apple-touch-icon.png and b/app/assets/ico/apple-touch-icon.png differ diff --git a/app/assets/ico/favicon-16x16.png b/app/assets/ico/favicon-16x16.png index f7a26b564..8c60e5d9f 100644 Binary files a/app/assets/ico/favicon-16x16.png and b/app/assets/ico/favicon-16x16.png differ diff --git a/app/assets/ico/favicon-32x32.png b/app/assets/ico/favicon-32x32.png index d1ccc9cea..8735718a2 100644 Binary files a/app/assets/ico/favicon-32x32.png and b/app/assets/ico/favicon-32x32.png differ diff --git a/app/assets/ico/favicon.ico b/app/assets/ico/favicon.ico index 28ed661f9..066969400 100644 Binary files a/app/assets/ico/favicon.ico and b/app/assets/ico/favicon.ico differ diff --git a/app/assets/ico/logomark.svg b/app/assets/ico/logomark.svg index b7679d482..140c1b494 100644 --- a/app/assets/ico/logomark.svg +++ b/app/assets/ico/logomark.svg @@ -1,35 +1,12 @@ - - - - - - - - - - - - - - - - - + + + + + - - - - - - - - - - - - - - + + + diff --git a/app/assets/ico/mstile-150x150.png b/app/assets/ico/mstile-150x150.png index 5e7eb6873..f48374538 100644 Binary files a/app/assets/ico/mstile-150x150.png and b/app/assets/ico/mstile-150x150.png differ diff --git a/app/assets/ico/safari-pinned-tab.svg b/app/assets/ico/safari-pinned-tab.svg index 79ce7b6fa..d0509a572 100644 --- a/app/assets/ico/safari-pinned-tab.svg +++ b/app/assets/ico/safari-pinned-tab.svg @@ -1 +1,6 @@ - \ No newline at end of file + + + + + + \ No newline at end of file diff --git a/app/assets/images/logo.png b/app/assets/images/logo.png deleted file mode 100644 index 2e46594f2..000000000 Binary files a/app/assets/images/logo.png and /dev/null differ diff --git a/app/assets/images/logo_alt.png b/app/assets/images/logo_alt.png deleted file mode 100644 index a6c6707ca..000000000 Binary files a/app/assets/images/logo_alt.png and /dev/null differ diff --git a/app/assets/images/logo_alt.svg b/app/assets/images/logo_alt.svg index 90e164ca1..8d254e4e5 100644 --- a/app/assets/images/logo_alt.svg +++ b/app/assets/images/logo_alt.svg @@ -1,60 +1,14 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + diff --git a/app/assets/images/logo_alt_black.svg b/app/assets/images/logo_alt_black.svg new file mode 100644 index 000000000..d9243b464 --- /dev/null +++ b/app/assets/images/logo_alt_black.svg @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/app/assets/images/logo_ico.png b/app/assets/images/logo_ico.png deleted file mode 100644 index b4bfd2924..000000000 Binary files a/app/assets/images/logo_ico.png and /dev/null differ diff --git a/app/assets/images/logo_small.png b/app/assets/images/logo_small.png deleted file mode 100644 index 76d3a46b0..000000000 Binary files a/app/assets/images/logo_small.png 
and /dev/null differ diff --git a/app/assets/images/logo_small_alt.png b/app/assets/images/logo_small_alt.png deleted file mode 100644 index a5bc64771..000000000 Binary files a/app/assets/images/logo_small_alt.png and /dev/null differ diff --git a/app/assets/images/portainer-github-banner.png b/app/assets/images/portainer-github-banner.png index 08776d3e3..75871e3c5 100644 Binary files a/app/assets/images/portainer-github-banner.png and b/app/assets/images/portainer-github-banner.png differ diff --git a/app/assets/images/purple-gradient.svg b/app/assets/images/purple-gradient.svg new file mode 100644 index 000000000..0b3bc7160 --- /dev/null +++ b/app/assets/images/purple-gradient.svg @@ -0,0 +1,522 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/app/constants.ts b/app/constants.ts index 8a8e687d2..e61172d27 100644 --- a/app/constants.ts +++ b/app/constants.ts @@ -5,7 +5,6 @@ export const API_ENDPOINT_CUSTOM_TEMPLATES = 'api/custom_templates'; export const API_ENDPOINT_EDGE_GROUPS = 'api/edge_groups'; export const API_ENDPOINT_EDGE_JOBS = 'api/edge_jobs'; export const API_ENDPOINT_EDGE_STACKS = 'api/edge_stacks'; -export const API_ENDPOINT_EDGE_TEMPLATES = 'api/edge_templates'; export const API_ENDPOINT_ENDPOINTS = 'api/endpoints'; export const API_ENDPOINT_ENDPOINT_GROUPS = 'api/endpoint_groups'; export const API_ENDPOINT_KUBERNETES = 'api/kubernetes'; diff --git a/app/docker/helpers/imageHelper.js b/app/docker/helpers/imageHelper.js index 0bacd6c53..3ce3d4a45 100644 --- a/app/docker/helpers/imageHelper.js +++ b/app/docker/helpers/imageHelper.js @@ -1,4 +1,4 @@ -import { buildImageFullURIFromModel, imageContainsURL } from '@/react/docker/images/utils'; +import { buildImageFullURIFromModel, imageContainsURL, fullURIIntoRepoAndTag } from '@/react/docker/images/utils'; angular.module('portainer.docker').factory('ImageHelper', ImageHelperFactory); function ImageHelperFactory() { @@ -18,8 +18,12 @@ function ImageHelperFactory() { * @param {PorImageRegistryModel} registry */ function createImageConfigForContainer(imageModel) { + const fromImage = buildImageFullURIFromModel(imageModel); + const { tag, repo } = fullURIIntoRepoAndTag(fromImage); return { - fromImage: buildImageFullURIFromModel(imageModel), + fromImage, + tag, + repo, }; } diff --git a/app/docker/views/configs/create/createconfig.html b/app/docker/views/configs/create/createconfig.html index 945330c06..6ac0bef06 100644 --- a/app/docker/views/configs/create/createconfig.html +++ b/app/docker/views/configs/create/createconfig.html @@ -18,7 
+18,7 @@
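A note on the ImageHelper change above: createImageConfigForContainer now returns the split repo and tag alongside fromImage. Below is a minimal sketch, assuming the common name[:tag] layout with latest as the default, of how a full image URI can be split; splitImageURI is a hypothetical stand-in, and the real fullURIIntoRepoAndTag in @/react/docker/images/utils may handle more cases (such as digests).

// Hypothetical helper approximating what fullURIIntoRepoAndTag() provides.
// 'registry.example.com:5000/library/nginx:1.25' ->
//   { repo: 'registry.example.com:5000/library/nginx', tag: '1.25' }
function splitImageURI(fullURI: string): { repo: string; tag: string } {
  const lastSlash = fullURI.lastIndexOf('/');
  const lastColon = fullURI.lastIndexOf(':');
  // A colon only separates a tag when it appears after the last path segment;
  // otherwise it belongs to the registry host:port prefix.
  if (lastColon > lastSlash) {
    return { repo: fullURI.slice(0, lastColon), tag: fullURI.slice(lastColon + 1) };
  }
  return { repo: fullURI, tag: 'latest' };
}

Splitting matters because the Docker Engine commit and tag endpoints take the repository and the tag as separate parameters, which is why the container commit and image tag code paths further down now consume { repo, tag } rather than a single fromImage string.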
[hunk from the ContainerController view controller; its diff --git header was lost in extraction] { const gpuOptions = _.find($scope.container.HostConfig.DeviceRequests, function (o) { - return o.Driver === 'nvidia' || o.Capabilities[0][0] === 'gpu'; + return o.Driver === 'nvidia' || (o.Capabilities && o.Capabilities.length > 0 && o.Capabilities[0].length > 0 && o.Capabilities[0][0] === 'gpu'); }); if (!gpuOptions) { return 'No GPU config found';
@@ -207,9 +207,9 @@ angular.module('portainer.docker').controller('ContainerController', [ async function commitContainerAsync() { $scope.config.commitInProgress = true; const registryModel = $scope.config.RegistryModel; - const imageConfig = ImageHelper.createImageConfigForContainer(registryModel); + const { repo, tag } = ImageHelper.createImageConfigForContainer(registryModel); try { - await commitContainer(endpoint.Id, { container: $transition$.params().id, repo: imageConfig.fromImage }); + await commitContainer(endpoint.Id, { container: $transition$.params().id, repo, tag }); Notifications.success('Image created', $transition$.params().id); $state.reload(); } catch (err) {
diff --git a/app/docker/views/docker-features-configuration/docker-features-configuration.html b/app/docker/views/docker-features-configuration/docker-features-configuration.html index dab4efa2d..8edd5843f 100644 --- a/app/docker/views/docker-features-configuration/docker-features-configuration.html +++ b/app/docker/views/docker-features-configuration/docker-features-configuration.html
@@ -67,7 +67,7 @@
@@ -114,7 +114,7 @@
@@ -125,7 +125,7 @@
@@ -136,7 +136,7 @@
@@ -146,7 +146,7 @@
- Note: The recreate/duplicate/edit feature is currently disabled (for non-admin users) by one or more security settings. + Note: The recreate/duplicate/edit feature is currently hidden (for non-admin users) by one or more security settings.
diff --git a/app/docker/views/images/build/buildimage.html b/app/docker/views/images/build/buildimage.html index 6d5446944..20c5773dc 100644 --- a/app/docker/views/images/build/buildimage.html +++ b/app/docker/views/images/build/buildimage.html @@ -94,7 +94,7 @@
diff --git a/app/docker/views/images/edit/image.html b/app/docker/views/images/edit/image.html index 7bee83a2b..af37cd0e2 100644 --- a/app/docker/views/images/edit/image.html +++ b/app/docker/views/images/edit/image.html @@ -16,19 +16,19 @@ - + - + - + diff --git a/app/docker/views/images/edit/imageController.js b/app/docker/views/images/edit/imageController.js index 78ae6107c..8ed5c9495 100644 --- a/app/docker/views/images/edit/imageController.js +++ b/app/docker/views/images/edit/imageController.js @@ -2,7 +2,6 @@ import _ from 'lodash-es'; import { PorImageRegistryModel } from 'Docker/models/porImageRegistry'; import { confirmImageExport } from '@/react/docker/images/common/ConfirmExportModal'; import { confirmDelete } from '@@/modals/confirm'; -import { fullURIIntoRepoAndTag } from '@/react/docker/images/utils'; angular.module('portainer.docker').controller('ImageController', [ '$async', @@ -71,8 +70,7 @@ angular.module('portainer.docker').controller('ImageController', [ $scope.tagImage = function () { const registryModel = $scope.formValues.RegistryModel; - const image = ImageHelper.createImageConfigForContainer(registryModel); - const { repo, tag } = fullURIIntoRepoAndTag(image.fromImage); + const { repo, tag } = ImageHelper.createImageConfigForContainer(registryModel); ImageService.tagImage($transition$.params().id, repo, tag) .then(function success() { diff --git a/app/docker/views/images/import/importImageController.js b/app/docker/views/images/import/importImageController.js index dfdb8ab1a..d5587ecb9 100644 --- a/app/docker/views/images/import/importImageController.js +++ b/app/docker/views/images/import/importImageController.js @@ -1,5 +1,4 @@ import { PorImageRegistryModel } from 'Docker/models/porImageRegistry'; -import { fullURIIntoRepoAndTag } from '@/react/docker/images/utils'; angular.module('portainer.docker').controller('ImportImageController', [ '$scope', @@ -34,8 +33,7 @@ angular.module('portainer.docker').controller('ImportImageController', [ async function tagImage(id) { const registryModel = $scope.formValues.RegistryModel; if (registryModel.Image) { - const image = ImageHelper.createImageConfigForContainer(registryModel); - const { repo, tag } = fullURIIntoRepoAndTag(image.fromImage); + const { repo, tag } = ImageHelper.createImageConfigForContainer(registryModel); try { await ImageService.tagImage(id, repo, tag); } catch (err) { diff --git a/app/docker/views/services/edit/service.html b/app/docker/views/services/edit/service.html index fc49a3e13..406c487ca 100644 --- a/app/docker/views/services/edit/service.html +++ b/app/docker/views/services/edit/service.html @@ -1,274 +1,281 @@ -
[service.html hunk body lost in extraction: the markup was stripped, leaving only bare +/- markers. The recoverable headings show that both the old and the new layout group the form into "Container specification", "Networks & ports" and "Service specification" sections.]
diff --git a/app/docker/views/services/edit/serviceController.js b/app/docker/views/services/edit/serviceController.js index 69474b398..3344ef2ab 100644 --- a/app/docker/views/services/edit/serviceController.js +++ b/app/docker/views/services/edit/serviceController.js @@ -731,6 +731,7 @@ angular.module('portainer.docker').controller('ServiceController', [ }; function initView() { + $scope.isLoading = true; var apiVersion = $scope.applicationState.endpoint.apiVersion; var agentProxy = $scope.applicationState.endpoint.mode.agentProxy; @@ -855,6 +856,9 @@ angular.module('portainer.docker').controller('ServiceController', [ $scope.secrets = []; $scope.configs = []; Notifications.error('Failure', err, 'Unable to retrieve service details'); + }) + .finally(() => { + $scope.isLoading = false; }); } diff --git a/app/edge/react/components/index.ts b/app/edge/react/components/index.ts index b4913e51c..1902af60a 100644 --- a/app/edge/react/components/index.ts +++ b/app/edge/react/components/index.ts @@ -8,6 +8,7 @@ import { EdgeAsyncIntervalsForm } from '@/react/edge/components/EdgeAsyncInterva import { EdgeCheckinIntervalField } from '@/react/edge/components/EdgeCheckInIntervalField'; import { EdgeScriptForm } from '@/react/edge/components/EdgeScriptForm'; import { EdgeGroupsSelector } from '@/react/edge/edge-stacks/components/EdgeGroupsSelector'; +import { AssociatedEdgeGroupEnvironmentsSelector } from '@/react/edge/components/AssociatedEdgeGroupEnvironmentsSelector'; const ngModule = angular .module('portainer.edge.react.components', []) @@ -61,6 +62,15 @@ const ngModule = angular 'value', 'error', ]) + ) + .component( + 'associatedEdgeGroupEnvironmentsSelector', + r2a(withReactQuery(AssociatedEdgeGroupEnvironmentsSelector), [ + 'onChange', + 'value', + 'error', + 'edgeGroupId', + ]) ); export const componentsModule = ngModule.name; diff --git a/app/index.html b/app/index.html index 370070b48..3045fe547 100644 --- a/app/index.html +++ b/app/index.html @@ -20,7 +20,7 @@ - + @@ -31,8 +31,8 @@
@@ -47,7 +47,10 @@
- +
+ + +
diff --git a/app/kubernetes/__module.js b/app/kubernetes/__module.js index 98607e569..5fb223a40 100644 --- a/app/kubernetes/__module.js +++ b/app/kubernetes/__module.js @@ -83,6 +83,13 @@ angular.module('portainer.kubernetes', ['portainer.app', registriesModule, custo }); } + // EE-5842: do not redirect shell views when the env is removed + const nextTransition = $state.transition && $state.transition.to(); + const nextTransitionName = nextTransition ? nextTransition.name : ''; + if (nextTransitionName === 'kubernetes.kubectlshell' && !endpoint) { + return; + } + const kubeTypes = [ PortainerEndpointTypes.KubernetesLocalEnvironment, PortainerEndpointTypes.AgentOnKubernetesEnvironment, @@ -120,6 +127,11 @@ angular.module('portainer.kubernetes', ['portainer.app', registriesModule, custo EndpointProvider.clean(); Notifications.error('Failed loading environment', e); } + // Prevent redirect to home for shell views when environment is unreachable + // Show toast error instead (handled above in Notifications.error) + if (nextTransitionName === 'kubernetes.kubectlshell') { + return; + } $state.go('portainer.home', params, { reload: true, inherit: false }); return false; } @@ -145,7 +157,7 @@ angular.module('portainer.kubernetes', ['portainer.app', registriesModule, custo const helmApplication = { name: 'kubernetes.helm', - url: '/helm/:namespace/:name', + url: '/helm/:namespace/:name?revision&tab', views: { 'content@': { component: 'kubernetesHelmApplicationView', @@ -424,6 +436,17 @@ angular.module('portainer.kubernetes', ['portainer.app', registriesModule, custo }, }; + const kubectlShell = { + name: 'kubernetes.kubectlshell', + url: '/kubectl-shell', + views: { + 'content@': { + component: 'kubectlShellView', + }, + 'sidebar@': {}, + }, + }; + const dashboard = { name: 'kubernetes.dashboard', url: '/dashboard', @@ -657,6 +680,7 @@ angular.module('portainer.kubernetes', ['portainer.app', registriesModule, custo $stateRegistryProvider.register(deploy); $stateRegistryProvider.register(node); $stateRegistryProvider.register(nodeStats); + $stateRegistryProvider.register(kubectlShell); $stateRegistryProvider.register(resourcePools); $stateRegistryProvider.register(namespaceCreation); $stateRegistryProvider.register(resourcePool); diff --git a/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list-item/helm-templates-list-item.css b/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list-item/helm-templates-list-item.css deleted file mode 100644 index a618dc68b..000000000 --- a/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list-item/helm-templates-list-item.css +++ /dev/null @@ -1,9 +0,0 @@ -.helm-template-item-details { - display: flex; - justify-content: space-between; - flex-wrap: wrap; -} - -.helm-template-item-details .helm-template-item-details-sub { - width: 100%; -} diff --git a/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list-item/helm-templates-list-item.html b/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list-item/helm-templates-list-item.html deleted file mode 100644 index 43658b833..000000000 --- a/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list-item/helm-templates-list-item.html +++ /dev/null @@ -1,40 +0,0 @@ - -
[helm-templates-list-item.html (deleted) body lost in extraction; the recoverable fragments show the item rendered the chart name ({{ $ctrl.model.name }}), a "Helm" badge, the description ({{ $ctrl.model.description }}) and the category annotation ({{ $ctrl.model.annotations.category }}).]
diff --git a/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list-item/helm-templates-list-item.js b/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list-item/helm-templates-list-item.js deleted file mode 100644 index adde64a03..000000000 --- a/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list-item/helm-templates-list-item.js +++ /dev/null @@ -1,17 +0,0 @@ -import angular from 'angular'; -import './helm-templates-list-item.css'; -import { HelmIcon } from '../../HelmIcon'; - -angular.module('portainer.kubernetes').component('helmTemplatesListItem', { - templateUrl: './helm-templates-list-item.html', - bindings: { - model: '<', - onSelect: '<', - }, - transclude: { - actions: '?templateItemActions', - }, - controller() { - this.fallbackIcon = HelmIcon; - }, -}); diff --git a/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list.controller.js b/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list.controller.js deleted file mode 100644 index 9ba2a579d..000000000 --- a/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list.controller.js +++ /dev/null @@ -1,43 +0,0 @@ -export default class HelmTemplatesListController { - /* @ngInject */ - constructor($async, $scope, HelmService, Notifications) { - this.$async = $async; - this.$scope = $scope; - this.HelmService = HelmService; - this.Notifications = Notifications; - - this.state = { - textFilter: '', - selectedCategory: '', - categories: [], - }; - - this.updateCategories = this.updateCategories.bind(this); - this.onCategoryChange = this.onCategoryChange.bind(this); - } - - async updateCategories() { - try { - const annotationCategories = this.charts - .map((t) => t.annotations) // get annotations - .filter((a) => a) // filter out undefined/nulls - .map((c) => c.category); // get annotation category - const availableCategories = [...new Set(annotationCategories)].sort(); // unique and sort - this.state.categories = availableCategories.map((cat) => ({ label: cat, value: cat })); - } catch (err) { - this.Notifications.error('Failure', err, 'Unable to retrieve helm charts categories'); - } - } - - onCategoryChange(value) { - return this.$scope.$evalAsync(() => { - this.state.selectedCategory = value || ''; - }); - } - - $onChanges() { - if (this.charts.length > 0) { - this.updateCategories(); - } - } -} diff --git a/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list.html b/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list.html deleted file mode 100644 index 8f3f2fb4a..000000000 --- a/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list.html +++ /dev/null @@ -1,55 +0,0 @@ -
[helm-templates-list.html (deleted) body lost in extraction; recoverable strings: the "{{ $ctrl.titleText }}" heading, the hint "Select the Helm chart to use. Bring further Helm charts into your selection list via User settings - Helm repositories.", and the empty/loading states "No Helm charts found", "Loading...", "Initial download of Helm charts can take a few minutes" and "No helm charts available.".]
diff --git a/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list.js b/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list.js deleted file mode 100644 index 2366e8d5a..000000000 --- a/app/kubernetes/components/helm/helm-templates/helm-templates-list/helm-templates-list.js +++ /dev/null @@ -1,14 +0,0 @@ -import angular from 'angular'; -import controller from './helm-templates-list.controller'; - -angular.module('portainer.kubernetes').component('helmTemplatesList', { - templateUrl: './helm-templates-list.html', - controller, - bindings: { - loading: '<', - titleText: '@', - charts: '<', - tableKey: '@', - selectAction: '<', - }, -}); diff --git a/app/kubernetes/components/helm/helm-templates/helm-templates.controller.js b/app/kubernetes/components/helm/helm-templates/helm-templates.controller.js deleted file mode 100644 index 8b62c6af0..000000000 --- a/app/kubernetes/components/helm/helm-templates/helm-templates.controller.js +++ /dev/null @@ -1,207 +0,0 @@ -import _ from 'lodash-es'; -import KubernetesNamespaceHelper from 'Kubernetes/helpers/namespaceHelper'; -import { confirmWebEditorDiscard } from '@@/modals/confirm'; -import { HelmIcon } from './HelmIcon'; -export default class HelmTemplatesController { - /* @ngInject */ - constructor($analytics, $async, $state, $window, $anchorScroll, Authentication, HelmService, KubernetesResourcePoolService, Notifications) { - this.$analytics = $analytics; - this.$async = $async; - this.$window = $window; - this.$state = $state; - this.$anchorScroll = $anchorScroll; - this.Authentication = Authentication; - this.HelmService = HelmService; - this.KubernetesResourcePoolService = KubernetesResourcePoolService; - this.Notifications = Notifications; - - this.fallbackIcon = HelmIcon; - - this.editorUpdate = this.editorUpdate.bind(this); - this.uiCanExit = this.uiCanExit.bind(this); - this.installHelmchart = this.installHelmchart.bind(this); - this.getHelmValues = this.getHelmValues.bind(this); - this.selectHelmChart = this.selectHelmChart.bind(this); - this.getHelmRepoURLs = this.getHelmRepoURLs.bind(this); - this.getLatestCharts = this.getLatestCharts.bind(this); - this.getResourcePools = this.getResourcePools.bind(this); - this.clearHelmChart = this.clearHelmChart.bind(this); - - $window.onbeforeunload = () => { - if (this.state.isEditorDirty) { - return ''; - } - }; - } - - clearHelmChart() { - this.state.chart = null; - this.onSelectHelmChart(''); - } - - editorUpdate(contentvalues) { - if (this.state.originalvalues === contentvalues) { - this.state.isEditorDirty = false; - } else { - this.state.values = contentvalues; - this.state.isEditorDirty = true; - } - } - - async uiCanExit() { - if (this.state.isEditorDirty) { - return confirmWebEditorDiscard(); - } - } - - async installHelmchart() { - this.state.actionInProgress = true; - try { - const payload = { - Name: this.name, - Repo: this.state.chart.repo, - Chart: this.state.chart.name, - Values: this.state.values, - Namespace: this.namespace, - }; - await this.HelmService.install(this.endpoint.Id, payload); - this.Notifications.success('Success', 'Helm chart successfully installed'); - this.$analytics.eventTrack('kubernetes-helm-install', { category: 'kubernetes', metadata: { 'chart-name': this.state.chart.name } }); - this.state.isEditorDirty = false; - this.$state.go('kubernetes.applications'); - } catch (err) { - this.Notifications.error('Installation error', err); - } finally { - this.state.actionInProgress = false; - } - } - - 
async getHelmValues() { - this.state.loadingValues = true; - try { - const { values } = await this.HelmService.values(this.state.chart.repo, this.state.chart.name); - this.state.values = values; - this.state.originalvalues = values; - } catch (err) { - this.Notifications.error('Failure', err, 'Unable to retrieve helm chart values.'); - } finally { - this.state.loadingValues = false; - } - } - - async selectHelmChart(chart) { - window.scrollTo(0, 0); - this.state.showCustomValues = false; - this.state.chart = chart; - this.onSelectHelmChart(chart.name); - await this.getHelmValues(); - } - - /** - * @description This function is used to get the helm repo urls for the endpoint and user - * @returns {Promise} list of helm repo urls - */ - async getHelmRepoURLs() { - this.state.reposLoading = true; - try { - // fetch globally set helm repo and user helm repos (parallel) - const { GlobalRepository, UserRepositories } = await this.HelmService.getHelmRepositories(this.user.ID); - this.state.globalRepository = GlobalRepository; - const userHelmReposUrls = UserRepositories.map((repo) => repo.URL); - const uniqueHelmRepos = [...new Set([GlobalRepository, ...userHelmReposUrls])].map((url) => url.toLowerCase()).filter((url) => url); // remove duplicates and blank, to lowercase - this.state.repos = uniqueHelmRepos; - return uniqueHelmRepos; - } catch (err) { - this.Notifications.error('Failure', err, 'Unable to retrieve helm repo urls.'); - } finally { - this.state.reposLoading = false; - } - } - - /** - * @description This function is used to fetch the respective index.yaml files for the provided helm repo urls - * @param {string[]} helmRepos list of helm repositories - * @param {bool} append append charts returned from repo to existing list of helm charts - */ - async getLatestCharts(helmRepos) { - this.state.chartsLoading = true; - try { - const promiseList = helmRepos.map((repo) => this.HelmService.search(repo)); - // fetch helm charts from all the provided helm repositories (parallel) - // Promise.allSettled is used to account for promise failure(s) - in cases the user has provided invalid helm repo - const chartPromises = await Promise.allSettled(promiseList); - const latestCharts = chartPromises - .filter((tp) => tp.status === 'fulfilled') // remove failed promises - .map((tp) => ({ entries: tp.value.entries, repo: helmRepos[chartPromises.indexOf(tp)] })) // extract chart entries with respective repo data - .flatMap( - ({ entries, repo }) => Object.values(entries).map((charts) => ({ ...charts[0], repo })) // flatten chart entries to single array with respective repo - ); - - this.state.charts = latestCharts; - } catch (err) { - this.Notifications.error('Failure', err, 'Unable to retrieve helm repo charts.'); - } finally { - this.state.chartsLoading = false; - } - } - - async getResourcePools() { - this.state.resourcePoolsLoading = true; - try { - const resourcePools = await this.KubernetesResourcePoolService.get(); - - const nonSystemNamespaces = resourcePools.filter( - (resourcePool) => !KubernetesNamespaceHelper.isSystemNamespace(resourcePool.Namespace.Name) && resourcePool.Namespace.Status === 'Active' - ); - this.state.resourcePools = _.sortBy(nonSystemNamespaces, ({ Namespace }) => (Namespace.Name === 'default' ? 
0 : 1)); - this.state.resourcePool = this.state.resourcePools[0]; - } catch (err) { - this.Notifications.error('Failure', err, 'Unable to retrieve initial helm data.'); - } finally { - this.state.resourcePoolsLoading = false; - } - } - - $onInit() { - return this.$async(async () => { - this.user = this.Authentication.getUserDetails(); - - this.state = { - appName: '', - chart: null, - showCustomValues: false, - actionInProgress: false, - resourcePools: [], - resourcePool: '', - values: null, - originalvalues: null, - repos: [], - charts: [], - loadingValues: false, - isEditorDirty: false, - chartsLoading: false, - resourcePoolsLoading: false, - viewReady: false, - isAdmin: this.Authentication.isAdmin(), - globalRepository: undefined, - }; - - const helmRepos = await this.getHelmRepoURLs(); - if (helmRepos) { - await Promise.all([this.getLatestCharts(helmRepos), this.getResourcePools()]); - } - if (this.state.charts.length > 0 && this.$state.params.chartName) { - const chart = this.state.charts.find((chart) => chart.name === this.$state.params.chartName); - if (chart) { - this.selectHelmChart(chart); - } - } - - this.state.viewReady = true; - }); - } - - $onDestroy() { - this.state.isEditorDirty = false; - } -} diff --git a/app/kubernetes/components/helm/helm-templates/helm-templates.html b/app/kubernetes/components/helm/helm-templates/helm-templates.html deleted file mode 100644 index a5f8dc960..000000000 --- a/app/kubernetes/components/helm/helm-templates/helm-templates.html +++ /dev/null @@ -1,113 +0,0 @@ -
[helm-templates.html (deleted) body lost in extraction; recoverable strings: the selected chart header ({{ $ctrl.state.chart.name }} with a "Helm" badge), the note "You can get more information about Helm values file format in the official documentation.", and an "Actions" section.]
- diff --git a/app/kubernetes/components/helm/helm-templates/helm-templates.js b/app/kubernetes/components/helm/helm-templates/helm-templates.js deleted file mode 100644 index 9cd4b1158..000000000 --- a/app/kubernetes/components/helm/helm-templates/helm-templates.js +++ /dev/null @@ -1,14 +0,0 @@ -import angular from 'angular'; -import controller from './helm-templates.controller'; - -angular.module('portainer.kubernetes').component('helmTemplatesView', { - templateUrl: './helm-templates.html', - controller, - bindings: { - endpoint: '<', - namespace: '<', - stackName: '<', - onSelectHelmChart: '<', - name: '<', - }, -}); diff --git a/app/kubernetes/components/kubernetes-configuration-data/kubernetesConfigurationData.html b/app/kubernetes/components/kubernetes-configuration-data/kubernetesConfigurationData.html index ca2caebef..193ce8be0 100644 --- a/app/kubernetes/components/kubernetes-configuration-data/kubernetesConfigurationData.html +++ b/app/kubernetes/components/kubernetes-configuration-data/kubernetesConfigurationData.html @@ -165,7 +165,7 @@ value="$ctrl.formValues.DataYaml" on-change="($ctrl.editorUpdate)" yml="true" - placeholder="Define or paste key-value pairs, one pair per line" + text-tip="Define or paste key-value pairs, one pair per line" >
diff --git a/app/kubernetes/react/components/index.ts b/app/kubernetes/react/components/index.ts index 9775b9453..cfb103823 100644 --- a/app/kubernetes/react/components/index.ts +++ b/app/kubernetes/react/components/index.ts @@ -58,6 +58,7 @@ import { AppDeploymentTypeFormSection } from '@/react/kubernetes/applications/co import { EnvironmentVariablesFormSection } from '@/react/kubernetes/applications/components/EnvironmentVariablesFormSection/EnvironmentVariablesFormSection'; import { kubeEnvVarValidationSchema } from '@/react/kubernetes/applications/components/EnvironmentVariablesFormSection/kubeEnvVarValidationSchema'; import { IntegratedAppsDatatable } from '@/react/kubernetes/components/IntegratedAppsDatatable/IntegratedAppsDatatable'; +import { HelmTemplates } from '@/react/kubernetes/helm/HelmTemplates/HelmTemplates'; import { namespacesModule } from './namespaces'; import { clusterManagementModule } from './clusterManagement'; @@ -91,6 +92,7 @@ export const ngModule = angular 'onChange', 'placeholder', 'value', + 'allowSelectAll', ]) ) .component( @@ -205,6 +207,14 @@ export const ngModule = angular 'tableTitle', 'dataCy', ]) + ) + .component( + 'helmTemplatesView', + r2a(withUIRouter(withCurrentUser(HelmTemplates)), [ + 'onSelectHelmChart', + 'namespace', + 'name', + ]) ); export const componentsModule = ngModule.name; diff --git a/app/kubernetes/react/views/index.ts b/app/kubernetes/react/views/index.ts index a6c440de3..aeb161dcd 100644 --- a/app/kubernetes/react/views/index.ts +++ b/app/kubernetes/react/views/index.ts @@ -22,6 +22,9 @@ import { VolumesView } from '@/react/kubernetes/volumes/ListView/VolumesView'; import { NamespaceView } from '@/react/kubernetes/namespaces/ItemView/NamespaceView'; import { AccessView } from '@/react/kubernetes/namespaces/AccessView/AccessView'; import { JobsView } from '@/react/kubernetes/more-resources/JobsView/JobsView'; +import { ClusterView } from '@/react/kubernetes/cluster/ClusterView'; +import { HelmApplicationView } from '@/react/kubernetes/helm/HelmApplicationView'; +import { KubectlShellView } from '@/react/kubernetes/cluster/KubectlShell/KubectlShellView'; export const viewsModule = angular .module('portainer.kubernetes.react.views', []) @@ -78,6 +81,18 @@ export const viewsModule = angular [] ) ) + .component( + 'kubernetesHelmApplicationView', + r2a(withUIRouter(withReactQuery(withCurrentUser(HelmApplicationView))), []) + ) + .component( + 'kubectlShellView', + r2a(withUIRouter(withReactQuery(withCurrentUser(KubectlShellView))), []) + ) + .component( + 'kubernetesClusterView', + r2a(withUIRouter(withReactQuery(withCurrentUser(ClusterView))), []) + ) .component( 'kubernetesConfigureView', r2a(withUIRouter(withReactQuery(withCurrentUser(ConfigureView))), []) diff --git a/app/kubernetes/registries/kube-registry-access-view/kube-registry-access-view.html b/app/kubernetes/registries/kube-registry-access-view/kube-registry-access-view.html index 11184ae0f..5c5e68255 100644 --- a/app/kubernetes/registries/kube-registry-access-view/kube-registry-access-view.html +++ b/app/kubernetes/registries/kube-registry-access-view/kube-registry-access-view.html @@ -19,6 +19,7 @@ namespaces="$ctrl.resourcePools" placeholder="'Select one or more namespaces'" on-change="($ctrl.onChangeResourcePools)" + allow-select-all="true" >
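The resourcePoolService change just below swaps KubernetesNamespaceService for the shared getNamespaces() fetcher and then flattens the namespace status for the legacy Angular converters. A minimal sketch of that reshaping, with the namespace type reduced to the two fields involved (the real objects carry more fields, and the interface names here are placeholders):

// Namespace as returned by getNamespaces(): the status is an object with a phase.
interface FetchedNamespace {
  Name: string;
  Status: { phase: string };
}

// Shape the older Angular resource-pool code expects: a flat status string.
interface LegacyNamespace {
  Name: string;
  Status: string;
}

function toLegacyNamespace(namespace: FetchedNamespace): LegacyNamespace {
  // Keep every other field as-is and replace Status with its phase,
  // mirroring the namespacesFormattedStatus mapping in the diff below.
  return { ...namespace, Status: namespace.Status.phase };
}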
diff --git a/app/kubernetes/services/resourcePoolService.js b/app/kubernetes/services/resourcePoolService.js index 639c44db1..d829afa0b 100644 --- a/app/kubernetes/services/resourcePoolService.js +++ b/app/kubernetes/services/resourcePoolService.js @@ -3,6 +3,7 @@ import _ from 'lodash-es'; import angular from 'angular'; import KubernetesResourcePoolConverter from 'Kubernetes/converters/resourcePool'; import KubernetesResourceQuotaHelper from 'Kubernetes/helpers/resourceQuotaHelper'; +import { getNamespaces } from '@/react/kubernetes/namespaces/queries/useNamespacesQuery'; /* @ngInject */ export function KubernetesResourcePoolService( @@ -11,7 +12,8 @@ export function KubernetesResourcePoolService( KubernetesNamespaceService, KubernetesResourceQuotaService, KubernetesIngressService, - KubernetesPortainerNamespaces + KubernetesPortainerNamespaces, + EndpointProvider ) { return { get, @@ -37,9 +39,14 @@ export function KubernetesResourcePoolService( // getting the quota for all namespaces is costly by default, so disable getting it by default async function getAll({ getQuota = false }) { - const namespaces = await KubernetesNamespaceService.get(); + const namespaces = await getNamespaces(EndpointProvider.endpointID()); + // there is a lot of downstream logic using the angular namespace type with a '.Status' field (not '.Status.phase'), so format the status here to match this logic + const namespacesFormattedStatus = namespaces.map((namespace) => ({ + ...namespace, + Status: namespace.Status.phase, + })); const pools = await Promise.all( - _.map(namespaces, async (namespace) => { + _.map(namespacesFormattedStatus, async (namespace) => { const name = namespace.Name; const pool = KubernetesResourcePoolConverter.apiToResourcePool(namespace); if (getQuota) { diff --git a/app/kubernetes/views/applications/helm/helm.controller.js b/app/kubernetes/views/applications/helm/helm.controller.js deleted file mode 100644 index 0b027c2ef..000000000 --- a/app/kubernetes/views/applications/helm/helm.controller.js +++ /dev/null @@ -1,52 +0,0 @@ -import PortainerError from 'Portainer/error'; - -export default class KubernetesHelmApplicationController { - /* @ngInject */ - constructor($async, $state, Authentication, Notifications, HelmService) { - this.$async = $async; - this.$state = $state; - this.Authentication = Authentication; - this.Notifications = Notifications; - this.HelmService = HelmService; - } - - /** - * APPLICATION - */ - async getHelmApplication() { - try { - this.state.dataLoading = true; - const releases = await this.HelmService.listReleases(this.endpoint.Id, { filter: `^${this.state.params.name}$`, namespace: this.state.params.namespace }); - if (releases.length > 0) { - this.state.release = releases[0]; - } else { - throw new PortainerError(`Release ${this.state.params.name} not found`); - } - } catch (err) { - this.Notifications.error('Failure', err, 'Unable to retrieve helm application details'); - } finally { - this.state.dataLoading = false; - } - } - - $onInit() { - return this.$async(async () => { - this.state = { - dataLoading: true, - viewReady: false, - params: { - name: this.$state.params.name, - namespace: this.$state.params.namespace, - }, - release: { - name: undefined, - chart: undefined, - app_version: undefined, - }, - }; - - await this.getHelmApplication(); - this.state.viewReady = true; - }); - } -} diff --git a/app/kubernetes/views/applications/helm/helm.css b/app/kubernetes/views/applications/helm/helm.css deleted file mode 100644 index 784f878fd..000000000 --- 
a/app/kubernetes/views/applications/helm/helm.css +++ /dev/null @@ -1,5 +0,0 @@ -.release-table tr { - display: grid; - grid-auto-flow: column; - grid-template-columns: 1fr 4fr; -} diff --git a/app/kubernetes/views/applications/helm/helm.html b/app/kubernetes/views/applications/helm/helm.html deleted file mode 100644 index a815e8a9d..000000000 --- a/app/kubernetes/views/applications/helm/helm.html +++ /dev/null @@ -1,50 +0,0 @@ - - - - -
[helm.html (deleted) body lost in extraction; the recoverable fragments show a "Release" table listing Name ({{ $ctrl.state.release.name }}), Chart ({{ $ctrl.state.release.chart }}) and App version ({{ $ctrl.state.release.app_version }}).]
diff --git a/app/kubernetes/views/applications/helm/index.js b/app/kubernetes/views/applications/helm/index.js deleted file mode 100644 index b99f41d4b..000000000 --- a/app/kubernetes/views/applications/helm/index.js +++ /dev/null @@ -1,11 +0,0 @@ -import angular from 'angular'; -import controller from './helm.controller'; -import './helm.css'; - -angular.module('portainer.kubernetes').component('kubernetesHelmApplicationView', { - templateUrl: './helm.html', - controller, - bindings: { - endpoint: '<', - }, -}); diff --git a/app/kubernetes/views/applications/logs/logsController.js b/app/kubernetes/views/applications/logs/logsController.js index 66601d98b..37cae6cad 100644 --- a/app/kubernetes/views/applications/logs/logsController.js +++ b/app/kubernetes/views/applications/logs/logsController.js @@ -77,6 +77,7 @@ class KubernetesApplicationLogsController { await this.getApplicationLogsAsync(); } catch (err) { this.Notifications.error('Failure', err, 'Unable to retrieve application logs'); + this.stopRepeater(); } finally { this.state.viewReady = true; } diff --git a/app/kubernetes/views/cluster/cluster.html b/app/kubernetes/views/cluster/cluster.html deleted file mode 100644 index 46acbf931..000000000 --- a/app/kubernetes/views/cluster/cluster.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - -
[cluster.html (deleted) body lost in extraction; no readable text survived the stripped markup.]
diff --git a/app/kubernetes/views/cluster/cluster.js b/app/kubernetes/views/cluster/cluster.js deleted file mode 100644 index 708076037..000000000 --- a/app/kubernetes/views/cluster/cluster.js +++ /dev/null @@ -1,8 +0,0 @@ -angular.module('portainer.kubernetes').component('kubernetesClusterView', { - templateUrl: './cluster.html', - controller: 'KubernetesClusterController', - controllerAs: 'ctrl', - bindings: { - endpoint: '<', - }, -}); diff --git a/app/kubernetes/views/cluster/clusterController.js b/app/kubernetes/views/cluster/clusterController.js deleted file mode 100644 index b8f6df290..000000000 --- a/app/kubernetes/views/cluster/clusterController.js +++ /dev/null @@ -1,139 +0,0 @@ -import angular from 'angular'; -import _ from 'lodash-es'; -import filesizeParser from 'filesize-parser'; -import KubernetesResourceReservationHelper from 'Kubernetes/helpers/resourceReservationHelper'; -import { KubernetesResourceReservation } from 'Kubernetes/models/resource-reservation/models'; -import { getMetricsForAllNodes, getTotalResourcesForAllApplications } from '@/react/kubernetes/metrics/metrics.ts'; - -class KubernetesClusterController { - /* @ngInject */ - constructor($async, $state, Notifications, LocalStorage, Authentication, KubernetesNodeService, KubernetesApplicationService, KubernetesEndpointService, EndpointService) { - this.$async = $async; - this.$state = $state; - this.Authentication = Authentication; - this.Notifications = Notifications; - this.LocalStorage = LocalStorage; - this.KubernetesNodeService = KubernetesNodeService; - this.KubernetesApplicationService = KubernetesApplicationService; - this.KubernetesEndpointService = KubernetesEndpointService; - this.EndpointService = EndpointService; - - this.onInit = this.onInit.bind(this); - this.getNodes = this.getNodes.bind(this); - this.getNodesAsync = this.getNodesAsync.bind(this); - this.getApplicationsAsync = this.getApplicationsAsync.bind(this); - this.getEndpointsAsync = this.getEndpointsAsync.bind(this); - this.hasResourceUsageAccess = this.hasResourceUsageAccess.bind(this); - } - - async getEndpointsAsync() { - try { - const endpoints = await this.KubernetesEndpointService.get(); - const systemEndpoints = _.filter(endpoints, { Namespace: 'kube-system' }); - this.systemEndpoints = _.filter(systemEndpoints, (ep) => ep.HolderIdentity); - - const kubernetesEndpoint = _.find(endpoints, { Name: 'kubernetes' }); - if (kubernetesEndpoint && kubernetesEndpoint.Subsets) { - const ips = _.flatten(_.map(kubernetesEndpoint.Subsets, 'Ips')); - _.forEach(this.nodes, (node) => { - node.Api = _.includes(ips, node.IPAddress); - }); - } - } catch (err) { - this.Notifications.error('Failure', err, 'Unable to retrieve environments'); - } - } - - getEndpoints() { - return this.$async(this.getEndpointsAsync); - } - - async getNodesAsync() { - try { - const nodes = await this.KubernetesNodeService.get(); - _.forEach(nodes, (node) => (node.Memory = filesizeParser(node.Memory))); - this.nodes = nodes; - this.CPULimit = _.reduce(this.nodes, (acc, node) => node.CPU + acc, 0); - this.CPULimit = Math.round(this.CPULimit * 10000) / 10000; - this.MemoryLimit = _.reduce(this.nodes, (acc, node) => KubernetesResourceReservationHelper.megaBytesValue(node.Memory) + acc, 0); - } catch (err) { - this.Notifications.error('Failure', err, 'Unable to retrieve nodes'); - } - } - - getNodes() { - return this.$async(this.getNodesAsync); - } - - async getApplicationsAsync() { - try { - this.state.applicationsLoading = true; - - const applicationsResources = await 
getTotalResourcesForAllApplications(this.endpoint.Id); - this.resourceReservation = new KubernetesResourceReservation(); - this.resourceReservation.CPU = Math.round(applicationsResources.CpuRequest / 1000); - this.resourceReservation.Memory = KubernetesResourceReservationHelper.megaBytesValue(applicationsResources.MemoryRequest); - - if (this.hasResourceUsageAccess()) { - await this.getResourceUsage(this.endpoint.Id); - } - } catch (err) { - this.Notifications.error('Failure', err, 'Unable to retrieve applications'); - } finally { - this.state.applicationsLoading = false; - } - } - - getApplications() { - return this.$async(this.getApplicationsAsync); - } - - async getResourceUsage(endpointId) { - try { - const nodeMetrics = await getMetricsForAllNodes(endpointId); - const resourceUsageList = nodeMetrics.items.map((i) => i.usage); - const clusterResourceUsage = resourceUsageList.reduce((total, u) => { - total.CPU += KubernetesResourceReservationHelper.parseCPU(u.cpu); - total.Memory += KubernetesResourceReservationHelper.megaBytesValue(u.memory); - return total; - }, new KubernetesResourceReservation()); - this.resourceUsage = clusterResourceUsage; - } catch (err) { - this.Notifications.error('Failure', err, 'Unable to retrieve cluster resource usage'); - } - } - - /** - * Check if resource usage stats can be displayed - * @returns {boolean} - */ - hasResourceUsageAccess() { - return this.isAdmin && this.state.useServerMetrics; - } - - async onInit() { - this.endpoint = await this.EndpointService.endpoint(this.endpoint.Id); - this.isAdmin = this.Authentication.isAdmin(); - const useServerMetrics = this.endpoint.Kubernetes.Configuration.UseServerMetrics; - - this.state = { - applicationsLoading: true, - viewReady: false, - useServerMetrics, - }; - - await this.getNodes(); - if (this.isAdmin) { - await Promise.allSettled([this.getEndpoints(), this.getApplicationsAsync()]); - } - - this.state.viewReady = true; - } - - $onInit() { - return this.$async(this.onInit); - } -} - -export default KubernetesClusterController; -angular.module('portainer.kubernetes').controller('KubernetesClusterController', KubernetesClusterController); diff --git a/app/kubernetes/views/configurations/configmap/edit/configMap.html b/app/kubernetes/views/configurations/configmap/edit/configMap.html index 89e1c38c5..70a2d4f1a 100644 --- a/app/kubernetes/views/configurations/configmap/edit/configMap.html +++ b/app/kubernetes/views/configurations/configmap/edit/configMap.html @@ -58,7 +58,7 @@ diff --git a/app/kubernetes/views/configurations/secret/edit/secret.html b/app/kubernetes/views/configurations/secret/edit/secret.html index 0309d356c..2e939c87e 100644 --- a/app/kubernetes/views/configurations/secret/edit/secret.html +++ b/app/kubernetes/views/configurations/secret/edit/secret.html @@ -65,7 +65,7 @@ diff --git a/app/kubernetes/views/deploy/deploy.html b/app/kubernetes/views/deploy/deploy.html index 35701b76c..60e7b0144 100644 --- a/app/kubernetes/views/deploy/deploy.html +++ b/app/kubernetes/views/deploy/deploy.html @@ -31,7 +31,7 @@ -
+
- -
- + is-disabled="ctrl.formValues.namespace_toggle && ctrl.state.BuildMethod !== ctrl.BuildMethods.HELM || ctrl.state.isNamespaceInputDisabled" + value="ctrl.formValues.Namespace" + on-change="(ctrl.onChangeNamespace)" + options="ctrl.namespaceOptions" + > Namespaces specified in the manifest will be used @@ -66,10 +64,10 @@
-
+
Resource names specified in the manifest will be used
-
+
-
+
-
Selected Helm chart
- +
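For reference, a minimal sketch (not part of the patch) of the contract the namespace field above is wired against — inferred from the `namespacePortainerSelect` registration later in this diff (`value`, `onChange`, `isDisabled`, `options`) and the controller changes just below:

```ts
// Hypothetical names for illustration only; the real component lives in
// app/react/kubernetes/applications/components/NamespaceSelector/NamespaceSelector.
type NamespaceOption = { label: string; value: string };

interface NamespaceSelectContract {
  value: string; // currently selected namespace name
  options: NamespaceOption[]; // built by the controller from the namespace list
  isDisabled?: boolean;
  onChange: (namespaceName: string) => void; // receives the plain namespace name
}

// The controller below builds its options the same way:
function toNamespaceOptions(namespaces: { Name: string }[]): NamespaceOption[] {
  return namespaces.map((ns) => ({ label: ns.Name, value: ns.Name }));
}
```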
diff --git a/app/kubernetes/views/deploy/deployController.js b/app/kubernetes/views/deploy/deployController.js index 50c923d58..9f17d214e 100644 --- a/app/kubernetes/views/deploy/deployController.js +++ b/app/kubernetes/views/deploy/deployController.js @@ -6,17 +6,18 @@ import PortainerError from '@/portainer/error'; import { KubernetesDeployManifestTypes, KubernetesDeployBuildMethods, KubernetesDeployRequestMethods, RepositoryMechanismTypes } from 'Kubernetes/models/deploy'; import { isTemplateVariablesEnabled, renderTemplate } from '@/react/portainer/custom-templates/components/utils'; import { getDeploymentOptions } from '@/react/portainer/environments/environment.service'; -import { kubernetes } from '@@/BoxSelector/common-options/deployment-methods'; -import { editor, git, customTemplate, url, helm } from '@@/BoxSelector/common-options/build-methods'; import { parseAutoUpdateResponse, transformAutoUpdateViewModel } from '@/react/portainer/gitops/AutoUpdateFieldset/utils'; import { baseStackWebhookUrl, createWebhookId } from '@/portainer/helpers/webhookHelper'; -import { confirmWebEditorDiscard } from '@@/modals/confirm'; import { getVariablesFieldDefaultValues } from '@/react/portainer/custom-templates/components/CustomTemplatesVariablesField'; import { KUBE_STACK_NAME_VALIDATION_REGEX } from '@/react/kubernetes/DeployView/StackName/constants'; +import { confirmWebEditorDiscard } from '@@/modals/confirm'; +import { editor, git, customTemplate, url, helm } from '@@/BoxSelector/common-options/build-methods'; +import { kubernetes } from '@@/BoxSelector/common-options/deployment-methods'; class KubernetesDeployController { /* @ngInject */ - constructor($async, $state, $window, Authentication, Notifications, KubernetesResourcePoolService, StackService, CustomTemplateService, KubernetesApplicationService) { + constructor($scope, $async, $state, $window, Authentication, Notifications, KubernetesResourcePoolService, StackService, CustomTemplateService, KubernetesApplicationService) { + this.$scope = $scope; this.$async = $async; this.$state = $state; this.$window = $window; @@ -33,10 +34,10 @@ class KubernetesDeployController { this.methodOptions = [ { ...git, value: KubernetesDeployBuildMethods.GIT }, + { ...helm, value: KubernetesDeployBuildMethods.HELM }, { ...editor, value: KubernetesDeployBuildMethods.WEB_EDITOR }, { ...url, value: KubernetesDeployBuildMethods.URL }, { ...customTemplate, value: KubernetesDeployBuildMethods.CUSTOM_TEMPLATE }, - { ...helm, value: KubernetesDeployBuildMethods.HELM }, ]; let buildMethod = Number(this.$state.params.buildMethod) || KubernetesDeployBuildMethods.GIT; @@ -100,9 +101,10 @@ class KubernetesDeployController { this.onChangeNamespace = this.onChangeNamespace.bind(this); } - onChangeNamespace() { + onChangeNamespace(namespaceName) { return this.$async(async () => { - const applications = await this.KubernetesApplicationService.get(this.formValues.Namespace); + this.formValues.Namespace = namespaceName; + const applications = await this.KubernetesApplicationService.get(namespaceName); const stacks = _.map(applications, (item) => item.StackName).filter((item) => item !== ''); this.stacks = _.uniq(stacks); }); @@ -110,6 +112,9 @@ class KubernetesDeployController { onSelectHelmChart(chart) { this.state.selectedHelmChart = chart; + + // Force a digest cycle to ensure the change is reflected in the UI + this.$scope.$apply(); } onChangeTemplateVariables(value) { @@ -367,6 +372,10 @@ class KubernetesDeployController { if (this.namespaces.length > 0) { 
this.formValues.Namespace = this.namespaces[0].Name; } + this.namespaceOptions = _.map(namespaces, (namespace) => ({ + label: namespace.Name, + value: namespace.Name, + })); } catch (err) { this.Notifications.error('Failure', err, 'Unable to load namespaces data'); } @@ -400,7 +409,8 @@ class KubernetesDeployController { } } - this.onChangeNamespace(); + this.onChangeNamespace(this.formValues.Namespace); + this.state.viewReady = true; this.$window.onbeforeunload = () => { diff --git a/app/kubernetes/views/stacks/logs/logsController.js b/app/kubernetes/views/stacks/logs/logsController.js index 536ea2ae4..d4e1b5ba7 100644 --- a/app/kubernetes/views/stacks/logs/logsController.js +++ b/app/kubernetes/views/stacks/logs/logsController.js @@ -104,6 +104,7 @@ class KubernetesStackLogsController { await this.getStackLogsAsync(); } catch (err) { this.Notifications.error('Failure', err, 'Unable to retrieve stack logs'); + this.stopRepeater(); } finally { this.state.viewReady = true; } diff --git a/app/ng-constants.ts b/app/ng-constants.ts index 527d42132..87fa45b97 100644 --- a/app/ng-constants.ts +++ b/app/ng-constants.ts @@ -7,7 +7,6 @@ import { API_ENDPOINT_EDGE_GROUPS, API_ENDPOINT_EDGE_JOBS, API_ENDPOINT_EDGE_STACKS, - API_ENDPOINT_EDGE_TEMPLATES, API_ENDPOINT_ENDPOINTS, API_ENDPOINT_ENDPOINT_GROUPS, API_ENDPOINT_KUBERNETES, @@ -42,7 +41,6 @@ export const constantsModule = angular .constant('API_ENDPOINT_EDGE_GROUPS', API_ENDPOINT_EDGE_GROUPS) .constant('API_ENDPOINT_EDGE_JOBS', API_ENDPOINT_EDGE_JOBS) .constant('API_ENDPOINT_EDGE_STACKS', API_ENDPOINT_EDGE_STACKS) - .constant('API_ENDPOINT_EDGE_TEMPLATES', API_ENDPOINT_EDGE_TEMPLATES) .constant('API_ENDPOINT_ENDPOINTS', API_ENDPOINT_ENDPOINTS) .constant('API_ENDPOINT_ENDPOINT_GROUPS', API_ENDPOINT_ENDPOINT_GROUPS) .constant('API_ENDPOINT_KUBERNETES', API_ENDPOINT_KUBERNETES) diff --git a/app/portainer/components/code-editor/code-editor.html b/app/portainer/components/code-editor/code-editor.html index f7ad81fc4..446da5dab 100644 --- a/app/portainer/components/code-editor/code-editor.html +++ b/app/portainer/components/code-editor/code-editor.html @@ -1,9 +1,10 @@ diff --git a/app/portainer/components/code-editor/code-editor.js b/app/portainer/components/code-editor/code-editor.js index c20f6e6fb..25a77c3c9 100644 --- a/app/portainer/components/code-editor/code-editor.js +++ b/app/portainer/components/code-editor/code-editor.js @@ -5,7 +5,7 @@ angular.module('portainer.app').component('codeEditor', { controller, bindings: { identifier: '@', - placeholder: '@', + textTip: '@', yml: '<', dockerFile: '<', shell: '<', @@ -13,5 +13,6 @@ angular.module('portainer.app').component('codeEditor', { onChange: '<', value: '<', height: '@', + schema: '<', }, }); diff --git a/app/portainer/components/custom-template-selector/custom-template-selector.html b/app/portainer/components/custom-template-selector/custom-template-selector.html index ce05aa1ce..cde665d49 100644 --- a/app/portainer/components/custom-template-selector/custom-template-selector.html +++ b/app/portainer/components/custom-template-selector/custom-template-selector.html @@ -1,7 +1,7 @@
-
+

diff --git a/app/portainer/components/form-components/web-editor-form/index.js b/app/portainer/components/form-components/web-editor-form/index.js index 5fa476e61..804966f10 100644 --- a/app/portainer/components/form-components/web-editor-form/index.js +++ b/app/portainer/components/form-components/web-editor-form/index.js @@ -6,13 +6,14 @@ export const webEditorForm = { bindings: { identifier: '@', - placeholder: '@', + textTip: '@', yml: '<', value: '<', readOnly: '<', onChange: '<', hideTitle: '<', height: '@', + schema: '<', }, transclude: { diff --git a/app/portainer/components/form-components/web-editor-form/web-editor-form.html b/app/portainer/components/form-components/web-editor-form/web-editor-form.html index 9dd875abf..336da3bce 100644 --- a/app/portainer/components/form-components/web-editor-form/web-editor-form.html +++ b/app/portainer/components/form-components/web-editor-form/web-editor-form.html @@ -42,12 +42,13 @@

diff --git a/app/portainer/components/forms/git-form/index.ts b/app/portainer/components/forms/git-form/index.ts index e478b62ca..26793f584 100644 --- a/app/portainer/components/forms/git-form/index.ts +++ b/app/portainer/components/forms/git-form/index.ts @@ -7,7 +7,7 @@ import { gitFormRefField } from './git-form-ref-field'; export const gitFormModule = angular .module('portainer.app.components.git-form', []) - .component('gitForm', gitForm) + .component('gitForm', gitForm) // kube deploy + docker stack create .component('gitFormAuthFieldset', gitFormAuthFieldset) .component('gitFormAutoUpdateFieldset', gitFormAutoUpdate) .component('gitFormRefField', gitFormRefField).name; diff --git a/app/portainer/helpers/strings.ts b/app/portainer/helpers/strings.ts index dc130b0e5..fb8d69bc6 100644 --- a/app/portainer/helpers/strings.ts +++ b/app/portainer/helpers/strings.ts @@ -1,7 +1,7 @@ -export function pluralize(val: number, word: string, plural = `${word}s`) { - return [1, -1].includes(Number(val)) ? word : plural; -} - -export function addPlural(value: number, word: string, plural = `${word}s`) { - return `${value} ${pluralize(value, word, plural)}`; -} +// Re-exporting so we don't have to update one meeeeellion files that are already importing these +// functions from here. +export { + pluralize, + addPlural, + grammaticallyJoin, +} from '@/react/common/string-utils'; diff --git a/app/portainer/react/components/git-form.ts b/app/portainer/react/components/git-form.ts index bc74e3715..af0c7d460 100644 --- a/app/portainer/react/components/git-form.ts +++ b/app/portainer/react/components/git-form.ts @@ -29,6 +29,7 @@ export const gitFormModule = angular 'webhookId', 'webhooksDocs', 'createdFromCustomTemplateId', + 'isAutoUpdateVisible', ]) ) .component( diff --git a/app/portainer/react/components/index.ts b/app/portainer/react/components/index.ts index db071b5f5..1b6d0df7a 100644 --- a/app/portainer/react/components/index.ts +++ b/app/portainer/react/components/index.ts @@ -9,6 +9,7 @@ import { withFormValidation } from '@/react-tools/withFormValidation'; import { GroupAssociationTable } from '@/react/portainer/environments/environment-groups/components/GroupAssociationTable'; import { AssociatedEnvironmentsSelector } from '@/react/portainer/environments/environment-groups/components/AssociatedEnvironmentsSelector'; import { withControlledInput } from '@/react-tools/withControlledInput'; +import { NamespacePortainerSelect } from '@/react/kubernetes/applications/components/NamespaceSelector/NamespaceSelector'; import { EnvironmentVariablesFieldset, @@ -97,7 +98,7 @@ export const ngModule = angular r2a(Tooltip, ['message', 'position', 'className', 'setHtmlMessage', 'size']) ) .component('terminalTooltip', r2a(TerminalTooltip, [])) - .component('badge', r2a(Badge, ['type', 'className'])) + .component('badge', r2a(Badge, ['type', 'className', 'data-cy'])) .component('fileUploadField', fileUploadField) .component('porSwitchField', switchField) .component( @@ -199,11 +200,22 @@ export const ngModule = angular 'onChange', 'options', 'isMulti', + 'filterOption', 'isClearable', 'components', 'isLoading', 'noOptionsMessage', 'aria-label', + 'loadingMessage', + ]) + ) + .component( + 'namespacePortainerSelect', + r2a(NamespacePortainerSelect, [ + 'value', + 'onChange', + 'isDisabled', + 'options', ]) ) .component( @@ -223,7 +235,7 @@ export const ngModule = angular 'reactCodeEditor', r2a(CodeEditor, [ 'id', - 'placeholder', + 'textTip', 'type', 'readonly', 'onChange', @@ -232,6 +244,10 @@ export const 
ngModule = angular 'data-cy', 'versions', 'onVersionChange', + 'schema', + 'fileName', + 'placeholder', + 'showToolbar', ]) ) .component( diff --git a/app/portainer/services/axios.ts b/app/portainer/services/axios.ts index 7f7683cd1..d5c8d5840 100644 --- a/app/portainer/services/axios.ts +++ b/app/portainer/services/axios.ts @@ -12,6 +12,7 @@ import { } from 'axios-cache-interceptor'; import { loadProgressBar } from 'axios-progress-bar'; import 'axios-progress-bar/dist/nprogress.css'; +import qs from 'qs'; import PortainerError from '@/portainer/error'; @@ -53,6 +54,9 @@ function headerInterpreter( const axios = Axios.create({ baseURL: 'api', maxDockerAPIVersion: MAX_DOCKER_API_VERSION, + paramsSerializer: { + serialize: (params) => qs.stringify(params, { arrayFormat: 'brackets' }), + }, }); axios.interceptors.request.use((req) => { dispatchCacheRefreshEventIfNeeded(req); diff --git a/app/portainer/services/localStorage.js b/app/portainer/services/localStorage.js index d00150d6c..038c75244 100644 --- a/app/portainer/services/localStorage.js +++ b/app/portainer/services/localStorage.js @@ -30,7 +30,7 @@ angular.module('portainer.app').factory('LocalStorage', [ return localStorageService.get('UI_STATE'); }, getUserId() { - localStorageService.get('USER_ID'); + return localStorageService.get('USER_ID'); }, storeUserId: function (userId) { localStorageService.set('USER_ID', userId); diff --git a/app/portainer/views/auth/auth.html b/app/portainer/views/auth/auth.html index 09dadf050..2504141a7 100644 --- a/app/portainer/views/auth/auth.html +++ b/app/portainer/views/auth/auth.html @@ -4,7 +4,10 @@
- +
+ + +
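For reference on the `paramsSerializer` added to `app/portainer/services/axios.ts` above — a small sketch (assuming standard `qs` semantics, not part of the patch) of the array shape it opts into:

```ts
import qs from 'qs';

// arrayFormat: 'brackets' repeats the key with a [] suffix for each array element.
// encode: false is used here only to make the shape readable; the patch keeps the
// default encoding, so the brackets themselves are percent-encoded on the wire.
qs.stringify({ ids: [1, 2, 3], search: 'web' }, { arrayFormat: 'brackets', encode: false });
// => 'ids[]=1&ids[]=2&ids[]=3&search=web'
```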
diff --git a/app/portainer/views/init/admin/initAdmin.html b/app/portainer/views/init/admin/initAdmin.html index b5cfcfeb4..afff165b2 100644 --- a/app/portainer/views/init/admin/initAdmin.html +++ b/app/portainer/views/init/admin/initAdmin.html @@ -5,7 +5,10 @@
- +
+ + +
diff --git a/app/portainer/views/logout/logout.html b/app/portainer/views/logout/logout.html index fe9b2513d..95299d5d0 100644 --- a/app/portainer/views/logout/logout.html +++ b/app/portainer/views/logout/logout.html @@ -4,7 +4,10 @@
- +
+ + +
diff --git a/app/portainer/views/stacks/create/createStackController.js b/app/portainer/views/stacks/create/createStackController.js index a07c4aa3f..118e2d23a 100644 --- a/app/portainer/views/stacks/create/createStackController.js +++ b/app/portainer/views/stacks/create/createStackController.js @@ -10,6 +10,7 @@ import { confirmWebEditorDiscard } from '@@/modals/confirm'; import { parseAutoUpdateResponse, transformAutoUpdateViewModel } from '@/react/portainer/gitops/AutoUpdateFieldset/utils'; import { baseStackWebhookUrl, createWebhookId } from '@/portainer/helpers/webhookHelper'; import { getVariablesFieldDefaultValues } from '@/react/portainer/custom-templates/components/CustomTemplatesVariablesField'; +import { getDockerComposeSchema } from '@/react/hooks/useDockerComposeSchema/useDockerComposeSchema'; angular .module('portainer.app') @@ -351,6 +352,12 @@ angular } catch (err) { Notifications.error('Failure', err, 'Unable to retrieve Containers'); } + + try { + $scope.dockerComposeSchema = await getDockerComposeSchema(); + } catch (err) { + Notifications.error('Failure', err, 'Unable to load schema validation for editor'); + } } this.uiCanExit = async function () { diff --git a/app/portainer/views/stacks/create/createstack.html b/app/portainer/views/stacks/create/createstack.html index 4674c6e3a..25773d35a 100644 --- a/app/portainer/views/stacks/create/createstack.html +++ b/app/portainer/views/stacks/create/createstack.html @@ -128,8 +128,9 @@ on-change="(onChangeFileContent)" ng-required="true" yml="true" - placeholder="Define or paste the content of your docker compose file here" + text-tip="Define or paste the content of your docker compose file here" read-only="state.isEditorReadOnly" + schema="dockerComposeSchema" >

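The same pattern carries over to the React side: the reworked `CodeEditor` takes the tip text and an optional JSON schema as props. A minimal sketch (not part of the patch; the import paths, the `'yaml'` editor type value, and the resolved schema type are assumptions) of wiring `getDockerComposeSchema()` into the editor, mirroring the controller change above:

```tsx
import { useEffect, useState } from 'react';
import type { JSONSchema7 } from 'json-schema';

import { getDockerComposeSchema } from '@/react/hooks/useDockerComposeSchema/useDockerComposeSchema';

import { CodeEditor } from '@@/CodeEditor/CodeEditor';

function ComposeEditorSketch({ value, onChange }: { value: string; onChange: (v: string) => void }) {
  const [schema, setSchema] = useState<JSONSchema7 | undefined>();

  useEffect(() => {
    // Mirrors the controller above: a failed schema load only disables validation,
    // it never blocks the editor itself.
    getDockerComposeSchema()
      .then((s) => setSchema(s as JSONSchema7))
      .catch(() => setSchema(undefined));
  }, []);

  return (
    <CodeEditor
      id="compose-editor-sketch"
      data-cy="compose-editor-sketch"
      type="yaml"
      textTip="Define or paste the content of your docker compose file here"
      value={value}
      onChange={onChange}
      schema={schema}
    />
  );
}
```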
diff --git a/app/portainer/views/stacks/edit/stack.html b/app/portainer/views/stacks/edit/stack.html index caf7a4f92..8ecf54e06 100644 --- a/app/portainer/views/stacks/edit/stack.html +++ b/app/portainer/views/stacks/edit/stack.html @@ -150,8 +150,9 @@ You can get more information about Compose file format in the official documentation. -

- {{ state.yamlError }} + +
+ {{ state.yamlError || ' ' }}
@@ -159,10 +160,11 @@
diff --git a/app/portainer/views/stacks/edit/stackController.js b/app/portainer/views/stacks/edit/stackController.js index 385b7101f..8e3db07ec 100644 --- a/app/portainer/views/stacks/edit/stackController.js +++ b/app/portainer/views/stacks/edit/stackController.js @@ -8,6 +8,7 @@ import { confirmStackUpdate } from '@/react/common/stacks/common/confirm-stack-u import { confirm, confirmDelete, confirmWebEditorDiscard } from '@@/modals/confirm'; import { ModalType } from '@@/modals'; import { buildConfirmButton } from '@@/modals/utils'; +import { getDockerComposeSchema } from '@/react/hooks/useDockerComposeSchema/useDockerComposeSchema'; angular.module('portainer.app').controller('StackController', [ '$async', @@ -491,6 +492,12 @@ angular.module('portainer.app').controller('StackController', [ } $scope.composeSyntaxMaxVersion = endpoint.ComposeSyntaxMaxVersion; + + try { + $scope.dockerComposeSchema = await getDockerComposeSchema(); + } catch (err) { + Notifications.error('Failure', err, 'Unable to load schema validation for editor'); + } } initView(); diff --git a/app/react-tools/test-mocks.ts b/app/react-tools/test-mocks.ts index 556d7e4c2..d9c7d273c 100644 --- a/app/react-tools/test-mocks.ts +++ b/app/react-tools/test-mocks.ts @@ -1,4 +1,5 @@ import _ from 'lodash'; +import { QueryObserverResult } from '@tanstack/react-query'; import { Team } from '@/react/portainer/users/teams/types'; import { Role, User, UserId } from '@/portainer/users/types'; @@ -9,7 +10,7 @@ import { export function createMockUsers( count: number, - roles: Role | Role[] | ((id: UserId) => Role) = () => _.random(1, 3) + roles: Role | Role[] | ((id: UserId) => Role) ): User[] { return _.range(1, count + 1).map((value) => ({ Id: value, @@ -39,7 +40,14 @@ function getRoles( return roles; } - return roles[id]; + // Roles is an array + if (roles.length === 0) { + throw new Error('No roles provided'); + } + + // The number of roles is not necessarily the same length as the number of users + // so we need to distribute the roles evenly and consistently + return roles[(id - 1) % roles.length]; } export function createMockTeams(count: number): Team[] { @@ -134,3 +142,38 @@ export function createMockEnvironment(): Environment { }, }; } + +export function createMockQueryResult( + data: TData, + overrides?: Partial> +) { + const defaultResult = { + data, + dataUpdatedAt: 0, + error: null, + errorUpdatedAt: 0, + failureCount: 0, + errorUpdateCount: 0, + failureReason: null, + isError: false, + isFetched: true, + isFetchedAfterMount: true, + isFetching: false, + isInitialLoading: false, + isLoading: false, + isLoadingError: false, + isPaused: false, + isPlaceholderData: false, + isPreviousData: false, + isRefetchError: false, + isRefetching: false, + isStale: false, + isSuccess: true, + refetch: async () => defaultResult, + remove: () => {}, + status: 'success', + fetchStatus: 'idle', + }; + + return { ...defaultResult, ...overrides }; +} diff --git a/app/react/common/api/common.test.ts b/app/react/common/api/common.test.ts new file mode 100644 index 000000000..97d22c2be --- /dev/null +++ b/app/react/common/api/common.test.ts @@ -0,0 +1,117 @@ +import { + queryOptionsFromTableState, + queryParamsFromQueryOptions, +} from './listQueryParams'; +import { + withPaginationHeaders, + withPaginationQueryParams, +} from './pagination.types'; +import { + makeIsSortTypeFunc, + sortOptionsFromColumns, + withSortQuery, +} from './sort.types'; + +const sortOptions = sortOptionsFromColumns([ + { enableSorting: true }, + { id: 'one' }, + { id: 
'two', enableSorting: true }, + { accessorKey: 'three', enableSorting: true }, + { id: 'four', enableSorting: true, accessorKey: 'four_key' }, +]); + +describe('listQueryParams', () => { + test('queryOptionsFromTableState', () => { + const fns = { + setPageSize: () => {}, + setSearch: () => {}, + setSortBy: () => {}, + }; + + expect( + queryOptionsFromTableState( + { + page: 5, + pageSize: 10, + search: 'something', + sortBy: { id: 'one', desc: false }, + ...fns, + }, + sortOptions + ) + ).toStrictEqual({ + search: 'something', + sort: 'one', + order: 'asc', + page: 5, + pageLimit: 10, + }); + }); + + test('queryParamsFromQueryOptions', () => { + expect( + queryParamsFromQueryOptions({ + search: 'something', + page: 5, + pageLimit: 10, + sort: 'one', + order: 'asc', + }) + ).toStrictEqual({ + search: 'something', + sort: 'one', + order: 'asc', + start: 50, + limit: 10, + }); + }); +}); + +describe('pagination.types', () => { + test('withPaginationQueryParams', () => { + expect(withPaginationQueryParams({ page: 5, pageLimit: 10 })).toStrictEqual( + { + start: 50, + limit: 10, + } + ); + }); + + test('withPaginationHeaders', () => { + expect( + withPaginationHeaders({ + data: [], + headers: { 'x-total-count': 10, 'x-total-available': 100 }, + }) + ).toStrictEqual({ + data: [], + totalCount: 10, + totalAvailable: 100, + }); + }); +}); + +describe('sort.types', () => { + test('makeIsSortType', () => { + const isSortType = makeIsSortTypeFunc(sortOptions); + expect(typeof isSortType).toBe('function'); + expect(isSortType('one')).toBe(true); + expect(isSortType('something_else')).toBe(false); + }); + + test('withSortQuery', () => { + expect( + withSortQuery({ id: 'one', desc: false }, sortOptions) + ).toStrictEqual({ sort: 'one', order: 'asc' }); + expect( + withSortQuery({ id: 'three', desc: true }, sortOptions) + ).toStrictEqual({ sort: 'three', order: 'desc' }); + expect( + withSortQuery({ id: 'something_else', desc: true }, sortOptions) + ).toStrictEqual({ sort: undefined, order: 'desc' }); + }); + + test('sortOptionsFromColumns', () => { + expect(sortOptions).toEqual(['one', 'two', 'three', 'four']); + }); +}); diff --git a/app/react/common/api/listQueryParams.ts b/app/react/common/api/listQueryParams.ts new file mode 100644 index 000000000..dc2101c5b --- /dev/null +++ b/app/react/common/api/listQueryParams.ts @@ -0,0 +1,65 @@ +import { BasicTableSettings } from '@@/datatables/types'; +import { TableState } from '@@/datatables/useTableState'; + +import { + PaginationQuery, + PaginationQueryParams, + withPaginationQueryParams, +} from './pagination.types'; +import { SearchQuery, SearchQueryParams } from './search.types'; +import { + SortOptions, + SortQuery, + SortQueryParams, + withSortQuery, +} from './sort.types'; + +export type BaseQueryOptions = SearchQuery & + SortQuery & + PaginationQuery; + +/** + * Utility function to transform a TableState (base form) to a query options object + * Used to unify backend pagination common cases + * + * @param tableState TableState {search, sortBy: {id:string, desc:bool }, page, pageSize} + * @param sortOptions SortOptions (generated from columns) + * @returns BaseQuery {search, sort, order, page, pageLimit} + */ +export function queryOptionsFromTableState( + tableState: TableState & { page: number }, + sortOptions: T +): BaseQueryOptions { + return { + // search/filter + search: tableState.search, + // sorting + ...withSortQuery(tableState.sortBy, sortOptions), + // pagination + page: tableState.page, + pageLimit: tableState.pageSize, + }; +} + 
+export type BaseQueryParams = SearchQueryParams & + SortQueryParams & + PaginationQueryParams; + +/** + * + * @param query BaseQueryOptions + * @returns BaseQueryParams {search, sort, order, start, limit} + */ +export function queryParamsFromQueryOptions( + query: BaseQueryOptions +): BaseQueryParams { + return { + // search/filter + search: query.search, + // sorting + sort: query.sort, + order: query.order, + // paginattion + ...withPaginationQueryParams(query), + }; +} diff --git a/app/react/common/api/pagination.types.ts b/app/react/common/api/pagination.types.ts new file mode 100644 index 000000000..390765251 --- /dev/null +++ b/app/react/common/api/pagination.types.ts @@ -0,0 +1,114 @@ +import { AxiosResponse } from 'axios'; + +/** + * Used to define axios query functions parameters for queries that support backend pagination + * + * **Example** + * + * ```ts + * type QueryParams = PaginationQueryParams; + * + * async function getSomething({ start, limit }: QueryParams = {}) { + * try { + * const { data } = await axios.get( + * buildUrl(), + * { params: { start, limit } }, + * ); + * return data; + * } catch (err) { + * throw parseAxiosError(err as Error, 'Unable to retrieve something'); + * } + * } + *``` + */ +export type PaginationQueryParams = { + start?: number; + limit?: number; +}; + +/** + * Used to define react-query query functions parameters for queries that support backend pagination + * + * Example: + * + * ```ts + * type Query = PaginationQuery; + * + * function useSomething({ + * page = 0, + * pageLimit = 10, + * ...query + * }: Query = {}) { + * return useQuery( + * [ ...queryKeys.base(), { page, pageLimit, ...query } ], + * async () => { + * const start = (page - 1) * pageLimit + 1; + * return getSomething({ start, limit: pageLimit, ...query }); + * }, + * { + * ...withError('Failure retrieving something'), + * } + * ); + * } + * ``` + */ +export type PaginationQuery = { + page?: number; + pageLimit?: number; +}; + +/** + * Utility function to convert PaginationQuery to PaginationQueryParams + * + * **Example** + * + * ```ts + * function getSomething(params: PaginationQueryParams) {...} + * + * function useSomething(query: PaginationQuery) { + * return useQuery( + * [ ...queryKeys.base(), query ], + * async () => getSomething({ ...query, ...withPaginationQueryParams(query) }) + * ) + * } + * ``` + */ +export function withPaginationQueryParams({ + page = 0, + pageLimit = 10, +}: PaginationQuery): PaginationQueryParams { + const start = page * pageLimit; + return { + start, + limit: pageLimit, + }; +} + +export type PaginatedResults = { + data: T; + totalCount: number; + totalAvailable: number; +}; + +/** + * Utility function to extract total count from AxiosResponse headers + * + * @param param0 AxiosReponse-like object {data, headers} + * @returns PaginatedResults {data, totalCount, totalAvailable} + */ +export function withPaginationHeaders({ + data, + headers, +}: { + data: AxiosResponse['data']; + headers: AxiosResponse['headers']; +}): PaginatedResults { + const totalCount = parseInt(headers['x-total-count'], 10); + const totalAvailable = parseInt(headers['x-total-available'], 10); + + return { + data, + totalCount, + totalAvailable, + }; +} diff --git a/app/react/common/api/search.types.ts b/app/react/common/api/search.types.ts new file mode 100644 index 000000000..e146bd8ba --- /dev/null +++ b/app/react/common/api/search.types.ts @@ -0,0 +1,47 @@ +/** + * Used to define axios query functions parameters for queries that support backend filtering by search + * + 
* **Example** + * + * ```ts + * type QueryParams = SearchQueryParams; + * + * async function getSomething({ search }: QueryParams = {}) { + * try { + * const { data } = await axios.get( + * buildUrl(), + * { params: { search } }, + * ); + * return data; + * } catch (err) { + * throw parseAxiosError(err as Error, 'Unable to retrieve something'); + * } + * } + *``` + */ +export type SearchQueryParams = { + search?: string; +}; + +/** + * Used to define react-query query functions parameters for queries that support backend filtering by search + * + * Example: + * + * ```ts + * type Query = SearchQuery; + * + * function useSomething({ search, ...query }: Query = {}) { + * return useQuery( + * [ ...queryKeys.base(), { search, ...query } ], + * async () => getSomething({ search, ...query }), + * { + * ...withError('Failure retrieving something'), + * } + * ); + * } + * ``` + */ +export type SearchQuery = { + search?: string; +}; diff --git a/app/react/common/api/sort.types.ts b/app/react/common/api/sort.types.ts new file mode 100644 index 000000000..bccfff1c8 --- /dev/null +++ b/app/react/common/api/sort.types.ts @@ -0,0 +1,139 @@ +import { compact } from 'lodash'; + +import { SortableTableSettings } from '@@/datatables/types'; + +export type SortOptions = readonly string[]; +export type SortType = T[number]; + +/** + * Used to generate the validation function that allows to check if the sort key is supported or not + * + * **Example** + * + * ```ts + * export const sortOptions: SortOptions = ['Id', 'Name'] as const; + * export const isSortType = makeIsSortTypeFunc(sortOptions) + * ``` + * + * **Usage** + * + * ```ts + * // react-query hook definition + * export function useSomething({ sort, order }: SortQuery) { ... } + * + * // component using the react-query hook, validating the parameters used by the query + * function MyComponent() { + * const tableState = useTableState(settingsStore, tableKey); + * const { data } = useSomething( + * { + * sort: isSortType(tableState.sortBy.id) ? tableState.sortBy.id : undefined, + * order: tableState.sortBy.desc ? 'desc' : 'asc', + * }, + * ); + * ... 
+ * } + * ``` + * + * @param sortOptions list of supported keys + * @returns validation function + */ +export function makeIsSortTypeFunc(sortOptions: T) { + return (value?: string): value is SortType => + sortOptions.includes(value as SortType); +} + +/** + * Used to define axios query functions parameters for queries that support backend sorting + * + * **Example** + * + * ```ts + * const sortOptions: SortOptions = ['Id', 'Name'] as const; // or generated with `sortOptionsFromColumns` + * type QueryParams = SortQueryParams; + * + * async function getSomething({ sort, order = 'asc' }: QueryParams = {}) { + * try { + * const { data } = await axios.get( + * buildUrl(), + * { params: { sort, order } }, + * ); + * return data; + * } catch (err) { + * throw parseAxiosError(err as Error, 'Unable to retrieve something'); + * } + * } + *``` + */ +export type SortQueryParams = { + sort?: SortType; + order?: 'asc' | 'desc'; +}; + +/** + * Used to define react-query query functions parameters for queries that support backend sorting + * + * Example: + * + * ```ts + * const sortOptions: SortOptions = ['Id', 'Name'] as const; + * type Query = SortQuery; + * + * function useSomething({ + * sort, + * order = 'asc', + * ...query + * }: Query = {}) { + * return useQuery( + * [ ...queryKeys.base(), { ...query, sort, order } ], + * async () => getSomething({ ...query, sort, order }), + * { + * ...withError('Failure retrieving something'), + * } + * ); + * } + * ``` + */ +export type SortQuery = { + sort?: SortType; + order?: 'asc' | 'desc'; +}; + +/** + * Utility function to convert react-table `sortBy` state to `SortQuery` query parameter + * + * @param sortBy tableState.sortBy + * @param sortOptions SortOptions - either defined manually, or generated with `sortOptionsFromColumns` + * @returns SortQuery - object usable by react-query functions that have params extending SortQuery + */ +export function withSortQuery( + sortBy: SortableTableSettings['sortBy'], + sortOptions: T +): SortQuery { + if (!sortBy) { + return { + sort: undefined, + order: 'asc', + }; + } + + const isSortType = makeIsSortTypeFunc(sortOptions); + return { + sort: isSortType(sortBy.id) ? sortBy.id : undefined, + order: sortBy.desc ? 'desc' : 'asc', + }; +} + +/** + * Utility function to generate SortOptions from columns definitions + * @param columns Column-like objects { id?:string; enableSorting?:boolean } to extract SortOptions from + * @returns SortOptions + */ +export function sortOptionsFromColumns( + columns: { id?: string; enableSorting?: boolean; accessorKey?: string }[] +): SortOptions { + return compact( + columns.map((c) => + c.enableSorting === false ? undefined : c.id ?? c.accessorKey + ) + ); +} diff --git a/app/react/common/date-utils.ts b/app/react/common/date-utils.ts new file mode 100644 index 000000000..608400ac8 --- /dev/null +++ b/app/react/common/date-utils.ts @@ -0,0 +1,16 @@ +/** + * Format a date to a human-readable string based on the user's locale. 
+ */ +export function localizeDate(date: Date) { + return date + .toLocaleString(undefined, { + year: 'numeric', + month: 'short', + day: 'numeric', + hour: 'numeric', + minute: '2-digit', + hour12: true, + }) + .replace('am', 'AM') + .replace('pm', 'PM'); +} diff --git a/app/react/common/string-utils.ts b/app/react/common/string-utils.ts new file mode 100644 index 000000000..591d6de90 --- /dev/null +++ b/app/react/common/string-utils.ts @@ -0,0 +1,27 @@ +export function capitalize(s: string) { + return s.slice(0, 1).toUpperCase() + s.slice(1); +} + +export function pluralize(val: number, word: string, plural = `${word}s`) { + return [1, -1].includes(Number(val)) ? word : plural; +} + +export function addPlural(value: number, word: string, plural = `${word}s`) { + return `${value} ${pluralize(value, word, plural)}`; +} + +/** + * Joins an array of strings into a grammatically correct sentence. + */ +export function grammaticallyJoin( + values: string[], + separator = ', ', + lastSeparator = ' and ' +) { + if (values.length === 0) return ''; + if (values.length === 1) return values[0]; + + const allButLast = values.slice(0, -1); + const last = values[values.length - 1]; + return `${allButLast.join(separator)}${lastSeparator}${last}`; +} diff --git a/app/react/common/utils/numbers.test.ts b/app/react/common/utils/numbers.test.ts new file mode 100644 index 000000000..0cd6004a5 --- /dev/null +++ b/app/react/common/utils/numbers.test.ts @@ -0,0 +1,75 @@ +/* eslint-disable @typescript-eslint/no-loss-of-precision */ + +import { abbreviateNumber } from './numbers'; + +describe('abbreviateNumber', () => { + test('errors', () => { + expect(() => abbreviateNumber(Number.NaN)).toThrowError(); + expect(() => abbreviateNumber(1, -1)).toThrowError(); + expect(() => abbreviateNumber(1, 21)).toThrowError(); + }); + + test('zero', () => { + expect(abbreviateNumber(0)).toBe('0'); + expect(abbreviateNumber(-0)).toBe('0'); + }); + + test('decimals=0', () => { + const cases: [number, string][] = [ + [123, '123'], + [123_123, '123k'], + [123_123_123, '123M'], + [123_123_123_123, '123G'], + [123_123_123_123_123, '123T'], + [123_123_123_123_123_123, '123P'], + [123_123_123_123_123_123_123, '123E'], + [123_123_123_123_123_123_123_123, '123Z'], + [123_123_123_123_123_123_123_123_123, '123Y'], + [123_123_123_123_123_123_123_123_123_123, '123123Y'], + ]; + cases.forEach(([num, str]) => { + expect(abbreviateNumber(num, 0)).toBe(str); + expect(abbreviateNumber(-num, 0)).toBe(`-${str}`); + }); + }); + + test('decimals=1 (default)', () => { + const cases: [number, string][] = [ + [123, '123'], + [123_123, '123.1k'], + [123_123_123, '123.1M'], + [123_123_123_123, '123.1G'], + [123_123_123_123_123, '123.1T'], + [123_123_123_123_123_123, '123.1P'], + [123_123_123_123_123_123_123, '123.1E'], + [123_123_123_123_123_123_123_123, '123.1Z'], + [123_123_123_123_123_123_123_123_123, '123.1Y'], + [123_123_123_123_123_123_123_123_123_123, '123123.1Y'], + ]; + cases.forEach(([num, str]) => { + expect(abbreviateNumber(num)).toBe(str); + expect(abbreviateNumber(-num)).toBe(`-${str}`); + }); + }); + + test('decimals=10', () => { + const cases: [number, string][] = [ + [123, '123'], + [123_123, '123.123k'], + [123_123_123, '123.123123M'], + [123_123_123_123, '123.123123123G'], + [123_123_123_123_123, '123.1231231231T'], + [123_123_123_123_123_123, '123.1231231231P'], + [123_123_123_123_123_123_123, '123.1231231231E'], + [123_123_123_123_123_123_123_123, '123.1231231231Z'], + [123_123_123_123_123_123_123_123_123, '123.1231231231Y'], + 
[123_123_123_123_123_123_123_123_123_123, '123123.1231231231Y'], + ]; + cases.forEach(([num, str]) => { + expect(abbreviateNumber(num, 10)).toBe(str); + expect(abbreviateNumber(-num, 10)).toBe(`-${str}`); + }); + }); +}); + +/* eslint-enable @typescript-eslint/no-loss-of-precision */ diff --git a/app/react/common/utils/numbers.ts b/app/react/common/utils/numbers.ts new file mode 100644 index 000000000..617da47ae --- /dev/null +++ b/app/react/common/utils/numbers.ts @@ -0,0 +1,45 @@ +const suffixes = ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']; + +/** + * Converts a number to a human-readable abbreviated format + * Uses base 10 and standard SI prefixes + * + * @param num - The number to abbreviate + * @param decimals - Number of decimal places (default: 1) + * @returns Abbreviated number as string (e.g., "90k", "123M") + */ +export function abbreviateNumber(num: number, decimals: number = 1): string { + if (Number.isNaN(num)) { + throw new Error('Invalid number'); + } + + if (decimals < 0 || decimals > 20) { + throw new Error('Invalid decimals. Must be in [0;20] range'); + } + + const isNegative = num < 0; + const absNum = Math.abs(num); + + if (absNum === 0) { + return '0'; + } + + let exponent = Math.floor(Math.log10(absNum) / 3); + + if (exponent > suffixes.length - 1) { + exponent = suffixes.length - 1; + } + + if (exponent < 0) { + exponent = 0; + } + + const value = absNum / 1000 ** exponent; + + const roundedValue = + exponent > 0 ? Number(value.toFixed(decimals)) : Math.floor(value); + + const finalValue = isNegative ? -roundedValue : roundedValue; + + return `${finalValue}${suffixes[exponent]}`; +} diff --git a/app/react/components/Alert/Alert.tsx b/app/react/components/Alert/Alert.tsx index 02c6e995e..a61e62c98 100644 --- a/app/react/components/Alert/Alert.tsx +++ b/app/react/components/Alert/Alert.tsx @@ -6,36 +6,36 @@ import { Icon } from '@@/Icon'; type AlertType = 'success' | 'error' | 'info' | 'warn'; -const alertSettings: Record< +export const alertSettings: Record< AlertType, { container: string; header: string; body: string; icon: ReactNode } > = { success: { container: - 'border-green-4 bg-green-2 th-dark:bg-green-3 th-dark:border-green-5', - header: 'text-green-8', - body: 'text-green-7', + 'border-green-4 bg-green-2 th-dark:bg-green-10 th-dark:border-green-8 th-highcontrast:bg-green-10 th-highcontrast:border-green-8', + header: 'text-green-8 th-dark:text-white th-highcontrast:text-white', + body: 'text-green-7 th-dark:text-white th-highcontrast:text-white', icon: CheckCircle, }, error: { container: - 'border-error-4 bg-error-2 th-dark:bg-error-3 th-dark:border-error-5', - header: 'text-error-8', - body: 'text-error-7', + 'border-error-4 bg-error-2 th-dark:bg-error-10 th-dark:border-error-8 th-highcontrast:bg-error-10 th-highcontrast:border-error-8', + header: 'text-error-8 th-dark:text-white th-highcontrast:text-white', + body: 'text-error-7 th-dark:text-white th-highcontrast:text-white', icon: XCircle, }, info: { container: - 'border-blue-4 bg-blue-2 th-dark:bg-blue-3 th-dark:border-blue-5', - header: 'text-blue-8', - body: 'text-blue-7', + 'border-blue-4 bg-blue-2 th-dark:bg-blue-10 th-dark:border-blue-8 th-highcontrast:bg-blue-10 th-highcontrast:border-blue-8', + header: 'text-blue-8 th-dark:text-white th-highcontrast:text-white', + body: 'text-blue-7 th-dark:text-white th-highcontrast:text-white', icon: AlertCircle, }, warn: { container: - 'border-warning-4 bg-warning-2 th-dark:bg-warning-3 th-dark:border-warning-5', - header: 'text-warning-8', - body: 
'text-warning-7', + 'border-warning-4 bg-warning-2 th-dark:bg-warning-10 th-dark:border-warning-8 th-highcontrast:bg-warning-10 th-highcontrast:border-warning-8', + header: 'text-warning-8 th-dark:text-white th-highcontrast:text-white', + body: 'text-warning-7 th-dark:text-white th-highcontrast:text-white', icon: AlertTriangle, }, }; @@ -76,12 +76,18 @@ export function Alert({ ); } -function AlertContainer({ +export function AlertContainer({ className, children, }: PropsWithChildren<{ className?: string }>) { return ( -
+
{children}
); diff --git a/app/react/components/Badge/Badge.stories.tsx b/app/react/components/Badge/Badge.stories.tsx index 41f42cbc8..c7b958502 100644 --- a/app/react/components/Badge/Badge.stories.tsx +++ b/app/react/components/Badge/Badge.stories.tsx @@ -18,6 +18,7 @@ export default { 'dangerSecondary', 'warnSecondary', 'infoSecondary', + 'muted', ], }, }, @@ -35,6 +36,7 @@ function Template({ type = 'success' }: Props) { dangerSecondary: 'dangerSecondary badge', warnSecondary: 'warnSecondary badge', infoSecondary: 'infoSecondary badge', + muted: 'muted badge', }; return {message[type]}; } diff --git a/app/react/components/Badge/Badge.tsx b/app/react/components/Badge/Badge.tsx index e31e3b122..f0a4fc55c 100644 --- a/app/react/components/Badge/Badge.tsx +++ b/app/react/components/Badge/Badge.tsx @@ -1,6 +1,8 @@ import clsx from 'clsx'; import { PropsWithChildren } from 'react'; +import { AutomationTestingProps } from '@/types'; + export type BadgeType = | 'success' | 'danger' @@ -9,7 +11,8 @@ export type BadgeType = | 'successSecondary' | 'dangerSecondary' | 'warnSecondary' - | 'infoSecondary'; + | 'infoSecondary' + | 'muted'; // the classes are typed in full because tailwind doesn't render the interpolated classes const typeClasses: Record = { @@ -54,6 +57,11 @@ const typeClasses: Record = { 'th-dark:text-blue-3 th-dark:bg-blue-9', 'th-highcontrast:text-blue-3 th-highcontrast:bg-blue-9' ), + muted: clsx( + 'text-gray-9 bg-gray-3', + 'th-dark:text-gray-3 th-dark:bg-gray-9', + 'th-highcontrast:text-gray-3 th-highcontrast:bg-gray-9' + ), }; export interface Props { @@ -67,7 +75,8 @@ export function Badge({ type = 'info', className, children, -}: PropsWithChildren) { + 'data-cy': dataCy, +}: PropsWithChildren & Partial) { const baseClasses = 'inline-flex w-fit items-center !text-xs font-medium rounded-full px-2 py-0.5'; @@ -75,6 +84,7 @@ export function Badge({ {children} diff --git a/app/react/components/Badge/ExternalBadge.tsx b/app/react/components/Badge/ExternalBadge.tsx index 5cf85b844..e49afd07a 100644 --- a/app/react/components/Badge/ExternalBadge.tsx +++ b/app/react/components/Badge/ExternalBadge.tsx @@ -1,5 +1,9 @@ import { Badge } from '@@/Badge'; -export function ExternalBadge() { - return External; +export function ExternalBadge({ className }: { className?: string }) { + return ( + + External + + ); } diff --git a/app/react/components/Badge/SystemBadge.tsx b/app/react/components/Badge/SystemBadge.tsx index e09b944ff..204d01bbd 100644 --- a/app/react/components/Badge/SystemBadge.tsx +++ b/app/react/components/Badge/SystemBadge.tsx @@ -1,5 +1,9 @@ import { Badge } from '@@/Badge'; -export function SystemBadge() { - return System; +export function SystemBadge({ className }: { className?: string }) { + return ( + + System + + ); } diff --git a/app/react/components/Blocklist/BlocklistItem.stories.tsx b/app/react/components/Blocklist/BlocklistItem.stories.tsx new file mode 100644 index 000000000..e38d1933e --- /dev/null +++ b/app/react/components/Blocklist/BlocklistItem.stories.tsx @@ -0,0 +1,75 @@ +import type { Meta, StoryObj } from '@storybook/react'; + +import { localizeDate } from '@/react/common/date-utils'; + +import { Badge } from '@@/Badge'; + +import { BlocklistItem } from './BlocklistItem'; + +const meta: Meta = { + title: 'Components/Blocklist/BlocklistItem', + component: BlocklistItem, + parameters: { + layout: 'centered', + }, + tags: ['autodocs'], + decorators: [ + (Story) => ( +
+ +
+ ), + ], +}; + +export default meta; +type Story = StoryObj; + +export const Default: Story = { + args: { + children: 'Default Blocklist Item', + }, +}; + +export const Selected: Story = { + args: { + children: 'Selected Blocklist Item', + isSelected: true, + }, +}; + +export const AsDiv: Story = { + args: { + children: 'Blocklist Item as div', + as: 'div', + }, +}; + +export const WithCustomContent: Story = { + args: { + children: ( +
+
+ Deployed + Revision #4 +
+
+ my-app-1.0.0 + + {localizeDate(new Date('2000-01-01'))} + +
+
+ ), + }, +}; + +export const MultipleItems: Story = { + render: () => ( +
+ First Item + Second Item (Selected) + Third Item +
+ ), +}; diff --git a/app/react/components/Card/Card.tsx b/app/react/components/Card/Card.tsx index 1839f649e..b5e8bf5f1 100644 --- a/app/react/components/Card/Card.tsx +++ b/app/react/components/Card/Card.tsx @@ -10,7 +10,7 @@ export function Card({ className, children }: PropsWithChildren) {
{children} diff --git a/app/react/components/CodeEditor.tsx b/app/react/components/CodeEditor.tsx deleted file mode 100644 index 5850fae87..000000000 --- a/app/react/components/CodeEditor.tsx +++ /dev/null @@ -1,155 +0,0 @@ -import CodeMirror from '@uiw/react-codemirror'; -import { StreamLanguage, LanguageSupport } from '@codemirror/language'; -import { yaml } from '@codemirror/legacy-modes/mode/yaml'; -import { dockerFile } from '@codemirror/legacy-modes/mode/dockerfile'; -import { shell } from '@codemirror/legacy-modes/mode/shell'; -import { useCallback, useMemo, useState } from 'react'; -import { createTheme } from '@uiw/codemirror-themes'; -import { tags as highlightTags } from '@lezer/highlight'; - -import { AutomationTestingProps } from '@/types'; - -import { CopyButton } from '@@/buttons/CopyButton'; - -import { useDebounce } from '../hooks/useDebounce'; - -import styles from './CodeEditor.module.css'; -import { TextTip } from './Tip/TextTip'; -import { StackVersionSelector } from './StackVersionSelector'; - -type Type = 'yaml' | 'shell' | 'dockerfile'; -interface Props extends AutomationTestingProps { - id: string; - placeholder?: string; - type?: Type; - readonly?: boolean; - onChange: (value: string) => void; - value: string; - height?: string; - versions?: number[]; - onVersionChange?: (version: number) => void; -} - -const theme = createTheme({ - theme: 'light', - settings: { - background: 'var(--bg-codemirror-color)', - foreground: 'var(--text-codemirror-color)', - caret: 'var(--border-codemirror-cursor-color)', - selection: 'var(--bg-codemirror-selected-color)', - selectionMatch: 'var(--bg-codemirror-selected-color)', - gutterBackground: 'var(--bg-codemirror-gutters-color)', - }, - styles: [ - { tag: highlightTags.atom, color: 'var(--text-cm-default-color)' }, - { tag: highlightTags.meta, color: 'var(--text-cm-meta-color)' }, - { - tag: [highlightTags.string, highlightTags.special(highlightTags.brace)], - color: 'var(--text-cm-string-color)', - }, - { tag: highlightTags.number, color: 'var(--text-cm-number-color)' }, - { tag: highlightTags.keyword, color: 'var(--text-cm-keyword-color)' }, - { tag: highlightTags.comment, color: 'var(--text-cm-comment-color)' }, - { - tag: highlightTags.variableName, - color: 'var(--text-cm-variable-name-color)', - }, - ], -}); - -const yamlLanguage = new LanguageSupport(StreamLanguage.define(yaml)); -const dockerFileLanguage = new LanguageSupport( - StreamLanguage.define(dockerFile) -); -const shellLanguage = new LanguageSupport(StreamLanguage.define(shell)); - -const docTypeExtensionMap: Record = { - yaml: yamlLanguage, - dockerfile: dockerFileLanguage, - shell: shellLanguage, -}; - -export function CodeEditor({ - id, - onChange, - placeholder, - readonly, - value, - versions, - onVersionChange, - height = '500px', - type, - 'data-cy': dataCy, -}: Props) { - const [isRollback, setIsRollback] = useState(false); - - const extensions = useMemo(() => { - const extensions = []; - if (type && docTypeExtensionMap[type]) { - extensions.push(docTypeExtensionMap[type]); - } - return extensions; - }, [type]); - - const handleVersionChange = useCallback( - (version: number) => { - if (versions && versions.length > 1) { - setIsRollback(version < versions[0]); - } - onVersionChange?.(version); - }, - [onVersionChange, versions] - ); - - const [debouncedValue, debouncedOnChange] = useDebounce(value, onChange); - - return ( - <> -
-
-
- {!!placeholder && {placeholder}} -
- -
- - Copy to clipboard - -
-
- {versions && ( -
-
- -
-
- )} -
- - - ); -} diff --git a/app/react/components/CodeEditor.module.css b/app/react/components/CodeEditor/CodeEditor.module.css similarity index 61% rename from app/react/components/CodeEditor.module.css rename to app/react/components/CodeEditor/CodeEditor.module.css index d7ff66ce2..6a02d3ed5 100644 --- a/app/react/components/CodeEditor.module.css +++ b/app/react/components/CodeEditor/CodeEditor.module.css @@ -11,6 +11,8 @@ --bg-codemirror-gutters-color: var(--grey-17); --bg-codemirror-selected-color: var(--grey-22); --border-codemirror-cursor-color: var(--black-color); + --bg-tooltip-color: var(--white-color); + --text-tooltip-color: var(--black-color); } :global([theme='dark']) .root { @@ -24,6 +26,8 @@ --bg-codemirror-gutters-color: var(--grey-3); --bg-codemirror-selected-color: var(--grey-3); --border-codemirror-cursor-color: var(--white-color); + --bg-tooltip-color: var(--grey-3); + --text-tooltip-color: var(--white-color); } :global([theme='highcontrast']) .root { @@ -37,21 +41,39 @@ --bg-codemirror-gutters-color: var(--ui-gray-warm-11); --bg-codemirror-selected-color: var(--grey-3); --border-codemirror-cursor-color: var(--white-color); + --bg-tooltip-color: var(--black-color); + --text-tooltip-color: var(--white-color); } .root :global(.cm-editor .cm-gutters) { border-right: 0px; + @apply bg-gray-2 th-dark:bg-gray-10 th-highcontrast:bg-black; +} + +.root :global(.cm-merge-b) { + @apply border-0 border-l border-solid border-l-gray-5 th-dark:border-l-gray-7 th-highcontrast:border-l-gray-2; } .root :global(.cm-editor .cm-gutters .cm-lineNumbers .cm-gutterElement) { text-align: left; } -.root :global(.cm-editor), -.root :global(.cm-editor .cm-scroller) { +.codeEditor :global(.cm-editor), +.codeEditor :global(.cm-editor .cm-scroller) { border-radius: 8px; } +/* code mirror merge side-by-side editor */ +.root :global(.cm-merge-a), +.root :global(.cm-merge-a .cm-scroller) { + @apply !rounded-r-none; +} + +.root :global(.cm-merge-b), +.root :global(.cm-merge-b .cm-scroller) { + @apply !rounded-l-none; +} + /* Search Panel */ /* Ideally we would use a react component for that, but this is the easy solution for onw */ @@ -119,9 +141,11 @@ } .root :global(.cm-content[aria-readonly='true']) { - @apply bg-gray-3; - @apply th-dark:bg-gray-iron-10; - @apply th-highcontrast:bg-black; + /* make sure the bg has transparency, so that the selected text is visible */ + /* https://discuss.codemirror.net/t/how-do-i-get-selected-text-to-highlight/7115/2 */ + @apply bg-gray-3/50; + @apply th-dark:bg-gray-iron-10/50; + @apply th-highcontrast:bg-black/50; } .root :global(.cm-textfield) { @@ -138,3 +162,47 @@ .root :global(.cm-panel.cm-search label) { @apply text-xs; } + +/* Tooltip styles for all themes */ +.root :global(.cm-tooltip) { + @apply bg-white border border-solid border-gray-5 shadow-md text-xs rounded h-min; + @apply th-dark:bg-gray-9 th-dark:border-gray-7 th-dark:text-white; + @apply th-highcontrast:bg-black th-highcontrast:border-gray-7 th-highcontrast:text-white; +} + +/* Hide the completionInfo tooltip when it's empty */ +/* note: I only chose the complicated selector because the simple selector `.cm-tooltip.cm-completionInfo:empty` didn't work */ +.root :global(.cm-tooltip.cm-completionInfo:not(:has(*:not(:empty)))) { + display: none; +} + +/* Active line gutter styles for all themes */ +.root :global(.cm-activeLineGutter) { + @apply bg-inherit; +} + +/* Collapsed lines gutter styles for all themes */ +.root :global(.cm-editor .cm-collapsedLines) { + /* inherit bg, instead of using styles from 
library */ + background: inherit; + @apply bg-blue-2 th-dark:bg-blue-10 th-highcontrast:bg-white th-dark:text-white th-highcontrast:text-black; +} +.root :global(.cm-editor .cm-collapsedLines):hover { + @apply bg-blue-3 th-dark:bg-blue-9 th-highcontrast:bg-white th-dark:text-white th-highcontrast:text-black; +} + +.root :global(.cm-editor .cm-collapsedLines:before) { + content: '↧ Expand all'; + background: var(--bg-tooltip-color); + color: var(--text-tooltip-color); + padding: 4px 8px; + border-radius: 4px; + font-size: 12px; + white-space: nowrap; + z-index: 1000; + margin-left: 4px; +} +/* override the default content */ +.root :global(.cm-editor .cm-collapsedLines:after) { + content: ''; +} diff --git a/app/react/components/CodeEditor/CodeEditor.test.tsx b/app/react/components/CodeEditor/CodeEditor.test.tsx new file mode 100644 index 000000000..a269cd192 --- /dev/null +++ b/app/react/components/CodeEditor/CodeEditor.test.tsx @@ -0,0 +1,138 @@ +import { render, screen } from '@testing-library/react'; +import userEvent from '@testing-library/user-event'; +import { Extension } from '@codemirror/state'; + +import { CodeEditor } from './CodeEditor'; + +const mockExtension: Extension = { extension: [] }; +vi.mock('yaml-schema', () => ({ + // yamlSchema has 5 return values (all extensions) + yamlSchema: () => [ + mockExtension, + mockExtension, + mockExtension, + mockExtension, + mockExtension, + ], + yamlCompletion: () => () => ({}), +})); + +const defaultProps = { + id: 'test-editor', + onChange: vi.fn(), + value: '', + 'data-cy': 'test-editor', +}; + +beforeEach(() => { + vi.clearAllMocks(); +}); + +test('should render with basic props', () => { + render(); + expect(screen.getByRole('textbox')).toBeInTheDocument(); +}); + +test('should display placeholder when provided', async () => { + const placeholder = 'Enter your code here'; + const { findByText } = render( + + ); + + const placeholderText = await findByText(placeholder); + expect(placeholderText).toBeVisible(); +}); + +test('should show copy button and copy content', async () => { + const testValue = 'test content'; + const { findByText } = render( + + ); + + const mockClipboard = { + writeText: vi.fn(), + }; + Object.assign(navigator, { + clipboard: mockClipboard, + }); + + const copyButton = await findByText('Copy'); + expect(copyButton).toBeVisible(); + + await userEvent.click(copyButton); + expect(navigator.clipboard.writeText).toHaveBeenCalledWith(testValue); +}); + +test('should handle read-only mode', async () => { + const { findByRole } = render(); + const editor = await findByRole('textbox'); + // the editor should not editable + await userEvent.type(editor, 'test'); + expect(editor).not.toHaveValue('test'); +}); + +test('should show version selector when versions are provided', async () => { + const versions = [1, 2, 3]; + const onVersionChange = vi.fn(); + const { findByRole } = render( + + ); + + const selector = await findByRole('combobox'); + expect(selector).toBeVisible(); +}); + +test('should handle YAML indentation correctly', async () => { + const onChange = vi.fn(); + const yamlContent = 'services:'; + + const { findByRole } = render( + + ); + + const editor = await findByRole('textbox'); + await userEvent.type(editor, '{enter}'); + await userEvent.keyboard('database:'); + await userEvent.keyboard('{enter}'); + await userEvent.keyboard('image: nginx'); + await userEvent.keyboard('{enter}'); + await userEvent.keyboard('name: database'); + + // Wait for the debounced onChange to be called + setTimeout(() => { + 
expect(onChange).toHaveBeenCalledWith( + 'services:\n database:\n image: nginx\n name: database' + ); + // debounce timeout is 300ms, so 500ms is enough + }, 500); +}); + +test('should apply custom height', async () => { + const customHeight = '300px'; + const { findByRole } = render( + + ); + + const editor = await findByRole('textbox'); + expect(editor).toHaveStyle({ height: customHeight }); +}); + +test('should render with file name header when provided', async () => { + const fileName = 'example.yaml'; + const testValue = 'file content'; + const { findByText } = render( + + ); + + expect(await findByText(fileName)).toBeInTheDocument(); + expect(await findByText(testValue)).toBeInTheDocument(); +}); diff --git a/app/react/components/CodeEditor/CodeEditor.tsx b/app/react/components/CodeEditor/CodeEditor.tsx new file mode 100644 index 000000000..d841400f0 --- /dev/null +++ b/app/react/components/CodeEditor/CodeEditor.tsx @@ -0,0 +1,162 @@ +import CodeMirror from '@uiw/react-codemirror'; +import { useCallback, useState } from 'react'; +import { createTheme } from '@uiw/codemirror-themes'; +import { tags as highlightTags } from '@lezer/highlight'; +import type { JSONSchema7 } from 'json-schema'; +import clsx from 'clsx'; + +import { AutomationTestingProps } from '@/types'; + +import { CopyButton } from '@@/buttons/CopyButton'; + +import { useDebounce } from '../../hooks/useDebounce'; +import { TextTip } from '../Tip/TextTip'; +import { StackVersionSelector } from '../StackVersionSelector'; + +import styles from './CodeEditor.module.css'; +import { + useCodeEditorExtensions, + CodeEditorType, +} from './useCodeEditorExtensions'; +import { FileNameHeader, FileNameHeaderRow } from './FileNameHeader'; + +interface Props extends AutomationTestingProps { + id: string; + textTip?: string; + type?: CodeEditorType; + readonly?: boolean; + onChange?: (value: string) => void; + value: string; + height?: string; + versions?: number[]; + onVersionChange?: (version: number) => void; + schema?: JSONSchema7; + fileName?: string; + placeholder?: string; + showToolbar?: boolean; +} + +export const theme = createTheme({ + theme: 'light', + settings: { + background: 'var(--bg-codemirror-color)', + foreground: 'var(--text-codemirror-color)', + caret: 'var(--border-codemirror-cursor-color)', + selection: 'var(--bg-codemirror-selected-color)', + selectionMatch: 'var(--bg-codemirror-selected-color)', + }, + styles: [ + { tag: highlightTags.atom, color: 'var(--text-cm-default-color)' }, + { tag: highlightTags.meta, color: 'var(--text-cm-meta-color)' }, + { + tag: [highlightTags.string, highlightTags.special(highlightTags.brace)], + color: 'var(--text-cm-string-color)', + }, + { tag: highlightTags.number, color: 'var(--text-cm-number-color)' }, + { tag: highlightTags.keyword, color: 'var(--text-cm-keyword-color)' }, + { tag: highlightTags.comment, color: 'var(--text-cm-comment-color)' }, + { + tag: highlightTags.variableName, + color: 'var(--text-cm-variable-name-color)', + }, + ], +}); + +export function CodeEditor({ + id, + onChange = () => {}, + textTip, + readonly, + value, + versions, + onVersionChange, + height = '500px', + type, + schema, + 'data-cy': dataCy, + fileName, + placeholder, + showToolbar = true, +}: Props) { + const [isRollback, setIsRollback] = useState(false); + + const extensions = useCodeEditorExtensions(type, schema); + + const handleVersionChange = useCallback( + (version: number) => { + if (versions && versions.length > 1) { + setIsRollback(version < versions[0]); + } + 
onVersionChange?.(version); + }, + [onVersionChange, versions] + ); + + const [debouncedValue, debouncedOnChange] = useDebounce(value, onChange); + + return ( + <> + {showToolbar && ( +
+
+
+ {!!textTip && {textTip}} +
+ {/* the copy button is in the file name header when fileName is provided */} + {!fileName && (
+ + Copy + +
+ )} +
+ {versions && ( +
+
+ +
+
+ )} +
+ )} +
+ {fileName && ( + + + + )} + +
+ + ); +} diff --git a/app/react/components/CodeEditor/DiffViewer.test.tsx b/app/react/components/CodeEditor/DiffViewer.test.tsx new file mode 100644 index 000000000..9e8b1b8ec --- /dev/null +++ b/app/react/components/CodeEditor/DiffViewer.test.tsx @@ -0,0 +1,101 @@ +import { render } from '@testing-library/react'; +import { vi, describe, it, expect, beforeEach } from 'vitest'; + +import { DiffViewer } from './DiffViewer'; + +// Mock CodeMirror +vi.mock('@uiw/react-codemirror', () => ({ + __esModule: true, + default: () =>
, + oneDarkHighlightStyle: {}, + keymap: { + of: () => ({}), + }, +})); + +// Mock react-codemirror-merge +vi.mock('react-codemirror-merge', () => { + function CodeMirrorMerge({ children }: { children: React.ReactNode }) { + return
{children}
; + } + function Original({ value }: { value: string }) { + return
{value}
; + } + function Modified({ value }: { value: string }) { + return
{value}
; + } + + CodeMirrorMerge.Original = Original; + CodeMirrorMerge.Modified = Modified; + + return { + __esModule: true, + default: CodeMirrorMerge, + CodeMirrorMerge, + }; +}); + +describe('DiffViewer', () => { + beforeEach(() => { + // Clear any mocks or state before each test + vi.clearAllMocks(); + }); + + it('should render with basic props', () => { + const { getByText } = render( + + ); + + // Check if the component renders with the expected content + expect(getByText('Original text')).toBeInTheDocument(); + expect(getByText('New text')).toBeInTheDocument(); + }); + + it('should render with file name headers when provided', () => { + const { getByText } = render( + + ); + + // Look for elements with the expected class structure + const headerOriginal = getByText('Original File'); + const headerModified = getByText('Modified File'); + expect(headerOriginal).toBeInTheDocument(); + expect(headerModified).toBeInTheDocument(); + }); + + it('should apply custom height when provided', () => { + const customHeight = '800px'; + const { container } = render( + + ); + + // Find the element with the style containing the height + const divWithStyle = container.querySelector('[style*="height"]'); + expect(divWithStyle).toBeInTheDocument(); + + // Check that the style contains the expected height + expect(divWithStyle?.getAttribute('style')).toContain( + `height: ${customHeight}` + ); + }); +}); diff --git a/app/react/components/CodeEditor/DiffViewer.tsx b/app/react/components/CodeEditor/DiffViewer.tsx new file mode 100644 index 000000000..9a66407af --- /dev/null +++ b/app/react/components/CodeEditor/DiffViewer.tsx @@ -0,0 +1,138 @@ +import CodeMirrorMerge from 'react-codemirror-merge'; +import clsx from 'clsx'; + +import { AutomationTestingProps } from '@/types'; + +import { FileNameHeader, FileNameHeaderRow } from './FileNameHeader'; +import styles from './CodeEditor.module.css'; +import { + CodeEditorType, + useCodeEditorExtensions, +} from './useCodeEditorExtensions'; +import { theme } from './CodeEditor'; + +const { Original } = CodeMirrorMerge; +const { Modified } = CodeMirrorMerge; + +type Props = { + originalCode: string; + newCode: string; + id: string; + type?: CodeEditorType; + placeholder?: string; + height?: string; + fileNames?: { + original: string; + modified: string; + }; + className?: string; +} & AutomationTestingProps; + +const defaultCollapseUnchanged = { + margin: 10, + minSize: 10, +}; + +export function DiffViewer({ + originalCode, + newCode, + id, + 'data-cy': dataCy, + type, + placeholder = 'No values found', + + height = '500px', + fileNames, + className, +}: Props) { + const extensions = useCodeEditorExtensions(type); + const hasFileNames = !!fileNames?.original && !!fileNames?.modified; + return ( +
+ {hasFileNames && ( + + )} + {/* additional div, so that the scroll gutter doesn't overlap with the rounded border, and always show scrollbar, so that the file name headers align */} +
+ .cm-scroller]:!min-h-[var(--editor-min-height)]' + )} + id={id} + data-cy={dataCy} + collapseUnchanged={defaultCollapseUnchanged} + > + + + +
+
+ ); +} + +function DiffFileNameHeaders({ + originalCopyText, + modifiedCopyText, + originalFileName, + modifiedFileName, +}: { + originalCopyText: string; + modifiedCopyText: string; + originalFileName: string; + modifiedFileName: string; +}) { + return ( + +
+ +
+
+
+ +
+ + ); +} diff --git a/app/react/components/CodeEditor/FileNameHeader.tsx b/app/react/components/CodeEditor/FileNameHeader.tsx new file mode 100644 index 000000000..eb3418f2a --- /dev/null +++ b/app/react/components/CodeEditor/FileNameHeader.tsx @@ -0,0 +1,71 @@ +import clsx from 'clsx'; + +import { AutomationTestingProps } from '@/types'; + +import { CopyButton } from '@@/buttons/CopyButton'; + +type FileNameHeaderProps = { + fileName: string; + copyText: string; + className?: string; + style?: React.CSSProperties; +} & AutomationTestingProps; + +/** + * FileNameHeaderRow: Outer container for file name headers (single or multiple columns). + * Use this to wrap one or more components (and optional dividers). + */ +export function FileNameHeaderRow({ + children, + className, + style, +}: { + children: React.ReactNode; + className?: string; + style?: React.CSSProperties; +}) { + return ( +
+ {children} +
+ ); +} + +/** + * FileNameHeader: Renders a file name with a copy button, styled for use above a code editor or diff viewer. + * Should be used inside FileNameHeaderRow. + */ +export function FileNameHeader({ + fileName, + copyText, + className = '', + style, + 'data-cy': dataCy, +}: FileNameHeaderProps) { + return ( +
+ {fileName} + + Copy + +
+ ); +} diff --git a/app/react/components/CodeEditor/ShortcutsTooltip.tsx b/app/react/components/CodeEditor/ShortcutsTooltip.tsx new file mode 100644 index 000000000..e04cda2c8 --- /dev/null +++ b/app/react/components/CodeEditor/ShortcutsTooltip.tsx @@ -0,0 +1,52 @@ +import { BROWSER_OS_PLATFORM } from '@/react/constants'; + +import { Tooltip } from '@@/Tip/Tooltip'; + +const otherEditorConfig = { + tooltip: ( + <> +
<div>Ctrl+F - Start searching</div>
+      <div>Ctrl+G - Find next</div>
+      <div>Ctrl+Shift+G - Find previous</div>
+      <div>Ctrl+Shift+F - Replace</div>
+      <div>Ctrl+Shift+R - Replace all</div>
+      <div>Alt+G - Jump to line</div>
+      <div>Persistent search:</div>
+      <div>Enter - Find next</div>
+      <div>Shift+Enter - Find previous</div>
+    </>
+  ),
+  searchCmdLabel: 'Ctrl+F for search',
+} as const;
+
+export const editorConfig = {
+  mac: {
+    tooltip: (
+      <>
+        <div>Cmd+F - Start searching</div>
+        <div>Cmd+G - Find next</div>
+        <div>Cmd+Shift+G - Find previous</div>
+        <div>Cmd+Option+F - Replace</div>
+        <div>Cmd+Option+R - Replace all</div>
+        <div>Option+G - Jump to line</div>
+        <div>Persistent search:</div>
+        <div>Enter - Find next</div>
+        <div>Shift+Enter - Find previous</div>
+      </>
+    ),
+    searchCmdLabel: 'Cmd+F for search',
+  },
+
+  lin: otherEditorConfig,
+  win: otherEditorConfig,
+} as const;
+
+export function ShortcutsTooltip() {
+  return (
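+    // pick the shortcut hints that match the detected OS (BROWSER_OS_PLATFORM)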
+ {editorConfig[BROWSER_OS_PLATFORM].searchCmdLabel} + + +
+ ); +} diff --git a/app/react/components/CodeEditor/index.ts b/app/react/components/CodeEditor/index.ts new file mode 100644 index 000000000..aa4b20325 --- /dev/null +++ b/app/react/components/CodeEditor/index.ts @@ -0,0 +1 @@ +export * from './CodeEditor'; diff --git a/app/react/components/CodeEditor/useCodeEditorExtensions.ts b/app/react/components/CodeEditor/useCodeEditorExtensions.ts new file mode 100644 index 000000000..8050b59da --- /dev/null +++ b/app/react/components/CodeEditor/useCodeEditorExtensions.ts @@ -0,0 +1,91 @@ +import { useMemo } from 'react'; +import { + StreamLanguage, + LanguageSupport, + syntaxHighlighting, + indentService, +} from '@codemirror/language'; +import { dockerFile } from '@codemirror/legacy-modes/mode/dockerfile'; +import { shell } from '@codemirror/legacy-modes/mode/shell'; +import { + oneDarkHighlightStyle, + keymap, + Extension, +} from '@uiw/react-codemirror'; +import type { JSONSchema7 } from 'json-schema'; +import { lintKeymap, lintGutter } from '@codemirror/lint'; +import { defaultKeymap } from '@codemirror/commands'; +import { autocompletion, completionKeymap } from '@codemirror/autocomplete'; +import { yamlCompletion, yamlSchema } from 'yaml-schema'; +import { compact } from 'lodash'; +import { lineNumbers } from '@codemirror/view'; + +export type CodeEditorType = 'yaml' | 'shell' | 'dockerfile'; + +// Custom indentation service for YAML +const yamlIndentExtension = indentService.of((context, pos) => { + const prevLine = context.lineAt(pos, -1); + const prevIndent = /^\s*/.exec(prevLine.text)?.[0].length || 0; + if (/:\s*$/.test(prevLine.text)) { + return prevIndent + 2; + } + return prevIndent; +}); + +const dockerFileLanguage = new LanguageSupport( + StreamLanguage.define(dockerFile) +); +const shellLanguage = new LanguageSupport(StreamLanguage.define(shell)); + +function yamlLanguage(schema?: JSONSchema7) { + const [yaml, linter, , , stateExtensions] = yamlSchema(schema); + + return compact([ + yaml, + linter, + stateExtensions, + yamlIndentExtension, + syntaxHighlighting(oneDarkHighlightStyle), + // explicitly setting lineNumbers() as an extension ensures that the gutter order is the same between the diff viewer and the code editor + lineNumbers(), + !!schema && lintGutter(), + keymap.of([...defaultKeymap, ...completionKeymap, ...lintKeymap]), + // only show completions when a schema is provided + !!schema && + autocompletion({ + icons: false, + activateOnTypingDelay: 300, + selectOnOpen: true, + activateOnTyping: true, + override: [ + (ctx) => { + const getCompletions = yamlCompletion(); + const completions = getCompletions(ctx); + if (Array.isArray(completions)) { + return null; + } + completions.validFor = /^\w*$/; + return completions; + }, + ], + }), + ]); +} + +export function useCodeEditorExtensions( + type?: CodeEditorType, + schema?: JSONSchema7 +): Extension[] { + return useMemo(() => { + switch (type) { + case 'dockerfile': + return [dockerFileLanguage]; + case 'shell': + return [shellLanguage]; + case 'yaml': + return yamlLanguage(schema); + default: + return []; + } + }, [type, schema]); +} diff --git a/app/react/components/ExpandableMessageByLines.stories.tsx b/app/react/components/ExpandableMessageByLines.stories.tsx new file mode 100644 index 000000000..2a3c5a2a2 --- /dev/null +++ b/app/react/components/ExpandableMessageByLines.stories.tsx @@ -0,0 +1,115 @@ +import { Meta, StoryObj } from '@storybook/react'; + +import { ExpandableMessageByLines } from './ExpandableMessageByLines'; + +export default { + component: 
ExpandableMessageByLines, + title: 'Components/ExpandableMessageByLines', + argTypes: { + maxLines: { + control: { + type: 'select', + options: [2, 5, 10, 20, 50], + }, + description: 'Maximum number of lines to show before truncating', + }, + children: { + control: 'text', + description: 'The text content to display', + }, + }, +} as Meta; + +interface Args { + children: string; + maxLines?: 2 | 5 | 10 | 20 | 50; +} + +// Short text that won't be truncated +export const ShortText: StoryObj = { + args: { + children: 'This is a short message that should not be truncated.', + maxLines: 10, + }, +}; + +// Long text that will be truncated +export const LongText: StoryObj = { + args: { + children: `This is a very long message that should be truncated after the specified number of lines. +It contains multiple lines of text to demonstrate the expandable functionality. +The component will show a "Show more" button when the content exceeds the maxLines limit. +When clicked, it will expand to show the full content and change to "Show less". +This is useful for displaying long error messages, logs, or any text content that might be too long for the UI.`, + maxLines: 5, + }, +}; + +// Text with line breaks +export const TextWithLineBreaks: StoryObj = { + args: { + children: `Line 1: This is the first line +Line 2: This is the second line +Line 3: This is the third line +Line 4: This is the fourth line +Line 5: This is the fifth line +Line 6: This is the sixth line +Line 7: This is the seventh line +Line 8: This is the eighth line +Line 9: This is the ninth line +Line 10: This is the tenth line`, + maxLines: 5, + }, +}; + +// Very short maxLines +export const VeryShortMaxLines: StoryObj = { + args: { + children: `This text will be truncated after just 2 lines. +This is the second line. +This is the third line that should be hidden initially. +This is the fourth line that should also be hidden.`, + maxLines: 2, + }, +}; + +// Error message example +export const ErrorMessage: StoryObj = { + args: { + children: `Error: Failed to connect to the Docker daemon at unix:///var/run/docker.sock. +Is the docker daemon running? + +This error typically occurs when: +1. Docker daemon is not running +2. User doesn't have permission to access the Docker socket +3. Docker socket path is incorrect +4. Docker service has crashed + +To resolve this issue: +1. Start the Docker daemon: sudo systemctl start docker +2. Add user to docker group: sudo usermod -aG docker $USER +3. Verify Docker is running: docker ps +4. 
Check Docker socket permissions: ls -la /var/run/docker.sock`, + maxLines: 5, + }, +}; + +// Log output example +export const LogOutput: StoryObj = { + args: { + children: `2024-01-15T10:30:45.123Z INFO [ContainerService] Starting container nginx:latest +2024-01-15T10:30:45.234Z DEBUG [ContainerService] Container ID: abc123def456 +2024-01-15T10:30:45.345Z INFO [ContainerService] Container started successfully +2024-01-15T10:30:45.456Z DEBUG [NetworkService] Creating network bridge +2024-01-15T10:30:45.567Z INFO [NetworkService] Network created: portainer_network +2024-01-15T10:30:45.678Z DEBUG [VolumeService] Mounting volume /data +2024-01-15T10:30:45.789Z INFO [VolumeService] Volume mounted successfully +2024-01-15T10:30:45.890Z DEBUG [ContainerService] Setting up port mapping 80:80 +2024-01-15T10:30:45.901Z INFO [ContainerService] Port mapping configured +2024-01-15T10:30:45.912Z DEBUG [ContainerService] Setting environment variables +2024-01-15T10:30:45.923Z INFO [ContainerService] Environment variables set +2024-01-15T10:30:45.934Z DEBUG [ContainerService] Starting container process +2024-01-15T10:30:45.945Z INFO [ContainerService] Container process started`, + maxLines: 10, + }, +}; diff --git a/app/react/components/ExpandableMessageByLines.test.tsx b/app/react/components/ExpandableMessageByLines.test.tsx new file mode 100644 index 000000000..d403fcb9c --- /dev/null +++ b/app/react/components/ExpandableMessageByLines.test.tsx @@ -0,0 +1,137 @@ +import { render, screen } from '@testing-library/react'; +import userEvent from '@testing-library/user-event'; +import { vi } from 'vitest'; + +import { ExpandableMessageByLines } from '@@/ExpandableMessageByLines'; + +describe('ExpandableMessageByLines', () => { + // Mock scrollHeight and clientHeight for testing truncation + const mockScrollHeight = vi.fn(); + const mockClientHeight = vi.fn(); + + beforeEach(() => { + // Mock the properties on HTMLDivElement prototype + Object.defineProperty(HTMLDivElement.prototype, 'scrollHeight', { + get: mockScrollHeight, + configurable: true, + }); + + Object.defineProperty(HTMLDivElement.prototype, 'clientHeight', { + get: mockClientHeight, + configurable: true, + }); + }); + + afterEach(() => { + vi.clearAllMocks(); + }); + + describe('Basic Rendering', () => { + it('should render text content', () => { + const text = 'This is test content'; + // Mock non-truncated content (scrollHeight === clientHeight) + mockScrollHeight.mockReturnValue(100); + mockClientHeight.mockReturnValue(100); + + render({text}); + + expect(screen.getByText(text)).toBeInTheDocument(); + }); + + it('should show expand button only when text is truncated', () => { + const text = 'This is test content that should be truncated'; + // Mock truncated content (scrollHeight > clientHeight) + mockScrollHeight.mockReturnValue(300); + mockClientHeight.mockReturnValue(200); + + render({text}); + + expect( + screen.getByRole('button', { name: 'Show more' }) + ).toBeInTheDocument(); + expect( + screen.getByTestId('expandable-message-lines-button') + ).toBeInTheDocument(); + }); + + it('should hide expand button when text is not truncated', () => { + const text = 'Short text'; + // Mock non-truncated content (scrollHeight === clientHeight) + mockScrollHeight.mockReturnValue(50); + mockClientHeight.mockReturnValue(50); + + render({text}); + + expect(screen.queryByRole('button')).not.toBeInTheDocument(); + expect( + screen.queryByTestId('expandable-message-lines-button') + ).not.toBeInTheDocument(); + }); + }); + + describe('Expand/Collapse 
Functionality', () => { + it('should toggle between Show more and Show less when button is clicked', async () => { + const user = userEvent.setup(); + const text = + 'This is a long text that should be truncated and show the expand button'; + + // Mock truncated content (scrollHeight > clientHeight) + mockScrollHeight.mockReturnValue(400); + mockClientHeight.mockReturnValue(200); + + render({text}); + + const button = screen.getByRole('button'); + + // Initially should show "Show more" + expect(screen.getByText('Show more')).toBeInTheDocument(); + + // Click to expand + await user.click(button); + expect(screen.getByText('Show less')).toBeInTheDocument(); + + // Click to collapse + await user.click(button); + expect(screen.getByText('Show more')).toBeInTheDocument(); + }); + }); + + describe('Text Content Handling', () => { + it('should not show button for single space strings when not truncated', () => { + // Mock non-truncated content (scrollHeight === clientHeight) + mockScrollHeight.mockReturnValue(20); + mockClientHeight.mockReturnValue(20); + + render( ); + + expect(screen.queryByRole('button')).not.toBeInTheDocument(); + }); + + it('should show button for single space strings when truncated', () => { + // Mock truncated content (scrollHeight > clientHeight) + mockScrollHeight.mockReturnValue(100); + mockClientHeight.mockReturnValue(50); + + render( ); + + expect(screen.getByRole('button')).toBeInTheDocument(); + }); + + it('should handle different maxLines values', () => { + const longText = + 'Line 1\nLine 2\nLine 3\nLine 4\nLine 5\nLine 6\nLine 7\nLine 8\nLine 9\nLine 10\nLine 11\nLine 12'; + // Mock truncated content for 5 lines (scrollHeight > clientHeight) + mockScrollHeight.mockReturnValue(240); + mockClientHeight.mockReturnValue(100); + + render( + + {longText} + + ); + + expect(screen.getByRole('button')).toBeInTheDocument(); + expect(screen.getByText('Show more')).toBeInTheDocument(); + }); + }); +}); diff --git a/app/react/components/ExpandableMessageByLines.tsx b/app/react/components/ExpandableMessageByLines.tsx new file mode 100644 index 000000000..36bf1035c --- /dev/null +++ b/app/react/components/ExpandableMessageByLines.tsx @@ -0,0 +1,76 @@ +import { useRef, useState, useEffect, useCallback } from 'react'; + +import { Button } from '@@/buttons'; + +// use enum so that the tailwind classes aren't interpolated +type MaxLines = 2 | 5 | 10 | 20 | 50; +const lineClampClasses: Record = { + 2: 'line-clamp-[2]', + 5: 'line-clamp-[5]', + 10: 'line-clamp-[10]', + 20: 'line-clamp-[20]', + 50: 'line-clamp-[50]', +}; + +interface LineBasedProps { + children: string; + maxLines?: MaxLines; +} + +export function ExpandableMessageByLines({ + children, + maxLines = 10, +}: LineBasedProps) { + const [isExpanded, setIsExpanded] = useState(false); + const [isTruncated, setIsTruncated] = useState(false); + const contentRef = useRef(null); + + const checkTruncation = useCallback(() => { + const el = contentRef.current; + if (el) { + setIsTruncated(el.scrollHeight > el.clientHeight); + } + }, []); + + useEffect(() => { + checkTruncation(); + + // Use requestAnimationFrame for better performance + let rafId: number; + function handleResize() { + if (rafId) cancelAnimationFrame(rafId); + rafId = requestAnimationFrame(checkTruncation); + } + + window.addEventListener('resize', handleResize); + + return () => { + window.removeEventListener('resize', handleResize); + if (rafId) cancelAnimationFrame(rafId); + }; + }, [children, maxLines, checkTruncation, isExpanded]); + + return ( +
+
+ {children} +
+ {(isTruncated || isExpanded) && ( + + )} +
+ ); +} diff --git a/app/react/components/ExternalLink.tsx b/app/react/components/ExternalLink.tsx new file mode 100644 index 000000000..1bd839cad --- /dev/null +++ b/app/react/components/ExternalLink.tsx @@ -0,0 +1,32 @@ +import { ArrowUpRight } from 'lucide-react'; +import { PropsWithChildren } from 'react'; +import clsx from 'clsx'; + +import { AutomationTestingProps } from '@/types'; + +interface Props { + to: string; + className?: string; + showIcon?: boolean; +} + +export function ExternalLink({ + to, + className, + children, + showIcon = true, + 'data-cy': dataCy, +}: PropsWithChildren) { + return ( + + {children} + {showIcon && } + + ); +} diff --git a/app/react/components/FallbackImage.tsx b/app/react/components/FallbackImage.tsx index ee6956f24..eaa4f1272 100644 --- a/app/react/components/FallbackImage.tsx +++ b/app/react/components/FallbackImage.tsx @@ -27,5 +27,5 @@ export function FallbackImage({ src, fallbackIcon, alt, className }: Props) { } // fallback icon if there is an error loading the image - return <>{fallbackIcon}; + return
{fallbackIcon}
; } diff --git a/app/react/components/InformationPanel.tsx b/app/react/components/InformationPanel.tsx index b5c9dafc9..f25afadba 100644 --- a/app/react/components/InformationPanel.tsx +++ b/app/react/components/InformationPanel.tsx @@ -19,7 +19,7 @@ export function InformationPanel({ children, }: PropsWithChildren) { return ( - +
{title && ( diff --git a/app/react/components/InlineLoader/InlineLoader.tsx b/app/react/components/InlineLoader/InlineLoader.tsx index b5d75eae6..d91354de7 100644 --- a/app/react/components/InlineLoader/InlineLoader.tsx +++ b/app/react/components/InlineLoader/InlineLoader.tsx @@ -13,21 +13,21 @@ export type Props = { }; const sizeStyles: Record = { - xs: 'text-xs', - sm: 'text-sm', - md: 'text-md', + xs: 'text-xs gap-1', + sm: 'text-sm gap-2', + md: 'text-md gap-2', }; export function InlineLoader({ children, className, size = 'sm' }: Props) { return (
- + {children}
); diff --git a/app/react/components/NavTabs/index.ts b/app/react/components/NavTabs/index.ts index 20f2d03a6..cacdb47c6 100644 --- a/app/react/components/NavTabs/index.ts +++ b/app/react/components/NavTabs/index.ts @@ -1 +1,2 @@ export { NavTabs } from './NavTabs'; +export type { Option } from './NavTabs'; diff --git a/app/react/components/RadioGroup/RadioGroup.tsx b/app/react/components/RadioGroup/RadioGroup.tsx index 4380d1d9c..5aacf03c0 100644 --- a/app/react/components/RadioGroup/RadioGroup.tsx +++ b/app/react/components/RadioGroup/RadioGroup.tsx @@ -1,10 +1,19 @@ -import { Option } from '@@/form-components/PortainerSelect'; +import { ReactNode } from 'react'; + +// allow custom labels +export interface RadioGroupOption { + value: TValue; + label: ReactNode; + disabled?: boolean; +} interface Props { - options: Array> | ReadonlyArray>; + options: Array> | ReadonlyArray>; selectedOption: T; name: string; onOptionChange: (value: T) => void; + groupClassName?: string; + itemClassName?: string; } export function RadioGroup({ @@ -12,13 +21,18 @@ export function RadioGroup({ selectedOption, name, onOptionChange, + groupClassName, + itemClassName, }: Props) { return ( -
+
{options.map((option) => ( diff --git a/app/react/components/Sheet.tsx b/app/react/components/Sheet.tsx new file mode 100644 index 000000000..3d5983877 --- /dev/null +++ b/app/react/components/Sheet.tsx @@ -0,0 +1,159 @@ +import { + ComponentPropsWithoutRef, + forwardRef, + ElementRef, + PropsWithChildren, +} from 'react'; +import * as SheetPrimitive from '@radix-ui/react-dialog'; +import { cva, type VariantProps } from 'class-variance-authority'; +import clsx from 'clsx'; +import { RefreshCw, X } from 'lucide-react'; + +import { Button } from './buttons'; + +// modified from shadcn sheet component +const Sheet = SheetPrimitive.Root; + +const SheetTrigger = SheetPrimitive.Trigger; + +const SheetClose = SheetPrimitive.Close; + +const SheetPortal = SheetPrimitive.Portal; + +const SheetDescription = SheetPrimitive.Description; + +type SheetTitleProps = { + title: string; + onReload?(): Promise | void; +}; + +// similar to the PageHeader component with simplified props and no breadcrumbs +function SheetHeader({ + onReload, + title, + children, +}: PropsWithChildren) { + return ( +
+
+
+ + {title} + + {onReload ? ( + + ) : null} +
+ {children} +
+
+ ); +} + +const SheetOverlay = forwardRef< + ElementRef, + ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)); +SheetOverlay.displayName = SheetPrimitive.Overlay.displayName; + +const sheetVariants = cva( + 'fixed gap-4 bg-widget-color p-5 shadow-lg transition ease-in-out data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:duration-300 data-[state=open]:duration-500', + { + variants: { + side: { + top: 'inset-x-0 top-0 border-b data-[state=closed]:slide-out-to-top data-[state=open]:slide-in-from-top', + bottom: + 'inset-x-0 bottom-0 border-t data-[state=closed]:slide-out-to-bottom data-[state=open]:slide-in-from-bottom', + left: 'inset-y-0 left-0 h-full w-[70vw] lg:w-[50vw] border-r data-[state=closed]:slide-out-to-left data-[state=open]:slide-in-from-left max-w-2xl', + right: + 'inset-y-0 right-0 h-full w-[70vw] lg:w-[50vw] border-l data-[state=closed]:slide-out-to-right data-[state=open]:slide-in-from-right max-w-2xl', + }, + }, + defaultVariants: { + side: 'right', + }, + } +); + +interface SheetContentProps + extends ComponentPropsWithoutRef, + VariantProps { + showCloseButton?: boolean; +} + +const SheetContent = forwardRef< + ElementRef, + SheetContentProps +>( + ( + { + side = 'right', + className, + children, + title, + showCloseButton = true, + ...props + }, + ref + ) => ( + + + + {title ? : null} + {children} + {showCloseButton && ( + + + + )} + + + ) +); +SheetContent.displayName = SheetPrimitive.Content.displayName; + +export { + Sheet, + SheetPortal, + SheetOverlay, + SheetTrigger, + SheetClose, + SheetContent, + SheetDescription, + SheetHeader, +}; diff --git a/app/react/components/StatusBadge.tsx b/app/react/components/StatusBadge.tsx index 81b03f6ad..688fcd81f 100644 --- a/app/react/components/StatusBadge.tsx +++ b/app/react/components/StatusBadge.tsx @@ -3,6 +3,56 @@ import { AriaAttributes, PropsWithChildren } from 'react'; import { Icon, IconProps } from '@@/Icon'; +export type StatusBadgeType = + | 'success' + | 'danger' + | 'warning' + | 'info' + | 'successLite' + | 'dangerLite' + | 'warningLite' + | 'mutedLite' + | 'infoLite' + | 'default'; + +const typeClasses: Record = { + success: clsx( + 'text-white bg-success-7', + 'th-dark:text-white th-dark:bg-success-9' + ), + warning: clsx( + 'text-white bg-warning-7', + 'th-dark:text-white th-dark:bg-warning-9' + ), + danger: clsx( + 'text-white bg-error-7', + 'th-dark:text-white th-dark:bg-error-9' + ), + info: clsx('text-white bg-blue-7', 'th-dark:text-white th-dark:bg-blue-9'), + // the lite classes are a bit lighter in light mode and the same in dark mode + successLite: clsx( + 'text-success-9 bg-success-3', + 'th-dark:text-white th-dark:bg-success-9' + ), + warningLite: clsx( + 'text-warning-9 bg-warning-3', + 'th-dark:text-white th-dark:bg-warning-9' + ), + dangerLite: clsx( + 'text-error-9 bg-error-3', + 'th-dark:text-white th-dark:bg-error-9' + ), + mutedLite: clsx( + 'text-gray-9 bg-gray-3', + 'th-dark:text-white th-dark:bg-gray-9' + ), + infoLite: clsx( + 'text-blue-9 bg-blue-3', + 'th-dark:text-white th-dark:bg-blue-9' + ), + default: '', +}; + export function StatusBadge({ className, children, @@ -12,7 +62,7 @@ export function StatusBadge({ }: PropsWithChildren< { className?: string; - color?: 'success' | 'danger' | 'warning' | 'info' | 'default'; + color?: StatusBadgeType; icon?: IconProps['icon']; } & AriaAttributes >) { @@ -21,13 +71,8 @@ export function StatusBadge({ className={clsx( 'inline-flex items-center gap-1 rounded', 'w-fit px-1.5 py-0.5', - 'text-sm 
font-medium text-white', - { - 'bg-success-7 th-dark:bg-success-9': color === 'success', - 'bg-warning-7 th-dark:bg-warning-9': color === 'warning', - 'bg-error-7 th-dark:bg-error-9': color === 'danger', - 'bg-blue-9': color === 'info', - }, + 'text-sm font-medium', + typeClasses[color], className )} // eslint-disable-next-line react/jsx-props-no-spreading diff --git a/app/react/components/ViewLoading/ViewLoading.tsx b/app/react/components/ViewLoading/ViewLoading.tsx index 5ade961bb..139c67637 100644 --- a/app/react/components/ViewLoading/ViewLoading.tsx +++ b/app/react/components/ViewLoading/ViewLoading.tsx @@ -1,7 +1,4 @@ import clsx from 'clsx'; -import { Settings } from 'lucide-react'; - -import { Icon } from '@@/Icon'; import styles from './ViewLoading.module.css'; @@ -18,12 +15,7 @@ export function ViewLoading({ message }: Props) {
- {message && ( - - {message} - - - )} + {message && {message}}
); } diff --git a/app/react/components/WebEditorForm.tsx b/app/react/components/WebEditorForm.tsx index b9b95d4fd..84c37c0df 100644 --- a/app/react/components/WebEditorForm.tsx +++ b/app/react/components/WebEditorForm.tsx @@ -1,61 +1,21 @@ import { + ReactNode, ComponentProps, PropsWithChildren, - ReactNode, - useEffect, useMemo, + useEffect, } from 'react'; import { useTransitionHook } from '@uirouter/react'; - -import { BROWSER_OS_PLATFORM } from '@/react/constants'; +import { JSONSchema7 } from 'json-schema'; import { CodeEditor } from '@@/CodeEditor'; -import { Tooltip } from '@@/Tip/Tooltip'; import { FormSectionTitle } from './form-components/FormSectionTitle'; import { FormError } from './form-components/FormError'; import { confirm } from './modals/confirm'; import { ModalType } from './modals'; import { buildConfirmButton } from './modals/utils'; - -const otherEditorConfig = { - tooltip: ( - <> -
<div>Ctrl+F - Start searching</div>
-      <div>Ctrl+G - Find next</div>
-      <div>Ctrl+Shift+G - Find previous</div>
-      <div>Ctrl+Shift+F - Replace</div>
-      <div>Ctrl+Shift+R - Replace all</div>
-      <div>Alt+G - Jump to line</div>
-      <div>Persistent search:</div>
-      <div>Enter - Find next</div>
-      <div>Shift+Enter - Find previous</div>
-    </>
-  ),
-  searchCmdLabel: 'Ctrl+F for search',
-} as const;
-
-export const editorConfig = {
-  mac: {
-    tooltip: (
-      <>
-        <div>Cmd+F - Start searching</div>
-        <div>Cmd+G - Find next</div>
-        <div>Cmd+Shift+G - Find previous</div>
-        <div>Cmd+Option+F - Replace</div>
-        <div>Cmd+Option+R - Replace all</div>
-        <div>Option+G - Jump to line</div>
-        <div>Persistent search:</div>
-        <div>Enter - Find next</div>
-        <div>Shift+Enter - Find previous</div>
-      </>
-    ),
-    searchCmdLabel: 'Cmd+F for search',
-  },
-
-  lin: otherEditorConfig,
-  win: otherEditorConfig,
-} as const;
+import { ShortcutsTooltip } from './CodeEditor/ShortcutsTooltip';
 
 type CodeEditorProps = ComponentProps<typeof CodeEditor>;
 
@@ -63,24 +23,24 @@ interface Props extends CodeEditorProps {
   titleContent?: ReactNode;
   hideTitle?: boolean;
   error?: string;
+  schema?: JSONSchema7;
 }
 
 export function WebEditorForm({
   id,
-  titleContent = '',
+  titleContent = 'Web editor',
   hideTitle,
   children,
   error,
+  schema,
+  textTip,
   ...props
 }: PropsWithChildren<Props>) {
   return (
{!hideTitle && ( - <> - - {titleContent ?? null} - + {titleContent ?? null} )} {children && (
@@ -94,6 +54,9 @@ export function WebEditorForm({
@@ -104,15 +67,11 @@ export function WebEditorForm({ ); } -function DefaultTitle({ id }: { id: string }) { +function DefaultTitle({ id, children }: { id: string; children?: ReactNode }) { return ( - Web editor -
- {editorConfig[BROWSER_OS_PLATFORM].searchCmdLabel} - - -
+ {children} +
); } diff --git a/app/react/components/Widget/WidgetIcon.tsx b/app/react/components/Widget/WidgetIcon.tsx new file mode 100644 index 000000000..0309d185e --- /dev/null +++ b/app/react/components/Widget/WidgetIcon.tsx @@ -0,0 +1,11 @@ +import { ReactNode } from 'react'; + +import { Icon } from '@@/Icon'; + +export function WidgetIcon({ icon }: { icon: ReactNode }) { + return ( +
+ +
+ ); +} diff --git a/app/react/components/Widget/WidgetTitle.tsx b/app/react/components/Widget/WidgetTitle.tsx index 2623c374c..3ed390e33 100644 --- a/app/react/components/Widget/WidgetTitle.tsx +++ b/app/react/components/Widget/WidgetTitle.tsx @@ -1,9 +1,7 @@ import clsx from 'clsx'; import { PropsWithChildren, ReactNode } from 'react'; -import { Icon } from '@/react/components/Icon'; - -import { useWidgetContext } from './Widget'; +import { WidgetIcon } from './WidgetIcon'; interface Props { title: ReactNode; @@ -17,16 +15,12 @@ export function WidgetTitle({ className, children, }: PropsWithChildren) { - useWidgetContext(); - return (
-
- -
-

{title}

+ +

{title}

{children}
diff --git a/app/react/components/buttons/CopyButton/CopyButton.stories.tsx b/app/react/components/buttons/CopyButton/CopyButton.stories.tsx index 42368f9b0..2a3df593b 100644 --- a/app/react/components/buttons/CopyButton/CopyButton.stories.tsx +++ b/app/react/components/buttons/CopyButton/CopyButton.stories.tsx @@ -26,13 +26,13 @@ function Template({ export const Primary: Story> = Template.bind({}); Primary.args = { - children: 'Copy to clipboard', + children: 'Copy', copyText: 'this will be copied to clipboard', }; export const NoCopyText: Story> = Template.bind({}); NoCopyText.args = { - children: 'Copy to clipboard without copied text', + children: 'Copy without copied text', copyText: 'clipboard override', displayText: '', }; diff --git a/app/react/components/datatables/Datatable.test.tsx b/app/react/components/datatables/Datatable.test.tsx index 25e1ed1eb..a40822848 100644 --- a/app/react/components/datatables/Datatable.test.tsx +++ b/app/react/components/datatables/Datatable.test.tsx @@ -1,4 +1,5 @@ import { render, screen, fireEvent } from '@testing-library/react'; +import userEvent from '@testing-library/user-event'; import { describe, it, expect } from 'vitest'; import { createColumnHelper, @@ -8,10 +9,9 @@ import { import { Datatable, defaultGlobalFilterFn, Props } from './Datatable'; import { - BasicTableSettings, createPersistedStore, refreshableSettings, - RefreshableTableSettings, + TableSettingsWithRefreshable, } from './types'; import { useTableState } from './useTableState'; @@ -29,13 +29,14 @@ const mockColumns = [ ]; // mock table settings / state -export interface TableSettings - extends BasicTableSettings, - RefreshableTableSettings {} function createStore(storageKey: string) { - return createPersistedStore(storageKey, 'name', (set) => ({ - ...refreshableSettings(set), - })); + return createPersistedStore( + storageKey, + 'name', + (set) => ({ + ...refreshableSettings(set), + }) + ); } const storageKey = 'test-table'; const settingsStore = createStore(storageKey); @@ -154,6 +155,84 @@ describe('Datatable', () => { ); expect(screen.getByText('No data available')).toBeInTheDocument(); + const selectAllCheckbox: HTMLInputElement = + screen.getByLabelText('Select all rows'); + expect(selectAllCheckbox.checked).toBe(false); + }); + + it('selects/deselects only page rows when select all is clicked', () => { + render( + + ); + + const selectAllCheckbox = screen.getByLabelText('Select all rows'); + fireEvent.click(selectAllCheckbox); + + // Check if all rows on the page are selected + expect(screen.getByText('2 items selected')).toBeInTheDocument(); + + // Deselect + fireEvent.click(selectAllCheckbox); + const checkboxes: HTMLInputElement[] = screen.queryAllByRole('checkbox'); + expect(checkboxes.filter((checkbox) => checkbox.checked).length).toBe(0); + }); + + it('selects/deselects all rows including other pages when select all is clicked with shift key', () => { + render( + + ); + + const selectAllCheckbox = screen.getByLabelText('Select all rows'); + fireEvent.click(selectAllCheckbox, { shiftKey: true }); + + // Check if all rows on the page are selected + expect(screen.getByText('3 items selected')).toBeInTheDocument(); + + // Deselect + fireEvent.click(selectAllCheckbox, { shiftKey: true }); + const checkboxes: HTMLInputElement[] = screen.queryAllByRole('checkbox'); + expect(checkboxes.filter((checkbox) => checkbox.checked).length).toBe(0); + }); + + it('shows indeterminate state and correct footer text when hidden rows are selected', async () => { + const user = 
userEvent.setup(); + render( + + ); + + // Select Jane + const checkboxes = screen.getAllByRole('checkbox'); + await user.click(checkboxes[2]); // Select the second row + + // Search for John (will hide selected Jane) + const searchInput = screen.getByPlaceholderText('Search...'); + await user.type(searchInput, 'John'); + + // Check if the footer text is correct + expect( + await screen.findByText('1 item selected (1 hidden by filters)') + ).toBeInTheDocument(); + + // Check if the checkbox is indeterminate + const selectAllCheckbox: HTMLInputElement = + screen.getByLabelText('Select all rows'); + expect(selectAllCheckbox.indeterminate).toBe(true); + expect(selectAllCheckbox.checked).toBe(false); }); }); diff --git a/app/react/components/datatables/Datatable.tsx b/app/react/components/datatables/Datatable.tsx index f845e221d..e4c80051c 100644 --- a/app/react/components/datatables/Datatable.tsx +++ b/app/react/components/datatables/Datatable.tsx @@ -58,7 +58,7 @@ export interface Props extends AutomationTestingProps { getRowId?(row: D): string; isRowSelectable?(row: Row): boolean; emptyContentLabel?: string; - title?: string; + title?: React.ReactNode; titleIcon?: IconProps['icon']; titleId?: string; initialTableState?: Partial; @@ -70,6 +70,10 @@ export interface Props extends AutomationTestingProps { getRowCanExpand?(row: Row): boolean; noWidget?: boolean; extendTableOptions?: (options: TableOptions) => TableOptions; + onSearchChange?: (search: string) => void; + includeSearch?: boolean; + ariaLabel?: string; + id?: string; } export function Datatable({ @@ -94,10 +98,14 @@ export function Datatable({ getRowCanExpand, 'data-cy': dataCy, onPageChange = () => {}, + onSearchChange = () => {}, page, totalCount = dataset.length, isServerSidePagination = false, extendTableOptions = (value) => value, + includeSearch, + ariaLabel, + id, }: Props & PaginationProps) { const pageCount = useMemo( () => Math.ceil(totalCount / settings.pageSize), @@ -152,7 +160,12 @@ export function Datatable({ getRowCanExpand, getColumnCanGlobalFilter, ...(isServerSidePagination - ? { manualPagination: true, pageCount } + ? 
{ + pageCount, + manualPagination: true, + manualFiltering: true, + manualSorting: true, + } : { getSortedRowModel: getSortedRowModel(), }), @@ -171,9 +184,22 @@ export function Datatable({ const selectedRowModel = tableInstance.getSelectedRowModel(); const selectedItems = selectedRowModel.rows.map((row) => row.original); + const filteredItems = tableInstance + .getFilteredRowModel() + .rows.map((row) => row.original); + + const hiddenSelectedItems = useMemo( + () => _.difference(selectedItems, filteredItems), + [selectedItems, filteredItems] + ); + const { titleAriaLabel, contentAriaLabel } = getAriaLabels( + ariaLabel, + title, + titleId + ); return ( - + ({ renderTableActions={() => renderTableActions(selectedItems)} renderTableSettings={() => renderTableSettings(tableInstance)} data-cy={`${dataCy}-header`} + includeSearch={includeSearch} /> @@ -193,7 +220,7 @@ export function Datatable({ isLoading={isLoading} onSortChange={handleSortChange} data-cy={dataCy} - aria-label={`${title} table`} + aria-label={contentAriaLabel} /> ({ pageSize={tableState.pagination.pageSize} pageCount={tableInstance.getPageCount()} totalSelected={selectedItems.length} + totalHiddenSelected={hiddenSelectedItems.length} /> ); @@ -210,6 +238,7 @@ export function Datatable({ function handleSearchBarChange(search: string) { tableInstance.setGlobalFilter({ search }); settings.setSearch(search); + onSearchChange(search); } function handlePageChange(page: number) { @@ -227,6 +256,23 @@ export function Datatable({ } } +function getAriaLabels( + titleAriaLabel?: string, + title?: ReactNode, + titleId?: string +) { + if (titleAriaLabel) { + return { titleAriaLabel, contentAriaLabel: `${titleAriaLabel} table` }; + } + if (typeof title === 'string') { + return { titleAriaLabel: title, contentAriaLabel: `${title} table` }; + } + if (titleId) { + return { titleAriaLabel: titleId, contentAriaLabel: `${titleId} table` }; + } + return { titleAriaLabel: 'table', contentAriaLabel: 'table' }; +} + function defaultRenderRow( row: Row, highlightedItemId?: string diff --git a/app/react/components/datatables/DatatableFooter.tsx b/app/react/components/datatables/DatatableFooter.tsx index 61e7b4dbf..0907ae260 100644 --- a/app/react/components/datatables/DatatableFooter.tsx +++ b/app/react/components/datatables/DatatableFooter.tsx @@ -5,6 +5,7 @@ import { SelectedRowsCount } from './SelectedRowsCount'; interface Props { totalSelected: number; + totalHiddenSelected: number; pageSize: number; page: number; onPageChange(page: number): void; @@ -14,6 +15,7 @@ interface Props { export function DatatableFooter({ totalSelected, + totalHiddenSelected, pageSize, page, onPageChange, @@ -22,7 +24,7 @@ export function DatatableFooter({ }: Props) { return ( - +