mirror of https://github.com/portainer/portainer.git synced 2025-07-23 07:19:41 +02:00
portainer/api/bolt/migrator/migrate_dbversion31.go
zees-dev 2a60b8fcdf
feat(helm/templates): helm app templates EE-943 (#5449)
* feat(helm): add helm chart backport to ce EE-1409 (#5425)

* EE-1311 Helm Chart Backport from EE

* backport to ce

Co-authored-by: Matt Hook <hookenz@gmail.com>

* feat(helm): list and configure helm chart (#5431)

* backport and tidyup code

* --amend

* using rocket icon for charts

* helm chart bugfix - clear category button

* added matomo analytics for helm chart install

* fix web editor exit warning without changes

* editor modified exit bugfix

* fixed notifications typo

* updated helm template text

* helper text to convey slow helm templates load

Co-authored-by: zees-dev <dev.786zshan@gmail.com>

* removing redundant time-consuming api call by using prop attribute

* feat(helm) helm chart backport from ee EE-1311 (#5436)

* Add missing defaultHelmRepoUrl and mock testing

* Backport EE-1477

* Backport updates to helm tests from EE

* add https by default changes and ssl to tls renaming from EE

* Port install integration test. Disabled by default to pass CI checks

* merged changes from EE for the integration test

* kube proxy whitelist updated to support internal helm install command

Co-authored-by: zees-dev <dev.786zshan@gmail.com>

* Pull in all changes from tech review in EE-943

* added helm to sidebar after rebase, sync CE with EE

* bugfix: kubectl shell not opening - bearer token bug

* tidy go modules & remove yarn-error.log

* removed redundant handler (not used) - to match EE

* resolved merge conflicts, updated code

* feat(helm/views): helm release and application views EE-1236  (#5529)

* feat(helm): add helm chart backport to ce EE-1409 (#5425)

* EE-1311 Helm Chart Backport from EE

* backport to ce

Co-authored-by: Matt Hook <hookenz@gmail.com>

* Pull in all changes from tech review in EE-943

* added helm to sidebar after rebase, sync CE with EE

* removed redundant handler (not used) - to match EE

* feat(helm) display helm charts - backend EE-1236

* copy over components for new applications view EE-1236

* Add new applications datatable component

* Add more migrated files

* removed test not applicable to CE

* backported EE app data table code to CE

* removed redundant helm repo url

* resolved conflicts, updated code

* using endpoint middleware

* PR review fixes

* using constants, openapi updated

Co-authored-by: Richard Wei <54336863+WaysonWei@users.noreply.github.com>
Co-authored-by: zees-dev <dev.786zshan@gmail.com>

* fixed test conflicts, go linted

* feat(helm/templates-add): helm templates add repo for user support EE-1278 (#5514)

* feat(helm): add helm chart backport to ce EE-1409 (#5425)

* EE-1311 Helm Chart Backport from EE

* backport to ce

Co-authored-by: Matt Hook <hookenz@gmail.com>

* feat(helm) helm chart backport from ee EE-1311 (#5436)

* Add missing defaultHelmRepoUrl and mock testing

* Backport EE-1477

* Backport updates to helm tests from EE

* add https by default changes and ssl to tls renaming from EE

* Port install integration test. Disabled by default to pass CI checks

* merged changes from EE for the integration test

* kube proxy whitelist updated to support internal helm install command

Co-authored-by: zees-dev <dev.786zshan@gmail.com>

* Pull in all changes from tech review in EE-943

* feat(helm): add helm chart backport to ce EE-1409 (#5425)

* EE-1311 Helm Chart Backport from EE

* backport to ce

Co-authored-by: Matt Hook <hookenz@gmail.com>

* Pull in all changes from tech review in EE-943

* added helm to sidebar after rebase, sync CE with EE

* backport EE-1278, squashed, diffed, updated

* helm install openapi spec update

* resolved conflicts, updated code

* - matching ee codebase at 0afe57034449ee0e9f333d92c252a13995a93019
- helm install using endpoint middleware
- remove trailing slash from added/persisted helm repo urls

* feat(helm) use libhelm url validator and improved path assembly EE-1554 (#5561)

* feat(helm/userrepos) fix getting global repo for ordinary users EE-1562 (#5567)

* feat(helm/userrepos) fix getting global repo for ordinary users EE-1562

* post review changes and further backported changes from EE

* resolved conflicts, updated code

* fixed helm_install handler unit test

* user cannot add existing repo if suffix is '/' (#5571)

* feat(helm/docs) fix broken swagger docs EE-1278 (#5572)

* Fix swagger docs

* minor correction

* fix(helm): migrating code from user handler to helm handler (#5573)

* - migrated user_helm_repos to helm endpoint handler
- migrated api operations from user factory/service to helm factory/service
- passing endpointId into helm service/factory as endpoint provider is deprecated

* upgrade libhelm to hide secrets

Co-authored-by: Matt Hook <hookenz@gmail.com>

* removed duplicate file - due to merge conflict

* dependency injection in helm factory

Co-authored-by: Richard Wei <54336863+WaysonWei@users.noreply.github.com>
Co-authored-by: Matt Hook <hookenz@gmail.com>

* kubernetes.templates -> kubernetes.templates.helm name conflict fix

* Validate the URL added as a public helm repo (#5579)

* fix(helm): helm app deletion fix EE-1581 (#5582)

* updated helm lib to show correct error on uninstall failure

* passing down helm app namespace on deletion

* fix(k8s): EE-1591 non-admin users cannot deploy charts containing secrets (#5590)

Co-authored-by: Simon Meng <simon.meng@portainer.io>

* fix(helm): helm epic bugfixes EE-1582 EE-1593 (#5585)

* - trim trailing slash and lowercase before persisting helm repo
- browser helm templates url /kubernetes/templates/templates -> /kubernetes/templates/helm
- fix publish url
- fix helm repo add refresh
- semi-fix k8s app expansion

* Tidy up swagger documentation related to helm. Make json consistent

* fixed helm release page for non-default namespaces

* k8s app view table expansion bugfix

* EE-1593: publish url load balancer fallback

Co-authored-by: Matt Hook <hookenz@gmail.com>

* k8s app list fix for charts with deployments containing multiple pods - which use the same label (#5599)

* fix(kubernetes): app list view fix for secrets with long keys or values EE-1600 (#5600)

* k8s app secrets key value text overflow ellipses

* wrapping key value pairs instead of ellipses

* fix(helm): helm apps bundling issue across different namespaces EE-1619 (#5602)

* helm apps bundling issue across different namespaces

* - code comments and indentation to ease reading
- moved namespace calc out of loop

* feat(helm/test) disable slow helm search test by default EE-1599 (#5598)

* skip helm_repo_search as it's an integration test

* switch to portainer built in integration test checker

* make module order match EE

* don't print test struct out when skipping integration test

Co-authored-by: Richard Wei <54336863+WaysonWei@users.noreply.github.com>
Co-authored-by: Matt Hook <hookenz@gmail.com>
Co-authored-by: cong meng <mcpacino@gmail.com>
Co-authored-by: Simon Meng <simon.meng@portainer.io>
2021-09-10 14:06:57 +12:00


package migrator

import (
	"fmt"

	portainer "github.com/portainer/portainer/api"
	"github.com/portainer/portainer/api/bolt/errors"
	"github.com/portainer/portainer/api/internal/endpointutils"
	snapshotutils "github.com/portainer/portainer/api/internal/snapshot"
)
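
// migrateDBVersionToDB32 runs the DB 32 sub-migrations in order and stops at the first error.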
func (m *Migrator) migrateDBVersionToDB32() error {
	err := m.updateRegistriesToDB32()
	if err != nil {
		return err
	}

	err = m.updateDockerhubToDB32()
	if err != nil {
		return err
	}

	if err := m.updateVolumeResourceControlToDB32(); err != nil {
		return err
	}

	if err := m.kubeconfigExpiryToDB32(); err != nil {
		return err
	}

	if err := m.helmRepositoryURLToDB32(); err != nil {
		return err
	}

	return nil
}
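
// updateRegistriesToDB32 rebuilds each registry's RegistryAccesses map: for every
// environment (endpoint), only the user and team access policies that also exist on
// that environment are carried over, with an empty namespace list.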
func (m *Migrator) updateRegistriesToDB32() error {
	registries, err := m.registryService.Registries()
	if err != nil {
		return err
	}

	endpoints, err := m.endpointService.Endpoints()
	if err != nil {
		return err
	}

	for _, registry := range registries {
		registry.RegistryAccesses = portainer.RegistryAccesses{}

		for _, endpoint := range endpoints {
			filteredUserAccessPolicies := portainer.UserAccessPolicies{}
			for userId, registryPolicy := range registry.UserAccessPolicies {
				if _, found := endpoint.UserAccessPolicies[userId]; found {
					filteredUserAccessPolicies[userId] = registryPolicy
				}
			}

			filteredTeamAccessPolicies := portainer.TeamAccessPolicies{}
			for teamId, registryPolicy := range registry.TeamAccessPolicies {
				if _, found := endpoint.TeamAccessPolicies[teamId]; found {
					filteredTeamAccessPolicies[teamId] = registryPolicy
				}
			}

			registry.RegistryAccesses[endpoint.ID] = portainer.RegistryAccessPolicies{
				UserAccessPolicies: filteredUserAccessPolicies,
				TeamAccessPolicies: filteredTeamAccessPolicies,
				Namespaces:         []string{},
			}
		}

		m.registryService.UpdateRegistry(registry.ID, &registry)
	}

	return nil
}
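
// updateDockerhubToDB32 converts a previously configured, authenticated DockerHub entry
// into a regular registry. Access entries are created only for non-Kubernetes
// environments, granted to the users and teams that already have access policies on
// each environment.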
func (m *Migrator) updateDockerhubToDB32() error {
	dockerhub, err := m.dockerhubService.DockerHub()
	if err == errors.ErrObjectNotFound {
		return nil
	} else if err != nil {
		return err
	}

	if !dockerhub.Authentication {
		return nil
	}

	registry := &portainer.Registry{
		Type:             portainer.DockerHubRegistry,
		Name:             "Dockerhub (authenticated - migrated)",
		URL:              "docker.io",
		Authentication:   true,
		Username:         dockerhub.Username,
		Password:         dockerhub.Password,
		RegistryAccesses: portainer.RegistryAccesses{},
	}

	endpoints, err := m.endpointService.Endpoints()
	if err != nil {
		return err
	}

	for _, endpoint := range endpoints {
		if endpoint.Type != portainer.KubernetesLocalEnvironment &&
			endpoint.Type != portainer.AgentOnKubernetesEnvironment &&
			endpoint.Type != portainer.EdgeAgentOnKubernetesEnvironment {

			userAccessPolicies := portainer.UserAccessPolicies{}
			for userId := range endpoint.UserAccessPolicies {
				if _, found := endpoint.UserAccessPolicies[userId]; found {
					userAccessPolicies[userId] = portainer.AccessPolicy{
						RoleID: 0,
					}
				}
			}

			teamAccessPolicies := portainer.TeamAccessPolicies{}
			for teamId := range endpoint.TeamAccessPolicies {
				if _, found := endpoint.TeamAccessPolicies[teamId]; found {
					teamAccessPolicies[teamId] = portainer.AccessPolicy{
						RoleID: 0,
					}
				}
			}

			registry.RegistryAccesses[endpoint.ID] = portainer.RegistryAccessPolicies{
				UserAccessPolicies: userAccessPolicies,
				TeamAccessPolicies: teamAccessPolicies,
				Namespaces:         []string{},
			}
		}
	}

	return m.registryService.CreateRegistry(registry)
}
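
// updateVolumeResourceControlToDB32 re-keys volume resource controls from the old
// "<name><created-at>" resource ID format to "<name>_<dockerID>", using the most recent
// Docker snapshot of each environment. Volume resource controls that cannot be matched
// to a volume are deleted.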
func (m *Migrator) updateVolumeResourceControlToDB32() error {
	endpoints, err := m.endpointService.Endpoints()
	if err != nil {
		return fmt.Errorf("failed fetching environments: %w", err)
	}

	resourceControls, err := m.resourceControlService.ResourceControls()
	if err != nil {
		return fmt.Errorf("failed fetching resource controls: %w", err)
	}

	toUpdate := map[portainer.ResourceControlID]string{}
	volumeResourceControls := map[string]*portainer.ResourceControl{}

	for i := range resourceControls {
		resourceControl := resourceControls[i]
		if resourceControl.Type == portainer.VolumeResourceControl {
			volumeResourceControls[resourceControl.ResourceID] = &resourceControl
		}
	}

	for _, endpoint := range endpoints {
		if !endpointutils.IsDockerEndpoint(&endpoint) {
			continue
		}

		totalSnapshots := len(endpoint.Snapshots)
		if totalSnapshots == 0 {
			continue
		}

		snapshot := endpoint.Snapshots[totalSnapshots-1]

		endpointDockerID, err := snapshotutils.FetchDockerID(snapshot)
		if err != nil {
			return fmt.Errorf("failed fetching environment docker id: %w", err)
		}

		if volumesData, done := snapshot.SnapshotRaw.Volumes.(map[string]interface{}); done {
			if volumesData["Volumes"] == nil {
				continue
			}

			findResourcesToUpdateForDB32(endpointDockerID, volumesData, toUpdate, volumeResourceControls)
		}
	}

	for _, resourceControl := range volumeResourceControls {
		if newResourceID, ok := toUpdate[resourceControl.ID]; ok {
			resourceControl.ResourceID = newResourceID

			err := m.resourceControlService.UpdateResourceControl(resourceControl.ID, resourceControl)
			if err != nil {
				return fmt.Errorf("failed updating resource control %d: %w", resourceControl.ID, err)
			}
		} else {
			err := m.resourceControlService.DeleteResourceControl(resourceControl.ID)
			if err != nil {
				return fmt.Errorf("failed deleting resource control %d: %w", resourceControl.ID, err)
			}
		}
	}

	return nil
}
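
// findResourcesToUpdateForDB32 matches the volumes of a snapshot against the existing
// volume resource controls (keyed by the old "<name><created-at>" ID) and records the
// new "<name>_<dockerID>" resource ID for each match in toUpdate.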
func findResourcesToUpdateForDB32(dockerID string, volumesData map[string]interface{}, toUpdate map[portainer.ResourceControlID]string, volumeResourceControls map[string]*portainer.ResourceControl) {
	volumes := volumesData["Volumes"].([]interface{})
	for _, volumeMeta := range volumes {
		volume := volumeMeta.(map[string]interface{})
		volumeName := volume["Name"].(string)
		oldResourceID := fmt.Sprintf("%s%s", volumeName, volume["CreatedAt"].(string))

		resourceControl, ok := volumeResourceControls[oldResourceID]
		if ok {
			toUpdate[resourceControl.ID] = fmt.Sprintf("%s_%s", volumeName, dockerID)
		}
	}
}
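
// kubeconfigExpiryToDB32 initialises the kubeconfig expiry setting with its default value.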
func (m *Migrator) kubeconfigExpiryToDB32() error {
	settings, err := m.settingsService.Settings()
	if err != nil {
		return err
	}

	settings.KubeconfigExpiry = portainer.DefaultKubeconfigExpiry

	return m.settingsService.UpdateSettings(settings)
}
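
// helmRepositoryURLToDB32 initialises the Helm repository URL setting with its default value.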
func (m *Migrator) helmRepositoryURLToDB32() error {
	settings, err := m.settingsService.Settings()
	if err != nil {
		return err
	}

	settings.HelmRepositoryURL = portainer.DefaultHelmRepositoryURL

	return m.settingsService.UpdateSettings(settings)
}