1
0
Fork 0
mirror of https://github.com/portainer/portainer.git synced 2025-08-04 05:15:25 +02:00

Merge branch 'release/1.20.0'

This commit is contained in:
Anthony Lapenna 2018-12-12 17:03:31 +13:00
commit dbda568481
389 changed files with 11012 additions and 1893 deletions

View file

@ -77,14 +77,14 @@ The subject contains succinct description of the change:
## Contribution process ## Contribution process
Our contribution process is described below. Some of the steps can be visualized inside Github via specific `contrib/` labels, such as `contrib/func-review-in-progress` or `contrib/tech-review-approved`. Our contribution process is described below. Some of the steps can be visualized inside Github via specific `status/` labels, such as `status/1-functional-review` or `status/2-technical-review`.
### Bug report ### Bug report
![portainer_bugreport_workflow](https://user-images.githubusercontent.com/5485061/43569306-5571b3a0-9637-11e8-8559-786cfc82a14f.png) ![portainer_bugreport_workflow](https://user-images.githubusercontent.com/5485061/45727219-50190a00-bbf5-11e8-9fe8-3a563bb8d5d7.png)
### Feature request ### Feature request
The feature request process is similar to the bug report process but has an extra functional validation before the technical validation. The feature request process is similar to the bug report process but has an extra functional validation before the technical validation as well as a documentation validation before the testing phase.
![portainer_featurerequest_workflow](https://user-images.githubusercontent.com/5485061/43569315-5d30a308-9637-11e8-8292-3c62b5612925.png) ![portainer_featurerequest_workflow](https://user-images.githubusercontent.com/5485061/45727229-5ad39f00-bbf5-11e8-9550-16ba66c50615.png)

View file

@ -6,7 +6,7 @@
[![Docker Pulls](https://img.shields.io/docker/pulls/portainer/portainer.svg)](https://hub.docker.com/r/portainer/portainer/) [![Docker Pulls](https://img.shields.io/docker/pulls/portainer/portainer.svg)](https://hub.docker.com/r/portainer/portainer/)
[![Microbadger](https://images.microbadger.com/badges/image/portainer/portainer.svg)](http://microbadger.com/images/portainer/portainer "Image size") [![Microbadger](https://images.microbadger.com/badges/image/portainer/portainer.svg)](http://microbadger.com/images/portainer/portainer "Image size")
[![Documentation Status](https://readthedocs.org/projects/portainer/badge/?version=stable)](http://portainer.readthedocs.io/en/stable/?badge=stable) [![Documentation Status](https://readthedocs.org/projects/portainer/badge/?version=stable)](http://portainer.readthedocs.io/en/stable/?badge=stable)
[![Build Status](https://semaphoreci.com/api/v1/portainer/portainer/branches/develop/badge.svg)](https://semaphoreci.com/portainer/portainer) [![Build Status](https://semaphoreci.com/api/v1/portainer/portainer-ci/branches/develop/badge.svg)](https://semaphoreci.com/portainer/portainer-ci)
[![Code Climate](https://codeclimate.com/github/portainer/portainer/badges/gpa.svg)](https://codeclimate.com/github/portainer/portainer) [![Code Climate](https://codeclimate.com/github/portainer/portainer/badges/gpa.svg)](https://codeclimate.com/github/portainer/portainer)
[![Slack](https://portainer.io/slack/badge.svg)](https://portainer.io/slack/) [![Slack](https://portainer.io/slack/badge.svg)](https://portainer.io/slack/)
[![Gitter](https://badges.gitter.im/portainer/Lobby.svg)](https://gitter.im/portainer/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![Gitter](https://badges.gitter.im/portainer/Lobby.svg)](https://gitter.im/portainer/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)

View file

@ -7,13 +7,13 @@ import (
// TarFileInBuffer will create a tar archive containing a single file named via fileName and using the content // TarFileInBuffer will create a tar archive containing a single file named via fileName and using the content
// specified in fileContent. Returns the archive as a byte array. // specified in fileContent. Returns the archive as a byte array.
func TarFileInBuffer(fileContent []byte, fileName string) ([]byte, error) { func TarFileInBuffer(fileContent []byte, fileName string, mode int64) ([]byte, error) {
var buffer bytes.Buffer var buffer bytes.Buffer
tarWriter := tar.NewWriter(&buffer) tarWriter := tar.NewWriter(&buffer)
header := &tar.Header{ header := &tar.Header{
Name: fileName, Name: fileName,
Mode: 0600, Mode: mode,
Size: int64(len(fileContent)), Size: int64(len(fileContent)),
} }

48
api/archive/zip.go Normal file
View file

@ -0,0 +1,48 @@
package archive
import (
	"archive/zip"
	"bytes"
	"errors"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
)
// UnzipArchive will unzip an archive from bytes into the dest destination folder on disk
func UnzipArchive(archiveData []byte, dest string) error {
zipReader, err := zip.NewReader(bytes.NewReader(archiveData), int64(len(archiveData)))
if err != nil {
return err
}
for _, zipFile := range zipReader.File {
f, err := zipFile.Open()
if err != nil {
return err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
return err
}
fpath := filepath.Join(dest, zipFile.Name)
outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, zipFile.Mode())
if err != nil {
return err
}
_, err = io.Copy(outFile, bytes.NewReader(data))
if err != nil {
return err
}
outFile.Close()
}
return nil
}

View file

@ -10,9 +10,11 @@ import (
"github.com/portainer/portainer/bolt/dockerhub" "github.com/portainer/portainer/bolt/dockerhub"
"github.com/portainer/portainer/bolt/endpoint" "github.com/portainer/portainer/bolt/endpoint"
"github.com/portainer/portainer/bolt/endpointgroup" "github.com/portainer/portainer/bolt/endpointgroup"
"github.com/portainer/portainer/bolt/extension"
"github.com/portainer/portainer/bolt/migrator" "github.com/portainer/portainer/bolt/migrator"
"github.com/portainer/portainer/bolt/registry" "github.com/portainer/portainer/bolt/registry"
"github.com/portainer/portainer/bolt/resourcecontrol" "github.com/portainer/portainer/bolt/resourcecontrol"
"github.com/portainer/portainer/bolt/schedule"
"github.com/portainer/portainer/bolt/settings" "github.com/portainer/portainer/bolt/settings"
"github.com/portainer/portainer/bolt/stack" "github.com/portainer/portainer/bolt/stack"
"github.com/portainer/portainer/bolt/tag" "github.com/portainer/portainer/bolt/tag"
@ -38,6 +40,7 @@ type Store struct {
DockerHubService *dockerhub.Service DockerHubService *dockerhub.Service
EndpointGroupService *endpointgroup.Service EndpointGroupService *endpointgroup.Service
EndpointService *endpoint.Service EndpointService *endpoint.Service
ExtensionService *extension.Service
RegistryService *registry.Service RegistryService *registry.Service
ResourceControlService *resourcecontrol.Service ResourceControlService *resourcecontrol.Service
SettingsService *settings.Service SettingsService *settings.Service
@ -49,6 +52,7 @@ type Store struct {
UserService *user.Service UserService *user.Service
VersionService *version.Service VersionService *version.Service
WebhookService *webhook.Service WebhookService *webhook.Service
ScheduleService *schedule.Service
} }
// NewStore initializes a new Store and the associated services // NewStore initializes a new Store and the associated services
@ -138,6 +142,7 @@ func (store *Store) MigrateData() error {
ResourceControlService: store.ResourceControlService, ResourceControlService: store.ResourceControlService,
SettingsService: store.SettingsService, SettingsService: store.SettingsService,
StackService: store.StackService, StackService: store.StackService,
TemplateService: store.TemplateService,
UserService: store.UserService, UserService: store.UserService,
VersionService: store.VersionService, VersionService: store.VersionService,
FileService: store.fileService, FileService: store.fileService,
@ -174,6 +179,12 @@ func (store *Store) initServices() error {
} }
store.EndpointService = endpointService store.EndpointService = endpointService
extensionService, err := extension.NewService(store.db)
if err != nil {
return err
}
store.ExtensionService = extensionService
registryService, err := registry.NewService(store.db) registryService, err := registry.NewService(store.db)
if err != nil { if err != nil {
return err return err
@ -240,5 +251,11 @@ func (store *Store) initServices() error {
} }
store.WebhookService = webhookService store.WebhookService = webhookService
scheduleService, err := schedule.NewService(store.db)
if err != nil {
return err
}
store.ScheduleService = scheduleService
return nil return nil
} }

View file

@ -0,0 +1,86 @@
package extension
import (
"github.com/portainer/portainer"
"github.com/portainer/portainer/bolt/internal"
"github.com/boltdb/bolt"
)
const (
	// BucketName represents the name of the bucket where this service stores data.
	BucketName = "extension"
)

// Service represents a service for managing extension data.
type Service struct {
	db *bolt.DB
}
// NewService creates a new instance of a service, ensuring the
// backing bucket exists before returning.
func NewService(db *bolt.DB) (*Service, error) {
	if err := internal.CreateBucket(db, BucketName); err != nil {
		return nil, err
	}
	return &Service{db: db}, nil
}
// Extension returns an extension by ID.
func (service *Service) Extension(ID portainer.ExtensionID) (*portainer.Extension, error) {
	var extension portainer.Extension
	if err := internal.GetObject(service.db, BucketName, internal.Itob(int(ID)), &extension); err != nil {
		return nil, err
	}
	return &extension, nil
}
// Extensions returns an array containing all the extensions.
func (service *Service) Extensions() ([]portainer.Extension, error) {
	extensions := make([]portainer.Extension, 0)

	err := service.db.View(func(tx *bolt.Tx) error {
		// ForEach walks every key/value pair in the bucket in key order.
		return tx.Bucket([]byte(BucketName)).ForEach(func(k, v []byte) error {
			var extension portainer.Extension
			if err := internal.UnmarshalObject(v, &extension); err != nil {
				return err
			}
			extensions = append(extensions, extension)
			return nil
		})
	})

	return extensions, err
}
// Persist persists an extension inside the database, keyed by its ID.
func (service *Service) Persist(extension *portainer.Extension) error {
	return service.db.Update(func(tx *bolt.Tx) error {
		data, err := internal.MarshalObject(extension)
		if err != nil {
			return err
		}
		return tx.Bucket([]byte(BucketName)).Put(internal.Itob(int(extension.ID)), data)
	})
}
// DeleteExtension deletes an extension by ID.
func (service *Service) DeleteExtension(ID portainer.ExtensionID) error {
	return internal.DeleteObject(service.db, BucketName, internal.Itob(int(ID)))
}

View file

@ -0,0 +1,35 @@
package migrator
import (
"strings"
"github.com/portainer/portainer"
)
// updateSettingsToDBVersion15 disables the host management features flag
// in the persisted settings as part of the migration to DB version 15.
func (m *Migrator) updateSettingsToDBVersion15() error {
	settings, err := m.settingsService.Settings()
	if err != nil {
		return err
	}

	settings.EnableHostManagementFeatures = false

	return m.settingsService.UpdateSettings(settings)
}
// updateTemplatesToVersion15 rewrites template logo URLs that point at the
// legacy https://portainer.io/images host to use the assets server URL instead.
func (m *Migrator) updateTemplatesToVersion15() error {
	templates, err := m.templateService.Templates()
	if err != nil {
		return err
	}

	for idx := range templates {
		template := &templates[idx]
		template.Logo = strings.Replace(template.Logo, "https://portainer.io/images", portainer.AssetsServerURL, -1)
		if err := m.templateService.UpdateTemplate(template.ID, template); err != nil {
			return err
		}
	}

	return nil
}

View file

@ -8,6 +8,7 @@ import (
"github.com/portainer/portainer/bolt/resourcecontrol" "github.com/portainer/portainer/bolt/resourcecontrol"
"github.com/portainer/portainer/bolt/settings" "github.com/portainer/portainer/bolt/settings"
"github.com/portainer/portainer/bolt/stack" "github.com/portainer/portainer/bolt/stack"
"github.com/portainer/portainer/bolt/template"
"github.com/portainer/portainer/bolt/user" "github.com/portainer/portainer/bolt/user"
"github.com/portainer/portainer/bolt/version" "github.com/portainer/portainer/bolt/version"
) )
@ -22,6 +23,7 @@ type (
resourceControlService *resourcecontrol.Service resourceControlService *resourcecontrol.Service
settingsService *settings.Service settingsService *settings.Service
stackService *stack.Service stackService *stack.Service
templateService *template.Service
userService *user.Service userService *user.Service
versionService *version.Service versionService *version.Service
fileService portainer.FileService fileService portainer.FileService
@ -36,6 +38,7 @@ type (
ResourceControlService *resourcecontrol.Service ResourceControlService *resourcecontrol.Service
SettingsService *settings.Service SettingsService *settings.Service
StackService *stack.Service StackService *stack.Service
TemplateService *template.Service
UserService *user.Service UserService *user.Service
VersionService *version.Service VersionService *version.Service
FileService portainer.FileService FileService portainer.FileService
@ -51,6 +54,7 @@ func NewMigrator(parameters *Parameters) *Migrator {
endpointService: parameters.EndpointService, endpointService: parameters.EndpointService,
resourceControlService: parameters.ResourceControlService, resourceControlService: parameters.ResourceControlService,
settingsService: parameters.SettingsService, settingsService: parameters.SettingsService,
templateService: parameters.TemplateService,
stackService: parameters.StackService, stackService: parameters.StackService,
userService: parameters.UserService, userService: parameters.UserService,
versionService: parameters.VersionService, versionService: parameters.VersionService,
@ -186,5 +190,18 @@ func (m *Migrator) Migrate() error {
} }
} }
// Portainer 1.20.0
if m.currentDBVersion < 15 {
err := m.updateSettingsToDBVersion15()
if err != nil {
return err
}
err = m.updateTemplatesToVersion15()
if err != nil {
return err
}
}
return m.versionService.StoreDBVersion(portainer.DBVersion) return m.versionService.StoreDBVersion(portainer.DBVersion)
} }

View file

@ -0,0 +1,129 @@
package schedule
import (
"github.com/portainer/portainer"
"github.com/portainer/portainer/bolt/internal"
"github.com/boltdb/bolt"
)
const (
	// BucketName represents the name of the bucket where this service stores data.
	BucketName = "schedules"
)

// Service represents a service for managing schedule data.
type Service struct {
	db *bolt.DB
}
// NewService creates a new instance of a service, ensuring the
// backing bucket exists before returning.
func NewService(db *bolt.DB) (*Service, error) {
	if err := internal.CreateBucket(db, BucketName); err != nil {
		return nil, err
	}
	return &Service{db: db}, nil
}
// Schedule returns a schedule by ID.
func (service *Service) Schedule(ID portainer.ScheduleID) (*portainer.Schedule, error) {
	var schedule portainer.Schedule
	if err := internal.GetObject(service.db, BucketName, internal.Itob(int(ID)), &schedule); err != nil {
		return nil, err
	}
	return &schedule, nil
}
// UpdateSchedule updates the schedule stored under ID.
func (service *Service) UpdateSchedule(ID portainer.ScheduleID, schedule *portainer.Schedule) error {
	return internal.UpdateObject(service.db, BucketName, internal.Itob(int(ID)), schedule)
}
// DeleteSchedule deletes a schedule by ID.
func (service *Service) DeleteSchedule(ID portainer.ScheduleID) error {
	return internal.DeleteObject(service.db, BucketName, internal.Itob(int(ID)))
}
// Schedules returns an array containing all the schedules.
func (service *Service) Schedules() ([]portainer.Schedule, error) {
	schedules := make([]portainer.Schedule, 0)

	err := service.db.View(func(tx *bolt.Tx) error {
		// ForEach walks every key/value pair in the bucket in key order.
		return tx.Bucket([]byte(BucketName)).ForEach(func(k, v []byte) error {
			var schedule portainer.Schedule
			if err := internal.UnmarshalObject(v, &schedule); err != nil {
				return err
			}
			schedules = append(schedules, schedule)
			return nil
		})
	})

	return schedules, err
}
// SchedulesByJobType returns an array containing all the schedules
// with the specified JobType.
func (service *Service) SchedulesByJobType(jobType portainer.JobType) ([]portainer.Schedule, error) {
	schedules := make([]portainer.Schedule, 0)

	err := service.db.View(func(tx *bolt.Tx) error {
		return tx.Bucket([]byte(BucketName)).ForEach(func(k, v []byte) error {
			var schedule portainer.Schedule
			if err := internal.UnmarshalObject(v, &schedule); err != nil {
				return err
			}
			// Keep only the schedules matching the requested job type.
			if schedule.JobType == jobType {
				schedules = append(schedules, schedule)
			}
			return nil
		})
	})

	return schedules, err
}
// CreateSchedule saves a new schedule under the identifier already set by
// the caller on schedule.ID.
func (service *Service) CreateSchedule(schedule *portainer.Schedule) error {
	return service.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(BucketName))

		// Sequences for schedules are managed manually: keep the bucket
		// sequence in sync with the caller-chosen identifier.
		if err := bucket.SetSequence(uint64(schedule.ID)); err != nil {
			return err
		}

		data, err := internal.MarshalObject(schedule)
		if err != nil {
			return err
		}

		return bucket.Put(internal.Itob(int(schedule.ID)), data)
	})
}
// GetNextIdentifier returns the next identifier for a schedule,
// delegating to the shared bucket-sequence helper.
func (service *Service) GetNextIdentifier() int {
	return internal.GetNextIdentifier(service.db, BucketName)
}

View file

@ -2,7 +2,9 @@ package main // import "github.com/portainer/portainer"
import ( import (
"encoding/json" "encoding/json"
"os"
"strings" "strings"
"time"
"github.com/portainer/portainer" "github.com/portainer/portainer"
"github.com/portainer/portainer/bolt" "github.com/portainer/portainer/bolt"
@ -87,7 +89,7 @@ func initJWTService(authenticationEnabled bool) portainer.JWTService {
} }
func initDigitalSignatureService() portainer.DigitalSignatureService { func initDigitalSignatureService() portainer.DigitalSignatureService {
return &crypto.ECDSAService{} return crypto.NewECDSAService(os.Getenv("AGENT_SECRET"))
} }
func initCryptoService() portainer.CryptoService { func initCryptoService() portainer.CryptoService {
@ -110,25 +112,110 @@ func initSnapshotter(clientFactory *docker.ClientFactory) portainer.Snapshotter
return docker.NewSnapshotter(clientFactory) return docker.NewSnapshotter(clientFactory)
} }
func initJobScheduler(endpointService portainer.EndpointService, snapshotter portainer.Snapshotter, flags *portainer.CLIFlags) (portainer.JobScheduler, error) { func initJobScheduler() portainer.JobScheduler {
jobScheduler := cron.NewJobScheduler(endpointService, snapshotter) return cron.NewJobScheduler()
}
func loadSnapshotSystemSchedule(jobScheduler portainer.JobScheduler, snapshotter portainer.Snapshotter, scheduleService portainer.ScheduleService, endpointService portainer.EndpointService, settingsService portainer.SettingsService) error {
settings, err := settingsService.Settings()
if err != nil {
return err
}
schedules, err := scheduleService.SchedulesByJobType(portainer.SnapshotJobType)
if err != nil {
return err
}
var snapshotSchedule *portainer.Schedule
if len(schedules) == 0 {
snapshotJob := &portainer.SnapshotJob{}
snapshotSchedule = &portainer.Schedule{
ID: portainer.ScheduleID(scheduleService.GetNextIdentifier()),
Name: "system_snapshot",
CronExpression: "@every " + settings.SnapshotInterval,
Recurring: true,
JobType: portainer.SnapshotJobType,
SnapshotJob: snapshotJob,
Created: time.Now().Unix(),
}
} else {
snapshotSchedule = &schedules[0]
}
snapshotJobContext := cron.NewSnapshotJobContext(endpointService, snapshotter)
snapshotJobRunner := cron.NewSnapshotJobRunner(snapshotSchedule, snapshotJobContext)
err = jobScheduler.ScheduleJob(snapshotJobRunner)
if err != nil {
return err
}
if len(schedules) == 0 {
return scheduleService.CreateSchedule(snapshotSchedule)
}
return nil
}
func loadEndpointSyncSystemSchedule(jobScheduler portainer.JobScheduler, scheduleService portainer.ScheduleService, endpointService portainer.EndpointService, flags *portainer.CLIFlags) error {
if *flags.ExternalEndpoints == "" {
return nil
}
if *flags.ExternalEndpoints != "" {
log.Println("Using external endpoint definition. Endpoint management via the API will be disabled.") log.Println("Using external endpoint definition. Endpoint management via the API will be disabled.")
err := jobScheduler.ScheduleEndpointSyncJob(*flags.ExternalEndpoints, *flags.SyncInterval)
schedules, err := scheduleService.SchedulesByJobType(portainer.EndpointSyncJobType)
if err != nil { if err != nil {
return nil, err return err
}
if len(schedules) != 0 {
return nil
}
endpointSyncJob := &portainer.EndpointSyncJob{}
endointSyncSchedule := &portainer.Schedule{
ID: portainer.ScheduleID(scheduleService.GetNextIdentifier()),
Name: "system_endpointsync",
CronExpression: "@every " + *flags.SyncInterval,
Recurring: true,
JobType: portainer.EndpointSyncJobType,
EndpointSyncJob: endpointSyncJob,
Created: time.Now().Unix(),
}
endpointSyncJobContext := cron.NewEndpointSyncJobContext(endpointService, *flags.ExternalEndpoints)
endpointSyncJobRunner := cron.NewEndpointSyncJobRunner(endointSyncSchedule, endpointSyncJobContext)
err = jobScheduler.ScheduleJob(endpointSyncJobRunner)
if err != nil {
return err
}
return scheduleService.CreateSchedule(endointSyncSchedule)
}
func loadSchedulesFromDatabase(jobScheduler portainer.JobScheduler, jobService portainer.JobService, scheduleService portainer.ScheduleService, endpointService portainer.EndpointService, fileService portainer.FileService) error {
schedules, err := scheduleService.Schedules()
if err != nil {
return err
}
for _, schedule := range schedules {
if schedule.JobType == portainer.ScriptExecutionJobType {
jobContext := cron.NewScriptExecutionJobContext(jobService, endpointService, fileService)
jobRunner := cron.NewScriptExecutionJobRunner(&schedule, jobContext)
err = jobScheduler.ScheduleJob(jobRunner)
if err != nil {
return err
}
} }
} }
if *flags.Snapshot { return nil
err := jobScheduler.ScheduleSnapshotJob(*flags.SnapshotInterval)
if err != nil {
return nil, err
}
}
return jobScheduler, nil
} }
func initStatus(endpointManagement, snapshot bool, flags *portainer.CLIFlags) *portainer.Status { func initStatus(endpointManagement, snapshot bool, flags *portainer.CLIFlags) *portainer.Status {
@ -175,6 +262,7 @@ func initSettings(settingsService portainer.SettingsService, flags *portainer.CL
}, },
AllowBindMountsForRegularUsers: true, AllowBindMountsForRegularUsers: true,
AllowPrivilegedModeForRegularUsers: true, AllowPrivilegedModeForRegularUsers: true,
EnableHostManagementFeatures: false,
SnapshotInterval: *flags.SnapshotInterval, SnapshotInterval: *flags.SnapshotInterval,
} }
@ -383,6 +471,43 @@ func initEndpoint(flags *portainer.CLIFlags, endpointService portainer.EndpointS
return createUnsecuredEndpoint(*flags.EndpointURL, endpointService, snapshotter) return createUnsecuredEndpoint(*flags.EndpointURL, endpointService, snapshotter)
} }
func initJobService(dockerClientFactory *docker.ClientFactory) portainer.JobService {
return docker.NewJobService(dockerClientFactory)
}
func initExtensionManager(fileService portainer.FileService, extensionService portainer.ExtensionService) (portainer.ExtensionManager, error) {
extensionManager := exec.NewExtensionManager(fileService, extensionService)
extensions, err := extensionService.Extensions()
if err != nil {
return nil, err
}
for _, extension := range extensions {
err := extensionManager.EnableExtension(&extension, extension.License.LicenseKey)
if err != nil {
return nil, err
}
}
return extensionManager, nil
}
func terminateIfNoAdminCreated(userService portainer.UserService) {
timer1 := time.NewTimer(5 * time.Minute)
<-timer1.C
users, err := userService.UsersByRole(portainer.AdministratorRole)
if err != nil {
log.Fatal(err)
}
if len(users) == 0 {
log.Fatal("No administrator account was created after 5 min. Shutting down the Portainer instance for security reasons.")
return
}
}
func main() { func main() {
flags := initCLI() flags := initCLI()
@ -406,16 +531,16 @@ func main() {
log.Fatal(err) log.Fatal(err)
} }
clientFactory := initClientFactory(digitalSignatureService) extensionManager, err := initExtensionManager(fileService, store.ExtensionService)
snapshotter := initSnapshotter(clientFactory)
jobScheduler, err := initJobScheduler(store.EndpointService, snapshotter, flags)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
jobScheduler.Start() clientFactory := initClientFactory(digitalSignatureService)
jobService := initJobService(clientFactory)
snapshotter := initSnapshotter(clientFactory)
endpointManagement := true endpointManagement := true
if *flags.ExternalEndpoints != "" { if *flags.ExternalEndpoints != "" {
@ -439,6 +564,27 @@ func main() {
log.Fatal(err) log.Fatal(err)
} }
jobScheduler := initJobScheduler()
err = loadSchedulesFromDatabase(jobScheduler, jobService, store.ScheduleService, store.EndpointService, fileService)
if err != nil {
log.Fatal(err)
}
err = loadEndpointSyncSystemSchedule(jobScheduler, store.ScheduleService, store.EndpointService, flags)
if err != nil {
log.Fatal(err)
}
if *flags.Snapshot {
err = loadSnapshotSystemSchedule(jobScheduler, snapshotter, store.ScheduleService, store.EndpointService, store.SettingsService)
if err != nil {
log.Fatal(err)
}
}
jobScheduler.Start()
err = initDockerHub(store.DockerHubService) err = initDockerHub(store.DockerHubService)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
@ -487,6 +633,10 @@ func main() {
} }
} }
if !*flags.NoAuth {
go terminateIfNoAdminCreated(store.UserService)
}
var server portainer.Server = &http.Server{ var server portainer.Server = &http.Server{
Status: applicationStatus, Status: applicationStatus,
BindAddress: *flags.Addr, BindAddress: *flags.Addr,
@ -498,16 +648,19 @@ func main() {
TeamMembershipService: store.TeamMembershipService, TeamMembershipService: store.TeamMembershipService,
EndpointService: store.EndpointService, EndpointService: store.EndpointService,
EndpointGroupService: store.EndpointGroupService, EndpointGroupService: store.EndpointGroupService,
ExtensionService: store.ExtensionService,
ResourceControlService: store.ResourceControlService, ResourceControlService: store.ResourceControlService,
SettingsService: store.SettingsService, SettingsService: store.SettingsService,
RegistryService: store.RegistryService, RegistryService: store.RegistryService,
DockerHubService: store.DockerHubService, DockerHubService: store.DockerHubService,
StackService: store.StackService, StackService: store.StackService,
ScheduleService: store.ScheduleService,
TagService: store.TagService, TagService: store.TagService,
TemplateService: store.TemplateService, TemplateService: store.TemplateService,
WebhookService: store.WebhookService, WebhookService: store.WebhookService,
SwarmStackManager: swarmStackManager, SwarmStackManager: swarmStackManager,
ComposeStackManager: composeStackManager, ComposeStackManager: composeStackManager,
ExtensionManager: extensionManager,
CryptoService: cryptoService, CryptoService: cryptoService,
JWTService: jwtService, JWTService: jwtService,
FileService: fileService, FileService: fileService,
@ -520,6 +673,7 @@ func main() {
SSLCert: *flags.SSLCert, SSLCert: *flags.SSLCert,
SSLKey: *flags.SSLKey, SSLKey: *flags.SSLKey,
DockerClientFactory: clientFactory, DockerClientFactory: clientFactory,
JobService: jobService,
} }
log.Printf("Starting Portainer %s on %s", portainer.APIVersion, *flags.Addr) log.Printf("Starting Portainer %s on %s", portainer.APIVersion, *flags.Addr)

View file

@ -1,60 +0,0 @@
package cron
import (
"log"
"github.com/portainer/portainer"
)
type (
	// endpointSnapshotJob captures snapshots of endpoints and persists the
	// results through the endpoint service. It implements the cron job
	// interface via its Run method.
	endpointSnapshotJob struct {
		endpointService portainer.EndpointService
		snapshotter     portainer.Snapshotter
	}
)
// newEndpointSnapshotJob builds a snapshot job bound to the given
// endpoint service and snapshotter.
func newEndpointSnapshotJob(endpointService portainer.EndpointService, snapshotter portainer.Snapshotter) endpointSnapshotJob {
	job := endpointSnapshotJob{
		endpointService: endpointService,
		snapshotter:     snapshotter,
	}
	return job
}
// Snapshot iterates over every registered endpoint (skipping Azure
// endpoints) and stores a fresh snapshot on each. A snapshot failure marks
// the endpoint as down and is logged instead of aborting the whole run;
// only a persistence failure aborts.
func (job endpointSnapshotJob) Snapshot() error {
	endpoints, err := job.endpointService.Endpoints()
	if err != nil {
		return err
	}

	for _, endpoint := range endpoints {
		if endpoint.Type == portainer.AzureEnvironment {
			continue
		}

		snapshot, err := job.snapshotter.CreateSnapshot(&endpoint)
		// Optimistically mark the endpoint up; downgraded just below if the
		// snapshot attempt returned an error.
		endpoint.Status = portainer.EndpointStatusUp
		if err != nil {
			log.Printf("cron error: endpoint snapshot error (endpoint=%s, URL=%s) (err=%s)\n", endpoint.Name, endpoint.URL, err)
			endpoint.Status = portainer.EndpointStatusDown
		}

		// A partial snapshot may still be returned alongside an error.
		if snapshot != nil {
			endpoint.Snapshots = []portainer.Snapshot{*snapshot}
		}

		err = job.endpointService.UpdateEndpoint(endpoint.ID, &endpoint)
		if err != nil {
			return err
		}
	}

	return nil
}
// Run executes the snapshot pass and logs any error it produces, so the
// cron scheduler (whose job interface returns nothing) can invoke it.
func (job endpointSnapshotJob) Run() {
	if err := job.Snapshot(); err != nil {
		log.Printf("cron error: snapshot job error (err=%s)\n", err)
	}
}

View file

@ -9,19 +9,41 @@ import (
"github.com/portainer/portainer" "github.com/portainer/portainer"
) )
type ( // EndpointSyncJobRunner is used to run a EndpointSyncJob
endpointSyncJob struct { type EndpointSyncJobRunner struct {
schedule *portainer.Schedule
context *EndpointSyncJobContext
}
// EndpointSyncJobContext represents the context of execution of a EndpointSyncJob
type EndpointSyncJobContext struct {
endpointService portainer.EndpointService endpointService portainer.EndpointService
endpointFilePath string endpointFilePath string
} }
synchronization struct { // NewEndpointSyncJobContext returns a new context that can be used to execute a EndpointSyncJob
func NewEndpointSyncJobContext(endpointService portainer.EndpointService, endpointFilePath string) *EndpointSyncJobContext {
return &EndpointSyncJobContext{
endpointService: endpointService,
endpointFilePath: endpointFilePath,
}
}
// NewEndpointSyncJobRunner returns a new runner that can be scheduled
func NewEndpointSyncJobRunner(schedule *portainer.Schedule, context *EndpointSyncJobContext) *EndpointSyncJobRunner {
return &EndpointSyncJobRunner{
schedule: schedule,
context: context,
}
}
type synchronization struct {
endpointsToCreate []*portainer.Endpoint endpointsToCreate []*portainer.Endpoint
endpointsToUpdate []*portainer.Endpoint endpointsToUpdate []*portainer.Endpoint
endpointsToDelete []*portainer.Endpoint endpointsToDelete []*portainer.Endpoint
} }
fileEndpoint struct { type fileEndpoint struct {
Name string `json:"Name"` Name string `json:"Name"`
URL string `json:"URL"` URL string `json:"URL"`
TLS bool `json:"TLS,omitempty"` TLS bool `json:"TLS,omitempty"`
@ -29,24 +51,51 @@ type (
TLSCACert string `json:"TLSCACert,omitempty"` TLSCACert string `json:"TLSCACert,omitempty"`
TLSCert string `json:"TLSCert,omitempty"` TLSCert string `json:"TLSCert,omitempty"`
TLSKey string `json:"TLSKey,omitempty"` TLSKey string `json:"TLSKey,omitempty"`
}
// GetSchedule returns the schedule associated to the runner
func (runner *EndpointSyncJobRunner) GetSchedule() *portainer.Schedule {
return runner.schedule
}
// Run triggers the execution of the endpoint synchronization process.
func (runner *EndpointSyncJobRunner) Run() {
data, err := ioutil.ReadFile(runner.context.endpointFilePath)
if endpointSyncError(err) {
return
} }
)
const ( var fileEndpoints []fileEndpoint
// ErrEmptyEndpointArray is an error raised when the external endpoint source array is empty. err = json.Unmarshal(data, &fileEndpoints)
ErrEmptyEndpointArray = portainer.Error("External endpoint source is empty") if endpointSyncError(err) {
) return
}
func newEndpointSyncJob(endpointFilePath string, endpointService portainer.EndpointService) endpointSyncJob { if len(fileEndpoints) == 0 {
return endpointSyncJob{ log.Println("background job error (endpoint synchronization). External endpoint source is empty")
endpointService: endpointService, return
endpointFilePath: endpointFilePath, }
storedEndpoints, err := runner.context.endpointService.Endpoints()
if endpointSyncError(err) {
return
}
convertedFileEndpoints := convertFileEndpoints(fileEndpoints)
sync := prepareSyncData(storedEndpoints, convertedFileEndpoints)
if sync.requireSync() {
err = runner.context.endpointService.Synchronize(sync.endpointsToCreate, sync.endpointsToUpdate, sync.endpointsToDelete)
if endpointSyncError(err) {
return
}
log.Printf("Endpoint synchronization ended. [created: %v] [updated: %v] [deleted: %v]", len(sync.endpointsToCreate), len(sync.endpointsToUpdate), len(sync.endpointsToDelete))
} }
} }
func endpointSyncError(err error) bool { func endpointSyncError(err error) bool {
if err != nil { if err != nil {
log.Printf("cron error: synchronization job error (err=%s)\n", err) log.Printf("background job error (endpoint synchronization). Unable to synchronize endpoints (err=%s)\n", err)
return true return true
} }
return false return false
@ -126,8 +175,7 @@ func (sync synchronization) requireSync() bool {
return false return false
} }
// TMP: endpointSyncJob method to access logger, should be generic func prepareSyncData(storedEndpoints, fileEndpoints []portainer.Endpoint) *synchronization {
func (job endpointSyncJob) prepareSyncData(storedEndpoints, fileEndpoints []portainer.Endpoint) *synchronization {
endpointsToCreate := make([]*portainer.Endpoint, 0) endpointsToCreate := make([]*portainer.Endpoint, 0)
endpointsToUpdate := make([]*portainer.Endpoint, 0) endpointsToUpdate := make([]*portainer.Endpoint, 0)
endpointsToDelete := make([]*portainer.Endpoint, 0) endpointsToDelete := make([]*portainer.Endpoint, 0)
@ -164,43 +212,3 @@ func (job endpointSyncJob) prepareSyncData(storedEndpoints, fileEndpoints []port
endpointsToDelete: endpointsToDelete, endpointsToDelete: endpointsToDelete,
} }
} }
func (job endpointSyncJob) Sync() error {
data, err := ioutil.ReadFile(job.endpointFilePath)
if endpointSyncError(err) {
return err
}
var fileEndpoints []fileEndpoint
err = json.Unmarshal(data, &fileEndpoints)
if endpointSyncError(err) {
return err
}
if len(fileEndpoints) == 0 {
return ErrEmptyEndpointArray
}
storedEndpoints, err := job.endpointService.Endpoints()
if endpointSyncError(err) {
return err
}
convertedFileEndpoints := convertFileEndpoints(fileEndpoints)
sync := job.prepareSyncData(storedEndpoints, convertedFileEndpoints)
if sync.requireSync() {
err = job.endpointService.Synchronize(sync.endpointsToCreate, sync.endpointsToUpdate, sync.endpointsToDelete)
if endpointSyncError(err) {
return err
}
log.Printf("Endpoint synchronization ended. [created: %v] [updated: %v] [deleted: %v]", len(sync.endpointsToCreate), len(sync.endpointsToUpdate), len(sync.endpointsToDelete))
}
return nil
}
func (job endpointSyncJob) Run() {
log.Println("cron: synchronization job started")
err := job.Sync()
endpointSyncError(err)
}

View file

@ -0,0 +1,96 @@
package cron
import (
"log"
"time"
"github.com/portainer/portainer"
)
// ScriptExecutionJobRunner is used to run a ScriptExecutionJob
type ScriptExecutionJobRunner struct {
	schedule *portainer.Schedule // schedule holding the ScriptExecutionJob definition and cron expression
	context  *ScriptExecutionJobContext // services required to execute the job
	executedOnce bool // tracks whether a non-recurring schedule has already fired (see Run)
}
// ScriptExecutionJobContext represents the context of execution of a ScriptExecutionJob
type ScriptExecutionJobContext struct {
	jobService      portainer.JobService      // executes the script inside a container on the target endpoint
	endpointService portainer.EndpointService // resolves endpoint IDs to endpoint definitions
	fileService     portainer.FileService     // reads the script file content from disk
}
// NewScriptExecutionJobContext returns a new context that can be used to execute a ScriptExecutionJob
func NewScriptExecutionJobContext(jobService portainer.JobService, endpointService portainer.EndpointService, fileService portainer.FileService) *ScriptExecutionJobContext {
	context := &ScriptExecutionJobContext{}
	context.jobService = jobService
	context.endpointService = endpointService
	context.fileService = fileService
	return context
}
// NewScriptExecutionJobRunner returns a new runner that can be scheduled
func NewScriptExecutionJobRunner(schedule *portainer.Schedule, context *ScriptExecutionJobContext) *ScriptExecutionJobRunner {
	runner := ScriptExecutionJobRunner{}
	runner.schedule = schedule
	runner.context = context
	runner.executedOnce = false
	return &runner
}
// Run triggers the execution of the job.
// It will iterate through all the endpoints specified in the context to
// execute the script associated to the job.
func (runner *ScriptExecutionJobRunner) Run() {
	// Non-recurring schedules must only fire once, even though the cron
	// library keeps invoking the runner on its cron expression.
	if !runner.schedule.Recurring && runner.executedOnce {
		return
	}
	runner.executedOnce = true

	scriptFile, err := runner.context.fileService.GetFileContent(runner.schedule.ScriptExecutionJob.ScriptPath)
	if err != nil {
		log.Printf("scheduled job error (script execution). Unable to retrieve script file (err=%s)\n", err)
		return
	}

	// Resolve every endpoint ID up-front; abort the whole run if any lookup fails.
	targets := make([]*portainer.Endpoint, 0)
	for _, endpointID := range runner.schedule.ScriptExecutionJob.Endpoints {
		endpoint, err := runner.context.endpointService.Endpoint(endpointID)
		if err != nil {
			log.Printf("scheduled job error (script execution). Unable to retrieve information about endpoint (id=%d) (err=%s)\n", endpointID, err)
			return
		}

		targets = append(targets, endpoint)
	}

	runner.executeAndRetry(targets, scriptFile, 0)
}
// executeAndRetry runs the script against each endpoint and retries the
// endpoints that could not be reached (portainer.ErrUnableToPingEndpoint),
// up to ScriptExecutionJob.RetryCount attempts, sleeping
// ScriptExecutionJob.RetryInterval seconds between attempts.
// Any other execution error is logged and the endpoint is not retried.
func (runner *ScriptExecutionJobRunner) executeAndRetry(endpoints []*portainer.Endpoint, script []byte, retryCount int) {
	retryTargets := make([]*portainer.Endpoint, 0)
	for _, endpoint := range endpoints {
		err := runner.context.jobService.ExecuteScript(endpoint, "", runner.schedule.ScriptExecutionJob.Image, script, runner.schedule)
		if err == portainer.ErrUnableToPingEndpoint {
			retryTargets = append(retryTargets, endpoint)
		} else if err != nil {
			log.Printf("scheduled job error (script execution). Unable to execute script (endpoint=%s) (err=%s)\n", endpoint.Name, err)
		}
	}

	// Fix: bail out as soon as there is nothing left to retry, instead of
	// sleeping and recursing on an empty slice until retryCount is exhausted.
	if len(retryTargets) == 0 {
		return
	}

	retryCount++
	if retryCount >= runner.schedule.ScriptExecutionJob.RetryCount {
		return
	}

	time.Sleep(time.Duration(runner.schedule.ScriptExecutionJob.RetryInterval) * time.Second)
	runner.executeAndRetry(retryTargets, script, retryCount)
}
// GetSchedule returns the schedule associated to the runner.
// It is required by the portainer.JobRunner interface the JobScheduler
// type-asserts cron entries to.
func (runner *ScriptExecutionJobRunner) GetSchedule() *portainer.Schedule {
	return runner.schedule
}

85
api/cron/job_snapshot.go Normal file
View file

@ -0,0 +1,85 @@
package cron
import (
"log"
"github.com/portainer/portainer"
)
// SnapshotJobRunner is used to run a SnapshotJob
type SnapshotJobRunner struct {
	schedule *portainer.Schedule // schedule carrying the cron expression for the snapshot job
	context  *SnapshotJobContext // services required to snapshot endpoints
}
// SnapshotJobContext represents the context of execution of a SnapshotJob
type SnapshotJobContext struct {
	endpointService portainer.EndpointService // used to list and update endpoints
	snapshotter     portainer.Snapshotter     // creates the actual endpoint snapshots
}
// NewSnapshotJobContext returns a new context that can be used to execute a SnapshotJob
func NewSnapshotJobContext(endpointService portainer.EndpointService, snapshotter portainer.Snapshotter) *SnapshotJobContext {
	context := SnapshotJobContext{}
	context.endpointService = endpointService
	context.snapshotter = snapshotter
	return &context
}
// NewSnapshotJobRunner returns a new runner that can be scheduled
func NewSnapshotJobRunner(schedule *portainer.Schedule, context *SnapshotJobContext) *SnapshotJobRunner {
	runner := SnapshotJobRunner{}
	runner.schedule = schedule
	runner.context = context
	return &runner
}
// GetSchedule returns the schedule associated to the runner.
// It is required by the portainer.JobRunner interface the JobScheduler
// type-asserts cron entries to.
func (runner *SnapshotJobRunner) GetSchedule() *portainer.Schedule {
	return runner.schedule
}
// Run triggers the execution of the schedule.
// It will iterate through all the endpoints available in the database to
// create a snapshot of each one of them.
// As a snapshot can be a long process, to avoid any concurrency issue we
// retrieve the latest version of the endpoint right after a snapshot.
func (runner *SnapshotJobRunner) Run() {
	// NOTE(review): the work runs in a fire-and-forget goroutine; nothing
	// waits for it or can cancel it — confirm this is acceptable to callers.
	go func() {
		endpoints, err := runner.context.endpointService.Endpoints()
		if err != nil {
			log.Printf("background schedule error (endpoint snapshot). Unable to retrieve endpoint list (err=%s)\n", err)
			return
		}

		for _, endpoint := range endpoints {
			// Azure endpoints are skipped; only the remaining endpoint types are snapshotted.
			if endpoint.Type == portainer.AzureEnvironment {
				continue
			}

			snapshot, snapshotError := runner.context.snapshotter.CreateSnapshot(&endpoint)

			// Re-read the endpoint: it may have been modified or deleted while
			// the (potentially long) snapshot was in progress.
			latestEndpointReference, err := runner.context.endpointService.Endpoint(endpoint.ID)
			if latestEndpointReference == nil {
				log.Printf("background schedule error (endpoint snapshot). Endpoint not found inside the database anymore (endpoint=%s, URL=%s) (err=%s)\n", endpoint.Name, endpoint.URL, err)
				continue
			}

			// A failed snapshot marks the endpoint as down instead of aborting the loop.
			latestEndpointReference.Status = portainer.EndpointStatusUp
			if snapshotError != nil {
				log.Printf("background schedule error (endpoint snapshot). Unable to create snapshot (endpoint=%s, URL=%s) (err=%s)\n", endpoint.Name, endpoint.URL, snapshotError)
				latestEndpointReference.Status = portainer.EndpointStatusDown
			}

			// Only the most recent snapshot is kept on the endpoint.
			if snapshot != nil {
				latestEndpointReference.Snapshots = []portainer.Snapshot{*snapshot}
			}

			err = runner.context.endpointService.UpdateEndpoint(latestEndpointReference.ID, latestEndpointReference)
			if err != nil {
				log.Printf("background schedule error (endpoint snapshot). Unable to update endpoint (endpoint=%s, URL=%s) (err=%s)\n", endpoint.Name, endpoint.URL, err)
				return
			}
		}
	}()
}

View file

@ -1,80 +1,109 @@
package cron package cron
import ( import (
"log"
"github.com/portainer/portainer" "github.com/portainer/portainer"
"github.com/robfig/cron" "github.com/robfig/cron"
) )
// JobScheduler represents a service for managing crons. // JobScheduler represents a service for managing crons
type JobScheduler struct { type JobScheduler struct {
cron *cron.Cron cron *cron.Cron
endpointService portainer.EndpointService
snapshotter portainer.Snapshotter
endpointFilePath string
endpointSyncInterval string
} }
// NewJobScheduler initializes a new service. // NewJobScheduler initializes a new service
func NewJobScheduler(endpointService portainer.EndpointService, snapshotter portainer.Snapshotter) *JobScheduler { func NewJobScheduler() *JobScheduler {
return &JobScheduler{ return &JobScheduler{
cron: cron.New(), cron: cron.New(),
endpointService: endpointService,
snapshotter: snapshotter,
} }
} }
// ScheduleEndpointSyncJob schedules a cron job to synchronize the endpoints from a file // ScheduleJob schedules the execution of a job via a runner
func (scheduler *JobScheduler) ScheduleEndpointSyncJob(endpointFilePath string, interval string) error { func (scheduler *JobScheduler) ScheduleJob(runner portainer.JobRunner) error {
return scheduler.cron.AddJob(runner.GetSchedule().CronExpression, runner)
}
scheduler.endpointFilePath = endpointFilePath // UpdateSystemJobSchedule updates the first occurence of the specified
scheduler.endpointSyncInterval = interval // scheduled job based on the specified job type.
// It does so by re-creating a new cron
// and adding all the existing jobs. It will then re-schedule the new job
// with the update cron expression passed in parameter.
// NOTE: the cron library do not support updating schedules directly
// hence the work-around
func (scheduler *JobScheduler) UpdateSystemJobSchedule(jobType portainer.JobType, newCronExpression string) error {
cronEntries := scheduler.cron.Entries()
newCron := cron.New()
job := newEndpointSyncJob(endpointFilePath, scheduler.endpointService) for _, entry := range cronEntries {
if entry.Job.(portainer.JobRunner).GetSchedule().JobType == jobType {
err := job.Sync() err := newCron.AddJob(newCronExpression, entry.Job)
if err != nil { if err != nil {
return err return err
} }
continue
return scheduler.cron.AddJob("@every "+interval, job)
}
// ScheduleSnapshotJob schedules a cron job to create endpoint snapshots
func (scheduler *JobScheduler) ScheduleSnapshotJob(interval string) error {
job := newEndpointSnapshotJob(scheduler.endpointService, scheduler.snapshotter)
err := job.Snapshot()
if err != nil {
return err
} }
return scheduler.cron.AddJob("@every "+interval, job) newCron.Schedule(entry.Schedule, entry.Job)
} }
// UpdateSnapshotJob will update the schedules to match the new snapshot interval
func (scheduler *JobScheduler) UpdateSnapshotJob(interval string) {
// TODO: the cron library do not support removing/updating schedules.
// As a work-around we need to re-create the cron and reschedule the jobs.
// We should update the library.
jobs := scheduler.cron.Entries()
scheduler.cron.Stop() scheduler.cron.Stop()
scheduler.cron = newCron
scheduler.cron.Start()
return nil
}
scheduler.cron = cron.New() // UpdateJobSchedule updates a specific scheduled job by re-creating a new cron
// and adding all the existing jobs. It will then re-schedule the new job
// via the specified JobRunner parameter.
// NOTE: the cron library do not support updating schedules directly
// hence the work-around
func (scheduler *JobScheduler) UpdateJobSchedule(runner portainer.JobRunner) error {
cronEntries := scheduler.cron.Entries()
newCron := cron.New()
for _, job := range jobs { for _, entry := range cronEntries {
switch job.Job.(type) {
case endpointSnapshotJob: if entry.Job.(portainer.JobRunner).GetSchedule().ID == runner.GetSchedule().ID {
scheduler.ScheduleSnapshotJob(interval)
case endpointSyncJob: var jobRunner cron.Job = runner
scheduler.ScheduleEndpointSyncJob(scheduler.endpointFilePath, scheduler.endpointSyncInterval) if entry.Job.(portainer.JobRunner).GetSchedule().JobType == portainer.SnapshotJobType {
default: jobRunner = entry.Job
log.Println("Unsupported job")
}
} }
err := newCron.AddJob(runner.GetSchedule().CronExpression, jobRunner)
if err != nil {
return err
}
continue
}
newCron.Schedule(entry.Schedule, entry.Job)
}
scheduler.cron.Stop()
scheduler.cron = newCron
scheduler.cron.Start()
return nil
}
// UnscheduleJob remove a scheduled job by re-creating a new cron
// and adding all the existing jobs except for the one specified via scheduleID.
// NOTE: the cron library do not support removing schedules directly
// hence the work-around
func (scheduler *JobScheduler) UnscheduleJob(scheduleID portainer.ScheduleID) {
cronEntries := scheduler.cron.Entries()
newCron := cron.New()
for _, entry := range cronEntries {
if entry.Job.(portainer.JobRunner).GetSchedule().ID == scheduleID {
continue
}
newCron.Schedule(entry.Schedule, entry.Job)
}
scheduler.cron.Stop()
scheduler.cron = newCron
scheduler.cron.Start() scheduler.cron.Start()
} }

View file

@ -26,6 +26,15 @@ type ECDSAService struct {
privateKey *ecdsa.PrivateKey privateKey *ecdsa.PrivateKey
publicKey *ecdsa.PublicKey publicKey *ecdsa.PublicKey
encodedPubKey string encodedPubKey string
secret string
}
// NewECDSAService returns a pointer to a ECDSAService.
// An optional secret can be specified
func NewECDSAService(secret string) *ECDSAService {
return &ECDSAService{
secret: secret,
}
} }
// EncodedPublicKey returns the encoded version of the public that can be used // EncodedPublicKey returns the encoded version of the public that can be used
@ -91,11 +100,17 @@ func (service *ECDSAService) GenerateKeyPair() ([]byte, []byte, error) {
return private, public, nil return private, public, nil
} }
// Sign creates a signature from a message. // CreateSignature creates a digital signature.
// It automatically hash the message using MD5 and creates a signature from // It automatically hash a specific message using MD5 and creates a signature from
// that hash. // that hash.
// If a secret is associated to the service, it will be used instead of the specified
// message.
// It then encodes the generated signature in base64. // It then encodes the generated signature in base64.
func (service *ECDSAService) Sign(message string) (string, error) { func (service *ECDSAService) CreateSignature(message string) (string, error) {
if service.secret != "" {
message = service.secret
}
hash := HashFromBytes([]byte(message)) hash := HashFromBytes([]byte(message))
r := big.NewInt(0) r := big.NewInt(0)

View file

@ -27,12 +27,13 @@ func NewClientFactory(signatureService portainer.DigitalSignatureService) *Clien
} }
// CreateClient is a generic function to create a Docker client based on // CreateClient is a generic function to create a Docker client based on
// a specific endpoint configuration // a specific endpoint configuration. The nodeName parameter can be used
func (factory *ClientFactory) CreateClient(endpoint *portainer.Endpoint) (*client.Client, error) { // with an agent enabled endpoint to target a specific node in an agent cluster.
func (factory *ClientFactory) CreateClient(endpoint *portainer.Endpoint, nodeName string) (*client.Client, error) {
if endpoint.Type == portainer.AzureEnvironment { if endpoint.Type == portainer.AzureEnvironment {
return nil, unsupportedEnvironmentType return nil, unsupportedEnvironmentType
} else if endpoint.Type == portainer.AgentOnDockerEnvironment { } else if endpoint.Type == portainer.AgentOnDockerEnvironment {
return createAgentClient(endpoint, factory.signatureService) return createAgentClient(endpoint, factory.signatureService, nodeName)
} }
if strings.HasPrefix(endpoint.URL, "unix://") || strings.HasPrefix(endpoint.URL, "npipe://") { if strings.HasPrefix(endpoint.URL, "unix://") || strings.HasPrefix(endpoint.URL, "npipe://") {
@ -61,13 +62,13 @@ func createTCPClient(endpoint *portainer.Endpoint) (*client.Client, error) {
) )
} }
func createAgentClient(endpoint *portainer.Endpoint, signatureService portainer.DigitalSignatureService) (*client.Client, error) { func createAgentClient(endpoint *portainer.Endpoint, signatureService portainer.DigitalSignatureService, nodeName string) (*client.Client, error) {
httpCli, err := httpClient(endpoint) httpCli, err := httpClient(endpoint)
if err != nil { if err != nil {
return nil, err return nil, err
} }
signature, err := signatureService.Sign(portainer.PortainerAgentSignatureMessage) signature, err := signatureService.CreateSignature(portainer.PortainerAgentSignatureMessage)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -77,6 +78,10 @@ func createAgentClient(endpoint *portainer.Endpoint, signatureService portainer.
portainer.PortainerAgentSignatureHeader: signature, portainer.PortainerAgentSignatureHeader: signature,
} }
if nodeName != "" {
headers[portainer.PortainerAgentTargetHeader] = nodeName
}
return client.NewClientWithOpts( return client.NewClientWithOpts(
client.WithHost(endpoint.URL), client.WithHost(endpoint.URL),
client.WithVersion(portainer.SupportedDockerAPIVersion), client.WithVersion(portainer.SupportedDockerAPIVersion),
@ -97,7 +102,7 @@ func httpClient(endpoint *portainer.Endpoint) (*http.Client, error) {
} }
return &http.Client{ return &http.Client{
Timeout: time.Second * 10,
Transport: transport, Transport: transport,
Timeout: 30 * time.Second,
}, nil }, nil
} }

115
api/docker/job.go Normal file
View file

@ -0,0 +1,115 @@
package docker
import (
"bytes"
"context"
"io"
"io/ioutil"
"strconv"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/docker/client"
"github.com/portainer/portainer"
"github.com/portainer/portainer/archive"
)
// JobService represents a service that handles the execution of jobs
type JobService struct {
	dockerClientFactory *ClientFactory // builds Docker API clients for a target endpoint/node
}
// NewJobService returns a pointer to a new job service
func NewJobService(dockerClientFactory *ClientFactory) *JobService {
	service := JobService{}
	service.dockerClientFactory = dockerClientFactory
	return &service
}
// ExecuteScript will leverage a privileged container to execute a script against the specified endpoint/nodename.
// It will copy the script content specified as a parameter inside a container based on the specified image and execute it.
func (service *JobService) ExecuteScript(endpoint *portainer.Endpoint, nodeName, image string, script []byte, schedule *portainer.Schedule) error {
	// Package the script as a tar stream so it can be copied into the container.
	buffer, err := archive.TarFileInBuffer(script, "script.sh", 0700)
	if err != nil {
		return err
	}

	cli, err := service.dockerClientFactory.CreateClient(endpoint, nodeName)
	if err != nil {
		return err
	}
	defer cli.Close()

	// Fail fast with a dedicated error so callers (e.g. the schedule runner)
	// can decide to retry unreachable endpoints.
	_, err = cli.Ping(context.Background())
	if err != nil {
		return portainer.ErrUnableToPingEndpoint
	}

	err = pullImage(cli, image)
	if err != nil {
		return err
	}

	containerConfig := &container.Config{
		AttachStdin:  true,
		AttachStdout: true,
		AttachStderr: true,
		Tty:          true,
		WorkingDir:   "/tmp",
		Image:        image,
		// Label the container with the endpoint ID so job containers can be identified later.
		Labels: map[string]string{
			"io.portainer.job.endpoint": strconv.Itoa(int(endpoint.ID)),
		},
		Cmd: strslice.StrSlice([]string{"sh", "/tmp/script.sh"}),
	}

	if schedule != nil {
		containerConfig.Labels["io.portainer.schedule.id"] = strconv.Itoa(int(schedule.ID))
	}

	// The job container runs privileged on the host network, with / mounted
	// as /host and several system directories mounted read-only.
	hostConfig := &container.HostConfig{
		Binds:       []string{"/:/host", "/etc:/etc:ro", "/usr:/usr:ro", "/run:/run:ro", "/sbin:/sbin:ro", "/var:/var:ro"},
		NetworkMode: "host",
		Privileged:  true,
	}

	networkConfig := &network.NetworkingConfig{}

	body, err := cli.ContainerCreate(context.Background(), containerConfig, hostConfig, networkConfig, "")
	if err != nil {
		return err
	}

	if schedule != nil {
		// Prefix the container name with the schedule name for discoverability.
		err = cli.ContainerRename(context.Background(), body.ID, schedule.Name+"_"+body.ID)
		if err != nil {
			return err
		}
	}

	// Copy the tar-ed script into /tmp of the container (matches WorkingDir and Cmd above).
	copyOptions := types.CopyToContainerOptions{}
	err = cli.CopyToContainer(context.Background(), body.ID, "/tmp", bytes.NewReader(buffer), copyOptions)
	if err != nil {
		return err
	}

	// The container is started but not waited on: execution is asynchronous.
	startOptions := types.ContainerStartOptions{}
	return cli.ContainerStart(context.Background(), body.ID, startOptions)
}
// pullImage pulls the specified image and drains the progress stream so the
// pull is fully completed before returning.
func pullImage(cli *client.Client, image string) error {
	pullStream, err := cli.ImagePull(context.Background(), image, types.ImagePullOptions{})
	if err != nil {
		return err
	}
	defer pullStream.Close()

	// Consume the whole progress stream; the pull is only finished once EOF is reached.
	_, err = io.Copy(ioutil.Discard, pullStream)
	return err
}

View file

@ -52,6 +52,16 @@ func snapshot(cli *client.Client) (*portainer.Snapshot, error) {
return nil, err return nil, err
} }
err = snapshotNetworks(snapshot, cli)
if err != nil {
return nil, err
}
err = snapshotVersion(snapshot, cli)
if err != nil {
return nil, err
}
snapshot.Time = time.Now().Unix() snapshot.Time = time.Now().Unix()
return snapshot, nil return snapshot, nil
} }
@ -66,6 +76,7 @@ func snapshotInfo(snapshot *portainer.Snapshot, cli *client.Client) error {
snapshot.DockerVersion = info.ServerVersion snapshot.DockerVersion = info.ServerVersion
snapshot.TotalCPU = info.NCPU snapshot.TotalCPU = info.NCPU
snapshot.TotalMemory = info.MemTotal snapshot.TotalMemory = info.MemTotal
snapshot.SnapshotRaw.Info = info
return nil return nil
} }
@ -132,6 +143,7 @@ func snapshotContainers(snapshot *portainer.Snapshot, cli *client.Client) error
snapshot.RunningContainerCount = runningContainers snapshot.RunningContainerCount = runningContainers
snapshot.StoppedContainerCount = stoppedContainers snapshot.StoppedContainerCount = stoppedContainers
snapshot.StackCount += len(stacks) snapshot.StackCount += len(stacks)
snapshot.SnapshotRaw.Containers = containers
return nil return nil
} }
@ -142,6 +154,7 @@ func snapshotImages(snapshot *portainer.Snapshot, cli *client.Client) error {
} }
snapshot.ImageCount = len(images) snapshot.ImageCount = len(images)
snapshot.SnapshotRaw.Images = images
return nil return nil
} }
@ -152,5 +165,24 @@ func snapshotVolumes(snapshot *portainer.Snapshot, cli *client.Client) error {
} }
snapshot.VolumeCount = len(volumes.Volumes) snapshot.VolumeCount = len(volumes.Volumes)
snapshot.SnapshotRaw.Volumes = volumes
return nil
}
// snapshotNetworks stores the raw Docker network list in the snapshot.
func snapshotNetworks(snapshot *portainer.Snapshot, cli *client.Client) error {
	networkList, listErr := cli.NetworkList(context.Background(), types.NetworkListOptions{})
	if listErr != nil {
		return listErr
	}

	snapshot.SnapshotRaw.Networks = networkList
	return nil
}
func snapshotVersion(snapshot *portainer.Snapshot, cli *client.Client) error {
version, err := cli.ServerVersion(context.Background())
if err != nil {
return err
}
snapshot.SnapshotRaw.Version = version
return nil return nil
} }

View file

@ -18,7 +18,7 @@ func NewSnapshotter(clientFactory *ClientFactory) *Snapshotter {
// CreateSnapshot creates a snapshot of a specific endpoint // CreateSnapshot creates a snapshot of a specific endpoint
func (snapshotter *Snapshotter) CreateSnapshot(endpoint *portainer.Endpoint) (*portainer.Snapshot, error) { func (snapshotter *Snapshotter) CreateSnapshot(endpoint *portainer.Endpoint) (*portainer.Snapshot, error) {
cli, err := snapshotter.clientFactory.CreateClient(endpoint) cli, err := snapshotter.clientFactory.CreateClient(endpoint, "")
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -88,6 +88,21 @@ const (
ErrUndefinedTLSFileType = Error("Undefined TLS file type") ErrUndefinedTLSFileType = Error("Undefined TLS file type")
) )
// Extension errors.
const (
ErrExtensionAlreadyEnabled = Error("This extension is already enabled")
)
// Docker errors.
const (
ErrUnableToPingEndpoint = Error("Unable to communicate with the endpoint")
)
// Schedule errors.
const (
ErrHostManagementFeaturesDisabled = Error("Host management features are disabled")
)
// Error represents an application error. // Error represents an application error.
type Error string type Error string

209
api/exec/extension.go Normal file
View file

@ -0,0 +1,209 @@
package exec
import (
"bytes"
"encoding/json"
"errors"
"os/exec"
"path"
"runtime"
"strconv"
"strings"
"github.com/orcaman/concurrent-map"
"github.com/portainer/portainer"
"github.com/portainer/portainer/http/client"
)
var extensionDownloadBaseURL = "https://portainer-io-assets.sfo2.digitaloceanspaces.com/extensions/"
var extensionBinaryMap = map[portainer.ExtensionID]string{
portainer.RegistryManagementExtension: "extension-registry-management",
}
// ExtensionManager represents a service used to
// manage extension processes.
type ExtensionManager struct {
	processes cmap.ConcurrentMap // running extension processes (*exec.Cmd), keyed by processKey(extension ID)
	fileService portainer.FileService // used to locate, extract and remove extension binaries
	extensionService portainer.ExtensionService // extension persistence service — usage not visible in this chunk
}
// NewExtensionManager returns a pointer to an ExtensionManager
func NewExtensionManager(fileService portainer.FileService, extensionService portainer.ExtensionService) *ExtensionManager {
	manager := ExtensionManager{}
	manager.processes = cmap.New()
	manager.fileService = fileService
	manager.extensionService = extensionService
	return &manager
}
// processKey builds the processes-map key for an extension identifier.
func processKey(ID portainer.ExtensionID) string {
	return strconv.FormatInt(int64(ID), 10)
}
// buildExtensionURL builds the download URL of an extension archive:
// <base><binary>-<os>-<arch>-<version>.zip
func buildExtensionURL(extension *portainer.Extension) string {
	parts := []string{
		extensionBinaryMap[extension.ID],
		runtime.GOOS,
		runtime.GOARCH,
		extension.Version,
	}
	return extensionDownloadBaseURL + strings.Join(parts, "-") + ".zip"
}
// buildExtensionPath builds the on-disk path of an extension binary inside
// binaryPath: <binary>-<os>-<arch>-<version>[.exe on windows]
func buildExtensionPath(binaryPath string, extension *portainer.Extension) string {
	filename := strings.Join([]string{
		extensionBinaryMap[extension.ID],
		runtime.GOOS,
		runtime.GOARCH,
		extension.Version,
	}, "-")

	if runtime.GOOS == "windows" {
		filename += ".exe"
	}

	return path.Join(binaryPath, filename)
}
// FetchExtensionDefinitions will fetch the list of available
// extension definitions from the official Portainer assets server
func (manager *ExtensionManager) FetchExtensionDefinitions() ([]portainer.Extension, error) {
	definitionsData, err := client.Get(portainer.ExtensionDefinitionsURL, 30)
	if err != nil {
		return nil, err
	}

	var definitions []portainer.Extension
	if err := json.Unmarshal(definitionsData, &definitions); err != nil {
		return nil, err
	}

	return definitions, nil
}
// EnableExtension will check for the existence of the extension binary on the filesystem
// first. If it does not exist, it will download it from the official Portainer assets server.
// After installing the binary on the filesystem, it will execute the binary in license check
// mode to validate the extension license. If the license is valid, it will then start
// the extension process and register it in the processes map.
func (manager *ExtensionManager) EnableExtension(extension *portainer.Extension, licenseKey string) error {
	extensionBinaryPath := buildExtensionPath(manager.fileService.GetBinaryFolder(), extension)
	extensionBinaryExists, err := manager.fileService.FileExists(extensionBinaryPath)
	if err != nil {
		return err
	}

	if !extensionBinaryExists {
		err := manager.downloadExtension(extension)
		if err != nil {
			return err
		}
	}

	licenseDetails, err := validateLicense(extensionBinaryPath, licenseKey)
	if err != nil {
		return err
	}

	// License check output layout: [0]=company, [1]=expiration, [2]=version.
	// NOTE(review): assumes the output has at least 3 "|"-separated fields —
	// validateLicense does not guarantee this; a malformed output would panic here.
	extension.License = portainer.LicenseInformation{
		LicenseKey: licenseKey,
		Company:    licenseDetails[0],
		Expiration: licenseDetails[1],
	}
	extension.Version = licenseDetails[2]

	return manager.startExtensionProcess(extension, extensionBinaryPath)
}
// DisableExtension will retrieve the process associated to the extension
// from the processes map and kill the process. It will then remove the process
// from the processes map and remove the binary associated to the extension
// from the filesystem
func (manager *ExtensionManager) DisableExtension(extension *portainer.Extension) error {
	process, ok := manager.processes.Get(processKey(extension.ID))
	if !ok {
		// No running process: nothing to stop, treat as already disabled.
		return nil
	}

	err := process.(*exec.Cmd).Process.Kill()
	if err != nil {
		return err
	}

	manager.processes.Remove(processKey(extension.ID))

	// RemoveDirectory is backed by os.RemoveAll, so it also deletes the plain
	// binary file referenced here.
	extensionBinaryPath := buildExtensionPath(manager.fileService.GetBinaryFolder(), extension)
	return manager.fileService.RemoveDirectory(extensionBinaryPath)
}
// UpdateExtension will download the new extension binary from the official Portainer assets
// server, disable the previous extension via DisableExtension, trigger a license check
// and then start the extension process and add it to the processes map
func (manager *ExtensionManager) UpdateExtension(extension *portainer.Extension, version string) error {
	oldVersion := extension.Version

	// Temporarily switch the version so the download targets the new binary.
	extension.Version = version
	err := manager.downloadExtension(extension)
	if err != nil {
		return err
	}

	// Restore the previous version so DisableExtension kills/removes the old binary.
	extension.Version = oldVersion
	err = manager.DisableExtension(extension)
	if err != nil {
		return err
	}

	extension.Version = version
	extensionBinaryPath := buildExtensionPath(manager.fileService.GetBinaryFolder(), extension)

	licenseDetails, err := validateLicense(extensionBinaryPath, extension.License.LicenseKey)
	if err != nil {
		return err
	}

	// The license check also reports the effective binary version ([2]).
	extension.Version = licenseDetails[2]

	return manager.startExtensionProcess(extension, extensionBinaryPath)
}
// downloadExtension fetches the extension archive from the assets server and
// extracts it into the binary store.
func (manager *ExtensionManager) downloadExtension(extension *portainer.Extension) error {
	archiveData, err := client.Get(buildExtensionURL(extension), 30)
	if err != nil {
		return err
	}

	return manager.fileService.ExtractExtensionArchive(archiveData)
}
// validateLicense runs the extension binary in license-check mode
// (-license <key> -check) and returns the "|"-separated fields of its stdout
// output (company, expiration, version — see EnableExtension).
// Any execution failure is reported as an invalid license key.
func validateLicense(binaryPath, licenseKey string) ([]string, error) {
	licenseCheckProcess := exec.Command(binaryPath, "-license", licenseKey, "-check")

	cmdOutput := &bytes.Buffer{}
	licenseCheckProcess.Stdout = cmdOutput

	err := licenseCheckProcess.Run()
	if err != nil {
		return nil, errors.New("Invalid extension license key")
	}

	// Idiom fix: bytes.Buffer.String() instead of string(cmdOutput.Bytes()).
	return strings.Split(cmdOutput.String(), "|"), nil
}
// startExtensionProcess launches the extension binary with its license key and
// registers the running command in the processes map so DisableExtension can
// kill it later.
func (manager *ExtensionManager) startExtensionProcess(extension *portainer.Extension, binaryPath string) error {
	extensionProcess := exec.Command(binaryPath, "-license", extension.License.LicenseKey)
	err := extensionProcess.Start()
	if err != nil {
		return err
	}

	// NOTE(review): the started process is never waited on here — confirm that
	// zombie processes are handled elsewhere or are not a concern.
	manager.processes.Set(processKey(extension.ID), extensionProcess)

	return nil
}

View file

@ -140,7 +140,7 @@ func (manager *SwarmStackManager) updateDockerCLIConfiguration(dataPath string)
return err return err
} }
signature, err := manager.signatureService.Sign(portainer.PortainerAgentSignatureMessage) signature, err := manager.signatureService.CreateSignature(portainer.PortainerAgentSignatureMessage)
if err != nil { if err != nil {
return err return err
} }

View file

@ -7,6 +7,7 @@ import (
"io/ioutil" "io/ioutil"
"github.com/portainer/portainer" "github.com/portainer/portainer"
"github.com/portainer/portainer/archive"
"io" "io"
"os" "os"
@ -32,6 +33,13 @@ const (
PrivateKeyFile = "portainer.key" PrivateKeyFile = "portainer.key"
// PublicKeyFile represents the name on disk of the file containing the public key. // PublicKeyFile represents the name on disk of the file containing the public key.
PublicKeyFile = "portainer.pub" PublicKeyFile = "portainer.pub"
// BinaryStorePath represents the subfolder where binaries are stored in the file store folder.
BinaryStorePath = "bin"
// ScheduleStorePath represents the subfolder where schedule files are stored.
ScheduleStorePath = "schedules"
// ExtensionRegistryManagementStorePath represents the subfolder where files related to the
// registry management extension are stored.
ExtensionRegistryManagementStorePath = "extensions"
) )
// Service represents a service for managing files and directories. // Service represents a service for managing files and directories.
@ -63,9 +71,30 @@ func NewService(dataStorePath, fileStorePath string) (*Service, error) {
return nil, err return nil, err
} }
err = service.createDirectoryInStore(BinaryStorePath)
if err != nil {
return nil, err
}
return service, nil return service, nil
} }
// GetBinaryFolder returns the full path to the binary store on the filesystem
func (service *Service) GetBinaryFolder() string {
return path.Join(service.fileStorePath, BinaryStorePath)
}
// ExtractExtensionArchive extracts the content of an extension archive
// specified as raw data into the binary store on the filesystem
func (service *Service) ExtractExtensionArchive(data []byte) error {
err := archive.UnzipArchive(data, path.Join(service.fileStorePath, BinaryStorePath))
if err != nil {
return err
}
return nil
}
// RemoveDirectory removes a directory on the filesystem. // RemoveDirectory removes a directory on the filesystem.
func (service *Service) RemoveDirectory(directoryPath string) error { func (service *Service) RemoveDirectory(directoryPath string) error {
return os.RemoveAll(directoryPath) return os.RemoveAll(directoryPath)
@ -97,6 +126,27 @@ func (service *Service) StoreStackFileFromBytes(stackIdentifier, fileName string
return path.Join(service.fileStorePath, stackStorePath), nil return path.Join(service.fileStorePath, stackStorePath), nil
} }
// StoreRegistryManagementFileFromBytes creates a subfolder in the
// ExtensionRegistryManagementStorePath and stores a new file from bytes.
// It returns the path to the folder where the file is stored.
func (service *Service) StoreRegistryManagementFileFromBytes(folder, fileName string, data []byte) (string, error) {
extensionStorePath := path.Join(ExtensionRegistryManagementStorePath, folder)
err := service.createDirectoryInStore(extensionStorePath)
if err != nil {
return "", err
}
file := path.Join(extensionStorePath, fileName)
r := bytes.NewReader(data)
err = service.createFileInStore(file, r)
if err != nil {
return "", err
}
return path.Join(service.fileStorePath, file), nil
}
// StoreTLSFileFromBytes creates a folder in the TLSStorePath and stores a new file from bytes. // StoreTLSFileFromBytes creates a folder in the TLSStorePath and stores a new file from bytes.
// It returns the path to the newly created file. // It returns the path to the newly created file.
func (service *Service) StoreTLSFileFromBytes(folder string, fileType portainer.TLSFileType, data []byte) (string, error) { func (service *Service) StoreTLSFileFromBytes(folder string, fileType portainer.TLSFileType, data []byte) (string, error) {
@ -318,3 +368,32 @@ func (service *Service) getContentFromPEMFile(filePath string) ([]byte, error) {
block, _ := pem.Decode(fileContent) block, _ := pem.Decode(fileContent)
return block.Bytes, nil return block.Bytes, nil
} }
// GetScheduleFolder returns the absolute path on the filesystem for a schedule based
// on its identifier.
func (service *Service) GetScheduleFolder(identifier string) string {
return path.Join(service.fileStorePath, ScheduleStorePath, identifier)
}
// StoreScheduledJobFileFromBytes creates a subfolder in the ScheduleStorePath and stores a new file from bytes.
// It returns the path to the folder where the file is stored.
func (service *Service) StoreScheduledJobFileFromBytes(identifier string, data []byte) (string, error) {
scheduleStorePath := path.Join(ScheduleStorePath, identifier)
err := service.createDirectoryInStore(scheduleStorePath)
if err != nil {
return "", err
}
filePath := path.Join(scheduleStorePath, createScheduledJobFileName(identifier))
r := bytes.NewReader(data)
err = service.createFileInStore(filePath, r)
if err != nil {
return "", err
}
return path.Join(service.fileStorePath, filePath), nil
}
func createScheduledJobFileName(identifier string) string {
return "job_" + identifier + ".sh"
}

View file

@ -15,6 +15,7 @@ import (
const ( const (
errInvalidResponseStatus = portainer.Error("Invalid response status (expecting 200)") errInvalidResponseStatus = portainer.Error("Invalid response status (expecting 200)")
defaultHTTPTimeout = 5
) )
// HTTPClient represents a client to send HTTP requests. // HTTPClient represents a client to send HTTP requests.
@ -26,7 +27,7 @@ type HTTPClient struct {
func NewHTTPClient() *HTTPClient { func NewHTTPClient() *HTTPClient {
return &HTTPClient{ return &HTTPClient{
&http.Client{ &http.Client{
Timeout: time.Second * 5, Timeout: time.Second * time.Duration(defaultHTTPTimeout),
}, },
} }
} }
@ -67,10 +68,16 @@ func (client *HTTPClient) ExecuteAzureAuthenticationRequest(credentials *portain
} }
// Get executes a simple HTTP GET to the specified URL and returns // Get executes a simple HTTP GET to the specified URL and returns
// the content of the response body. // the content of the response body. Timeout can be specified via the timeout parameter,
func Get(url string) ([]byte, error) { // will default to defaultHTTPTimeout if set to 0.
func Get(url string, timeout int) ([]byte, error) {
if timeout == 0 {
timeout = defaultHTTPTimeout
}
client := &http.Client{ client := &http.Client{
Timeout: time.Second * 3, Timeout: time.Second * time.Duration(timeout),
} }
response, err := client.Get(url) response, err := client.Get(url)

View file

@ -1,6 +1,7 @@
package endpointproxy package endpointproxy
import ( import (
"errors"
"strconv" "strconv"
httperror "github.com/portainer/libhttp/error" httperror "github.com/portainer/libhttp/error"
@ -23,6 +24,10 @@ func (handler *Handler) proxyRequestsToDockerAPI(w http.ResponseWriter, r *http.
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find an endpoint with the specified identifier inside the database", err} return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find an endpoint with the specified identifier inside the database", err}
} }
if endpoint.Status == portainer.EndpointStatusDown {
return &httperror.HandlerError{http.StatusServiceUnavailable, "Unable to query endpoint", errors.New("Endpoint is down")}
}
err = handler.requestBouncer.EndpointAccess(r, endpoint) err = handler.requestBouncer.EndpointAccess(r, endpoint)
if err != nil { if err != nil {
return &httperror.HandlerError{http.StatusForbidden, "Permission denied to access endpoint", portainer.ErrEndpointAccessDenied} return &httperror.HandlerError{http.StatusForbidden, "Permission denied to access endpoint", portainer.ErrEndpointAccessDenied}

View file

@ -1,5 +1,7 @@
package endpointproxy package endpointproxy
// TODO: legacy extension management
import ( import (
"strconv" "strconv"
@ -42,9 +44,9 @@ func (handler *Handler) proxyRequestsToStoridgeAPI(w http.ResponseWriter, r *htt
proxyExtensionKey := string(endpoint.ID) + "_" + string(portainer.StoridgeEndpointExtension) proxyExtensionKey := string(endpoint.ID) + "_" + string(portainer.StoridgeEndpointExtension)
var proxy http.Handler var proxy http.Handler
proxy = handler.ProxyManager.GetExtensionProxy(proxyExtensionKey) proxy = handler.ProxyManager.GetLegacyExtensionProxy(proxyExtensionKey)
if proxy == nil { if proxy == nil {
proxy, err = handler.ProxyManager.CreateAndRegisterExtensionProxy(proxyExtensionKey, storidgeExtension.URL) proxy, err = handler.ProxyManager.CreateLegacyExtensionProxy(proxyExtensionKey, storidgeExtension.URL)
if err != nil { if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to create extension proxy", err} return &httperror.HandlerError{http.StatusInternalServerError, "Unable to create extension proxy", err}
} }

View file

@ -42,7 +42,6 @@ func (handler *Handler) endpointDelete(w http.ResponseWriter, r *http.Request) *
} }
handler.ProxyManager.DeleteProxy(string(endpointID)) handler.ProxyManager.DeleteProxy(string(endpointID))
handler.ProxyManager.DeleteExtensionProxies(string(endpointID))
return response.Empty(w) return response.Empty(w)
} }

View file

@ -1,5 +1,7 @@
package endpoints package endpoints
// TODO: legacy extension management
import ( import (
"net/http" "net/http"

View file

@ -1,5 +1,7 @@
package endpoints package endpoints
// TODO: legacy extension management
import ( import (
"net/http" "net/http"

View file

@ -0,0 +1,116 @@
package endpoints
import (
"errors"
"net/http"
"github.com/asaskevich/govalidator"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
)
// endpointJobFromFilePayload is the multipart form payload used when the job
// script is supplied as an uploaded file (see Validate for the form fields).
type endpointJobFromFilePayload struct {
	// Image is the container image used to execute the script.
	Image string
	// File holds the raw bytes of the uploaded script file.
	File []byte
}
// endpointJobFromFileContentPayload is the JSON payload used when the job
// script is supplied inline as a string.
type endpointJobFromFileContentPayload struct {
	// Image is the container image used to execute the script.
	Image string
	// FileContent is the script content itself.
	FileContent string
}
// Validate extracts the "File" and "Image" multipart form fields from the
// request and populates the payload, returning an error when either is
// missing or unreadable.
func (payload *endpointJobFromFilePayload) Validate(r *http.Request) error {
	scriptFile, _, fileErr := request.RetrieveMultiPartFormFile(r, "File")
	if fileErr != nil {
		return portainer.Error("Invalid Script file. Ensure that the file is uploaded correctly")
	}

	imageName, imageErr := request.RetrieveMultiPartFormValue(r, "Image", false)
	if imageErr != nil {
		return portainer.Error("Invalid image name")
	}

	payload.File = scriptFile
	payload.Image = imageName
	return nil
}
// Validate checks that both the inline script content and the image name are
// non-empty.
func (payload *endpointJobFromFileContentPayload) Validate(r *http.Request) error {
	switch {
	case govalidator.IsNull(payload.FileContent):
		return portainer.Error("Invalid script file content")
	case govalidator.IsNull(payload.Image):
		return portainer.Error("Invalid image name")
	default:
		return nil
	}
}
// POST request on /api/endpoints/:id/job?method&nodeName
//
// endpointJob runs a script as a job on the targeted endpoint. The "method"
// query parameter selects how the script is supplied: "file" (multipart
// upload) or "string" (JSON payload with inline content).
func (handler *Handler) endpointJob(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	endpointID, err := request.RetrieveNumericRouteVariableValue(r, "id")
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid endpoint identifier route variable", err}
	}

	method, err := request.RetrieveQueryParameter(r, "method", false)
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid query parameter: method", err}
	}

	// nodeName is optional (second argument true) and targets a specific node.
	nodeName, _ := request.RetrieveQueryParameter(r, "nodeName", true)

	endpoint, err := handler.EndpointService.Endpoint(portainer.EndpointID(endpointID))
	if err == portainer.ErrObjectNotFound {
		return &httperror.HandlerError{http.StatusNotFound, "Unable to find an endpoint with the specified identifier inside the database", err}
	} else if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find an endpoint with the specified identifier inside the database", err}
	}

	// Verify the requesting user is allowed to access this endpoint.
	err = handler.requestBouncer.EndpointAccess(r, endpoint)
	if err != nil {
		return &httperror.HandlerError{http.StatusForbidden, "Permission denied to access endpoint", portainer.ErrEndpointAccessDenied}
	}

	// Dispatch on the supplied method; anything else is a client error.
	switch method {
	case "file":
		return handler.executeJobFromFile(w, r, endpoint, nodeName)
	case "string":
		return handler.executeJobFromFileContent(w, r, endpoint, nodeName)
	}

	return &httperror.HandlerError{http.StatusBadRequest, "Invalid value for query parameter: method. Value must be one of: string or file", errors.New(request.ErrInvalidQueryParameter)}
}
// executeJobFromFile runs a job whose script was provided as an uploaded
// multipart file on the given endpoint (optionally on a specific node).
func (handler *Handler) executeJobFromFile(w http.ResponseWriter, r *http.Request, endpoint *portainer.Endpoint, nodeName string) *httperror.HandlerError {
	var payload endpointJobFromFilePayload
	if err := payload.Validate(r); err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid request payload", err}
	}

	if err := handler.JobService.ExecuteScript(endpoint, nodeName, payload.Image, payload.File, nil); err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Failed executing job", err}
	}

	return response.Empty(w)
}
// executeJobFromFileContent runs a job whose script was provided inline in a
// JSON payload on the given endpoint (optionally on a specific node).
func (handler *Handler) executeJobFromFileContent(w http.ResponseWriter, r *http.Request, endpoint *portainer.Endpoint, nodeName string) *httperror.HandlerError {
	var payload endpointJobFromFileContentPayload
	if err := request.DecodeAndValidateJSONPayload(r, &payload); err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid request payload", err}
	}

	if err := handler.JobService.ExecuteScript(endpoint, nodeName, payload.Image, []byte(payload.FileContent), nil); err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Failed executing job", err}
	}

	return response.Empty(w)
}

View file

@ -1,42 +1,52 @@
package endpoints package endpoints
import ( import (
"log"
"net/http" "net/http"
httperror "github.com/portainer/libhttp/error" httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response" "github.com/portainer/libhttp/response"
"github.com/portainer/portainer" "github.com/portainer/portainer"
) )
// POST request on /api/endpoints/snapshot // POST request on /api/endpoints/:id/snapshot
func (handler *Handler) endpointSnapshot(w http.ResponseWriter, r *http.Request) *httperror.HandlerError { func (handler *Handler) endpointSnapshot(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
endpoints, err := handler.EndpointService.Endpoints() endpointID, err := request.RetrieveNumericRouteVariableValue(r, "id")
if err != nil { if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve endpoints from the database", err} return &httperror.HandlerError{http.StatusBadRequest, "Invalid endpoint identifier route variable", err}
}
endpoint, err := handler.EndpointService.Endpoint(portainer.EndpointID(endpointID))
if err == portainer.ErrObjectNotFound {
return &httperror.HandlerError{http.StatusNotFound, "Unable to find an endpoint with the specified identifier inside the database", err}
} else if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find an endpoint with the specified identifier inside the database", err}
} }
for _, endpoint := range endpoints {
if endpoint.Type == portainer.AzureEnvironment { if endpoint.Type == portainer.AzureEnvironment {
continue return &httperror.HandlerError{http.StatusBadRequest, "Snapshots not supported for Azure endpoints", err}
} }
snapshot, err := handler.Snapshotter.CreateSnapshot(&endpoint) snapshot, snapshotError := handler.Snapshotter.CreateSnapshot(endpoint)
endpoint.Status = portainer.EndpointStatusUp
if err != nil { latestEndpointReference, err := handler.EndpointService.Endpoint(endpoint.ID)
log.Printf("http error: endpoint snapshot error (endpoint=%s, URL=%s) (err=%s)\n", endpoint.Name, endpoint.URL, err) if latestEndpointReference == nil {
endpoint.Status = portainer.EndpointStatusDown return &httperror.HandlerError{http.StatusNotFound, "Unable to find an endpoint with the specified identifier inside the database", err}
}
latestEndpointReference.Status = portainer.EndpointStatusUp
if snapshotError != nil {
latestEndpointReference.Status = portainer.EndpointStatusDown
} }
if snapshot != nil { if snapshot != nil {
endpoint.Snapshots = []portainer.Snapshot{*snapshot} latestEndpointReference.Snapshots = []portainer.Snapshot{*snapshot}
} }
err = handler.EndpointService.UpdateEndpoint(endpoint.ID, &endpoint) err = handler.EndpointService.UpdateEndpoint(latestEndpointReference.ID, latestEndpointReference)
if err != nil { if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist endpoint changes inside the database", err} return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist endpoint changes inside the database", err}
} }
}
return response.Empty(w) return response.Empty(w)
} }

View file

@ -0,0 +1,49 @@
package endpoints
import (
"log"
"net/http"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
)
// POST request on /api/endpoints/snapshot
//
// endpointSnapshots snapshots every endpoint in the database (except Azure
// endpoints, which are not snapshotable) and updates each endpoint's status
// accordingly. Per-endpoint snapshot failures are logged and the endpoint is
// marked down, but do not abort the loop; only a database persist failure does.
func (handler *Handler) endpointSnapshots(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	endpoints, err := handler.EndpointService.Endpoints()
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve endpoints from the database", err}
	}

	for _, endpoint := range endpoints {
		// Azure endpoints do not support snapshots.
		if endpoint.Type == portainer.AzureEnvironment {
			continue
		}

		snapshot, snapshotError := handler.Snapshotter.CreateSnapshot(&endpoint)

		// Re-read the endpoint: it may have been modified or deleted while
		// the (potentially slow) snapshot was being taken.
		latestEndpointReference, err := handler.EndpointService.Endpoint(endpoint.ID)
		if latestEndpointReference == nil {
			log.Printf("background schedule error (endpoint snapshot). Endpoint not found inside the database anymore (endpoint=%s, URL=%s) (err=%s)\n", endpoint.Name, endpoint.URL, err)
			continue
		}

		// A failed snapshot marks the endpoint as down; success marks it up.
		latestEndpointReference.Status = portainer.EndpointStatusUp
		if snapshotError != nil {
			log.Printf("background schedule error (endpoint snapshot). Unable to create snapshot (endpoint=%s, URL=%s) (err=%s)\n", endpoint.Name, endpoint.URL, snapshotError)
			latestEndpointReference.Status = portainer.EndpointStatusDown
		}

		// Only the most recent snapshot is kept.
		if snapshot != nil {
			latestEndpointReference.Snapshots = []portainer.Snapshot{*snapshot}
		}

		err = handler.EndpointService.UpdateEndpoint(latestEndpointReference.ID, latestEndpointReference)
		if err != nil {
			return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist endpoint changes inside the database", err}
		}
	}

	return response.Empty(w)
}

View file

@ -12,16 +12,17 @@ import (
) )
type endpointUpdatePayload struct { type endpointUpdatePayload struct {
Name string Name *string
URL string URL *string
PublicURL string PublicURL *string
GroupID int GroupID *int
TLS bool TLS *bool
TLSSkipVerify bool TLSSkipVerify *bool
TLSSkipClientVerify bool TLSSkipClientVerify *bool
AzureApplicationID string Status *int
AzureTenantID string AzureApplicationID *string
AzureAuthenticationKey string AzureTenantID *string
AzureAuthenticationKey *string
Tags []string Tags []string
} }
@ -53,36 +54,49 @@ func (handler *Handler) endpointUpdate(w http.ResponseWriter, r *http.Request) *
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find an endpoint with the specified identifier inside the database", err} return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find an endpoint with the specified identifier inside the database", err}
} }
if payload.Name != "" { if payload.Name != nil {
endpoint.Name = payload.Name endpoint.Name = *payload.Name
} }
if payload.URL != "" { if payload.URL != nil {
endpoint.URL = payload.URL endpoint.URL = *payload.URL
} }
if payload.PublicURL != "" { if payload.PublicURL != nil {
endpoint.PublicURL = payload.PublicURL endpoint.PublicURL = *payload.PublicURL
} }
if payload.GroupID != 0 { if payload.GroupID != nil {
endpoint.GroupID = portainer.EndpointGroupID(payload.GroupID) endpoint.GroupID = portainer.EndpointGroupID(*payload.GroupID)
} }
if payload.Tags != nil { if payload.Tags != nil {
endpoint.Tags = payload.Tags endpoint.Tags = payload.Tags
} }
if payload.Status != nil {
switch *payload.Status {
case 1:
endpoint.Status = portainer.EndpointStatusUp
break
case 2:
endpoint.Status = portainer.EndpointStatusDown
break
default:
break
}
}
if endpoint.Type == portainer.AzureEnvironment { if endpoint.Type == portainer.AzureEnvironment {
credentials := endpoint.AzureCredentials credentials := endpoint.AzureCredentials
if payload.AzureApplicationID != "" { if payload.AzureApplicationID != nil {
credentials.ApplicationID = payload.AzureApplicationID credentials.ApplicationID = *payload.AzureApplicationID
} }
if payload.AzureTenantID != "" { if payload.AzureTenantID != nil {
credentials.TenantID = payload.AzureTenantID credentials.TenantID = *payload.AzureTenantID
} }
if payload.AzureAuthenticationKey != "" { if payload.AzureAuthenticationKey != nil {
credentials.AuthenticationKey = payload.AzureAuthenticationKey credentials.AuthenticationKey = *payload.AzureAuthenticationKey
} }
httpClient := client.NewHTTPClient() httpClient := client.NewHTTPClient()
@ -93,19 +107,25 @@ func (handler *Handler) endpointUpdate(w http.ResponseWriter, r *http.Request) *
endpoint.AzureCredentials = credentials endpoint.AzureCredentials = credentials
} }
if payload.TLS != nil {
folder := strconv.Itoa(endpointID) folder := strconv.Itoa(endpointID)
if payload.TLS {
if *payload.TLS {
endpoint.TLSConfig.TLS = true endpoint.TLSConfig.TLS = true
endpoint.TLSConfig.TLSSkipVerify = payload.TLSSkipVerify if payload.TLSSkipVerify != nil {
if !payload.TLSSkipVerify { endpoint.TLSConfig.TLSSkipVerify = *payload.TLSSkipVerify
if !*payload.TLSSkipVerify {
caCertPath, _ := handler.FileService.GetPathForTLSFile(folder, portainer.TLSFileCA) caCertPath, _ := handler.FileService.GetPathForTLSFile(folder, portainer.TLSFileCA)
endpoint.TLSConfig.TLSCACertPath = caCertPath endpoint.TLSConfig.TLSCACertPath = caCertPath
} else { } else {
endpoint.TLSConfig.TLSCACertPath = "" endpoint.TLSConfig.TLSCACertPath = ""
handler.FileService.DeleteTLSFile(folder, portainer.TLSFileCA) handler.FileService.DeleteTLSFile(folder, portainer.TLSFileCA)
} }
}
if !payload.TLSSkipClientVerify { if payload.TLSSkipClientVerify != nil {
if !*payload.TLSSkipClientVerify {
certPath, _ := handler.FileService.GetPathForTLSFile(folder, portainer.TLSFileCert) certPath, _ := handler.FileService.GetPathForTLSFile(folder, portainer.TLSFileCert)
endpoint.TLSConfig.TLSCertPath = certPath endpoint.TLSConfig.TLSCertPath = certPath
keyPath, _ := handler.FileService.GetPathForTLSFile(folder, portainer.TLSFileKey) keyPath, _ := handler.FileService.GetPathForTLSFile(folder, portainer.TLSFileKey)
@ -116,6 +136,8 @@ func (handler *Handler) endpointUpdate(w http.ResponseWriter, r *http.Request) *
endpoint.TLSConfig.TLSKeyPath = "" endpoint.TLSConfig.TLSKeyPath = ""
handler.FileService.DeleteTLSFile(folder, portainer.TLSFileKey) handler.FileService.DeleteTLSFile(folder, portainer.TLSFileKey)
} }
}
} else { } else {
endpoint.TLSConfig.TLS = false endpoint.TLSConfig.TLS = false
endpoint.TLSConfig.TLSSkipVerify = false endpoint.TLSConfig.TLSSkipVerify = false
@ -127,11 +149,14 @@ func (handler *Handler) endpointUpdate(w http.ResponseWriter, r *http.Request) *
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to remove TLS files from disk", err} return &httperror.HandlerError{http.StatusInternalServerError, "Unable to remove TLS files from disk", err}
} }
} }
}
if payload.URL != nil || payload.TLS != nil || endpoint.Type == portainer.AzureEnvironment {
_, err = handler.ProxyManager.CreateAndRegisterProxy(endpoint) _, err = handler.ProxyManager.CreateAndRegisterProxy(endpoint)
if err != nil { if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to register HTTP proxy for the endpoint", err} return &httperror.HandlerError{http.StatusInternalServerError, "Unable to register HTTP proxy for the endpoint", err}
} }
}
err = handler.EndpointService.UpdateEndpoint(endpoint.ID, endpoint) err = handler.EndpointService.UpdateEndpoint(endpoint.ID, endpoint)
if err != nil { if err != nil {

View file

@ -31,6 +31,7 @@ type Handler struct {
FileService portainer.FileService FileService portainer.FileService
ProxyManager *proxy.Manager ProxyManager *proxy.Manager
Snapshotter portainer.Snapshotter Snapshotter portainer.Snapshotter
JobService portainer.JobService
} }
// NewHandler creates a handler to manage endpoint operations. // NewHandler creates a handler to manage endpoint operations.
@ -44,7 +45,7 @@ func NewHandler(bouncer *security.RequestBouncer, authorizeEndpointManagement bo
h.Handle("/endpoints", h.Handle("/endpoints",
bouncer.AdministratorAccess(httperror.LoggerHandler(h.endpointCreate))).Methods(http.MethodPost) bouncer.AdministratorAccess(httperror.LoggerHandler(h.endpointCreate))).Methods(http.MethodPost)
h.Handle("/endpoints/snapshot", h.Handle("/endpoints/snapshot",
bouncer.AdministratorAccess(httperror.LoggerHandler(h.endpointSnapshot))).Methods(http.MethodPost) bouncer.AdministratorAccess(httperror.LoggerHandler(h.endpointSnapshots))).Methods(http.MethodPost)
h.Handle("/endpoints", h.Handle("/endpoints",
bouncer.RestrictedAccess(httperror.LoggerHandler(h.endpointList))).Methods(http.MethodGet) bouncer.RestrictedAccess(httperror.LoggerHandler(h.endpointList))).Methods(http.MethodGet)
h.Handle("/endpoints/{id}", h.Handle("/endpoints/{id}",
@ -59,6 +60,9 @@ func NewHandler(bouncer *security.RequestBouncer, authorizeEndpointManagement bo
bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.endpointExtensionAdd))).Methods(http.MethodPost) bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.endpointExtensionAdd))).Methods(http.MethodPost)
h.Handle("/endpoints/{id}/extensions/{extensionType}", h.Handle("/endpoints/{id}/extensions/{extensionType}",
bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.endpointExtensionRemove))).Methods(http.MethodDelete) bouncer.AuthenticatedAccess(httperror.LoggerHandler(h.endpointExtensionRemove))).Methods(http.MethodDelete)
h.Handle("/endpoints/{id}/job",
bouncer.AdministratorAccess(httperror.LoggerHandler(h.endpointJob))).Methods(http.MethodPost)
h.Handle("/endpoints/{id}/snapshot",
bouncer.AdministratorAccess(httperror.LoggerHandler(h.endpointSnapshot))).Methods(http.MethodPost)
return h return h
} }

View file

@ -0,0 +1,79 @@
package extensions
import (
"net/http"
"strconv"
"github.com/asaskevich/govalidator"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
)
// extensionCreatePayload is the JSON payload expected when enabling an
// extension.
type extensionCreatePayload struct {
	// License is the raw license key; its first character encodes the
	// extension identifier.
	License string
}
// Validate ensures a non-empty license string was provided.
func (payload *extensionCreatePayload) Validate(r *http.Request) error {
	if !govalidator.IsNull(payload.License) {
		return nil
	}
	return portainer.Error("Invalid license")
}
// extensionCreate enables a Portainer extension from a license key supplied
// in the request payload, resolving the version to install from the remote
// extension definitions, then persists the enabled state.
func (handler *Handler) extensionCreate(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	var payload extensionCreatePayload
	err := request.DecodeAndValidateJSONPayload(r, &payload)
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid request payload", err}
	}

	// The first character of the license key encodes the extension identifier.
	extensionIdentifier, err := strconv.Atoi(string(payload.License[0]))
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid license format", err}
	}
	extensionID := portainer.ExtensionID(extensionIdentifier)

	extensions, err := handler.ExtensionService.Extensions()
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve extensions status from the database", err}
	}

	// Reject the request if this extension is already enabled.
	for _, existingExtension := range extensions {
		if existingExtension.ID == extensionID {
			return &httperror.HandlerError{http.StatusConflict, "Unable to enable extension", portainer.ErrExtensionAlreadyEnabled}
		}
	}

	extension := &portainer.Extension{
		ID: extensionID,
	}

	// Resolve the version to enable from the remote definitions; if no
	// definition matches, Version stays empty.
	extensionDefinitions, err := handler.ExtensionManager.FetchExtensionDefinitions()
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve extension definitions", err}
	}

	for _, def := range extensionDefinitions {
		if def.ID == extension.ID {
			extension.Version = def.Version
			break
		}
	}

	err = handler.ExtensionManager.EnableExtension(extension, payload.License)
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to enable extension", err}
	}

	extension.Enabled = true

	// Persist the enabled state so it survives restarts.
	err = handler.ExtensionService.Persist(extension)
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist extension status inside the database", err}
	}

	return response.Empty(w)
}

View file

@ -0,0 +1,38 @@
package extensions
import (
"net/http"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
)
// DELETE request on /api/extensions/:id
//
// extensionDelete disables a previously enabled extension and removes its
// persisted state from the database.
func (handler *Handler) extensionDelete(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	extensionIdentifier, err := request.RetrieveNumericRouteVariableValue(r, "id")
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid extension identifier route variable", err}
	}
	extensionID := portainer.ExtensionID(extensionIdentifier)

	extension, err := handler.ExtensionService.Extension(extensionID)
	if err == portainer.ErrObjectNotFound {
		return &httperror.HandlerError{http.StatusNotFound, "Unable to find a extension with the specified identifier inside the database", err}
	} else if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find a extension with the specified identifier inside the database", err}
	}

	// Disable (tear down) the extension before removing its database record.
	err = handler.ExtensionManager.DisableExtension(extension)
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to delete extension", err}
	}

	err = handler.ExtensionService.DeleteExtension(extensionID)
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to delete the extension from the database", err}
	}

	return response.Empty(w)
}

View file

@ -0,0 +1,63 @@
package extensions
import (
"encoding/json"
"net/http"
"github.com/coreos/go-semver/semver"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
"github.com/portainer/portainer/http/client"
)
// GET request on /api/extensions/:id
//
// extensionInspect returns the public definition of a single extension,
// merged with its locally persisted state (enabled flag, update availability)
// when the extension is stored in the database.
func (handler *Handler) extensionInspect(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	extensionIdentifier, err := request.RetrieveNumericRouteVariableValue(r, "id")
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid extension identifier route variable", err}
	}
	extensionID := portainer.ExtensionID(extensionIdentifier)

	// Fetch the public extension definitions (30 second HTTP timeout).
	extensionData, err := client.Get(portainer.ExtensionDefinitionsURL, 30)
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve extension definitions", err}
	}

	var extensions []portainer.Extension
	err = json.Unmarshal(extensionData, &extensions)
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to parse external extension definitions", err}
	}

	// NOTE(review): if no definition matches extensionID, `extension` stays
	// zero-valued and is returned as-is below — confirm this is intended.
	var extension portainer.Extension
	for _, p := range extensions {
		if p.ID == extensionID {
			extension = p
			if extension.DescriptionURL != "" {
				// Description fetch is best-effort; errors are deliberately
				// ignored (10 second timeout).
				description, _ := client.Get(extension.DescriptionURL, 10)
				extension.Description = string(description)
			}
			break
		}
	}

	storedExtension, err := handler.ExtensionService.Extension(extensionID)
	if err == portainer.ErrObjectNotFound {
		// Extension is not enabled locally: return the raw definition.
		return response.JSON(w, extension)
	} else if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find a extension with the specified identifier inside the database", err}
	}

	extension.Enabled = storedExtension.Enabled

	// Flag an available update when the stored version is older than the
	// definition's. NOTE(review): semver.New panics on a malformed version
	// string — assumes both versions are always valid semver; verify.
	extensionVer := semver.New(extension.Version)
	pVer := semver.New(storedExtension.Version)
	if pVer.LessThan(*extensionVer) {
		extension.UpdateAvailable = true
	}

	return response.JSON(w, extension)
}

View file

@ -0,0 +1,55 @@
package extensions
import (
"net/http"
"github.com/coreos/go-semver/semver"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
)
// GET request on /api/extensions?store=<store>
//
// extensionList returns the extensions stored in the database. When the
// optional "store" query parameter is true, the remote store definitions are
// fetched instead and enriched with the locally persisted state.
func (handler *Handler) extensionList(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	withStoreDetails, _ := request.RetrieveBooleanQueryParameter(r, "store", true)

	extensions, err := handler.ExtensionService.Extensions()
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve extensions from the database", err}
	}

	if withStoreDetails {
		definitions, err := handler.ExtensionManager.FetchExtensionDefinitions()
		if err != nil {
			return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve extensions", err}
		}

		for i := range definitions {
			associateExtensionData(&definitions[i], extensions)
		}
		extensions = definitions
	}

	return response.JSON(w, extensions)
}
// associateExtensionData copies the locally persisted state (enabled flag and
// license details) onto the matching store definition and flags whether a
// newer version is available in the store.
func associateExtensionData(definition *portainer.Extension, extensions []portainer.Extension) {
	for i := range extensions {
		stored := &extensions[i]
		if stored.ID != definition.ID {
			continue
		}

		definition.Enabled = stored.Enabled
		definition.License.Company = stored.License.Company
		definition.License.Expiration = stored.License.Expiration

		definitionVersion := semver.New(definition.Version)
		storedVersion := semver.New(stored.Version)
		if storedVersion.LessThan(*definitionVersion) {
			definition.UpdateAvailable = true
		}
		return
	}
}

View file

@ -0,0 +1,56 @@
package extensions
import (
"net/http"
"github.com/asaskevich/govalidator"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
)
// extensionUpdatePayload is the JSON payload expected by the extension update endpoint.
type extensionUpdatePayload struct {
	// Version is the extension version to update to.
	Version string
}

// Validate ensures that a non-empty target version was provided.
func (payload *extensionUpdatePayload) Validate(r *http.Request) error {
	if govalidator.IsNull(payload.Version) {
		return portainer.Error("Invalid extension version")
	}
	return nil
}
// POST request on /api/extensions/:id/update
// extensionUpdate updates an installed extension to the version given in the
// JSON payload, then persists the refreshed extension record.
func (handler *Handler) extensionUpdate(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	extensionIdentifier, err := request.RetrieveNumericRouteVariableValue(r, "id")
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid extension identifier route variable", err}
	}
	extensionID := portainer.ExtensionID(extensionIdentifier)

	var payload extensionUpdatePayload
	err = request.DecodeAndValidateJSONPayload(r, &payload)
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid request payload", err}
	}

	// 404 when the extension is unknown, 500 on any other datastore failure.
	extension, err := handler.ExtensionService.Extension(extensionID)
	if err == portainer.ErrObjectNotFound {
		return &httperror.HandlerError{http.StatusNotFound, "Unable to find a extension with the specified identifier inside the database", err}
	} else if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find a extension with the specified identifier inside the database", err}
	}

	// Apply the update through the extension manager first; the record is only
	// persisted when the update itself succeeded.
	err = handler.ExtensionManager.UpdateExtension(extension, payload.Version)
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to update extension", err}
	}

	err = handler.ExtensionService.Persist(extension)
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist extension status inside the database", err}
	}

	return response.Empty(w)
}

View file

@ -0,0 +1,37 @@
package extensions
import (
"net/http"
"github.com/gorilla/mux"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/portainer"
"github.com/portainer/portainer/http/security"
)
// Handler is the HTTP handler used to handle extension operations.
type Handler struct {
	*mux.Router
	// ExtensionService provides access to persisted extension records.
	ExtensionService portainer.ExtensionService
	// ExtensionManager is used here to fetch store definitions and apply
	// extension updates.
	ExtensionManager portainer.ExtensionManager
}
// NewHandler creates a handler to manage extension operations.
// Every route is restricted to administrators; the exported service fields are
// expected to be set by the caller before the handler serves requests.
func NewHandler(bouncer *security.RequestBouncer) *Handler {
	h := &Handler{
		Router: mux.NewRouter(),
	}
	h.Handle("/extensions",
		bouncer.AdministratorAccess(httperror.LoggerHandler(h.extensionList))).Methods(http.MethodGet)
	h.Handle("/extensions",
		bouncer.AdministratorAccess(httperror.LoggerHandler(h.extensionCreate))).Methods(http.MethodPost)
	h.Handle("/extensions/{id}",
		bouncer.AdministratorAccess(httperror.LoggerHandler(h.extensionInspect))).Methods(http.MethodGet)
	h.Handle("/extensions/{id}",
		bouncer.AdministratorAccess(httperror.LoggerHandler(h.extensionDelete))).Methods(http.MethodDelete)
	h.Handle("/extensions/{id}/update",
		bouncer.AdministratorAccess(httperror.LoggerHandler(h.extensionUpdate))).Methods(http.MethodPost)
	return h
}

View file

@ -34,7 +34,6 @@ func (handler *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
} }
w.Header().Add("X-Frame-Options", "DENY")
w.Header().Add("X-XSS-Protection", "1; mode=block") w.Header().Add("X-XSS-Protection", "1; mode=block")
w.Header().Add("X-Content-Type-Options", "nosniff") w.Header().Add("X-Content-Type-Options", "nosniff")
handler.Handler.ServeHTTP(w, r) handler.Handler.ServeHTTP(w, r)

View file

@ -9,10 +9,12 @@ import (
"github.com/portainer/portainer/http/handler/endpointgroups" "github.com/portainer/portainer/http/handler/endpointgroups"
"github.com/portainer/portainer/http/handler/endpointproxy" "github.com/portainer/portainer/http/handler/endpointproxy"
"github.com/portainer/portainer/http/handler/endpoints" "github.com/portainer/portainer/http/handler/endpoints"
"github.com/portainer/portainer/http/handler/extensions"
"github.com/portainer/portainer/http/handler/file" "github.com/portainer/portainer/http/handler/file"
"github.com/portainer/portainer/http/handler/motd" "github.com/portainer/portainer/http/handler/motd"
"github.com/portainer/portainer/http/handler/registries" "github.com/portainer/portainer/http/handler/registries"
"github.com/portainer/portainer/http/handler/resourcecontrols" "github.com/portainer/portainer/http/handler/resourcecontrols"
"github.com/portainer/portainer/http/handler/schedules"
"github.com/portainer/portainer/http/handler/settings" "github.com/portainer/portainer/http/handler/settings"
"github.com/portainer/portainer/http/handler/stacks" "github.com/portainer/portainer/http/handler/stacks"
"github.com/portainer/portainer/http/handler/status" "github.com/portainer/portainer/http/handler/status"
@ -36,6 +38,7 @@ type Handler struct {
EndpointProxyHandler *endpointproxy.Handler EndpointProxyHandler *endpointproxy.Handler
FileHandler *file.Handler FileHandler *file.Handler
MOTDHandler *motd.Handler MOTDHandler *motd.Handler
ExtensionHandler *extensions.Handler
RegistryHandler *registries.Handler RegistryHandler *registries.Handler
ResourceControlHandler *resourcecontrols.Handler ResourceControlHandler *resourcecontrols.Handler
SettingsHandler *settings.Handler SettingsHandler *settings.Handler
@ -49,6 +52,7 @@ type Handler struct {
UserHandler *users.Handler UserHandler *users.Handler
WebSocketHandler *websocket.Handler WebSocketHandler *websocket.Handler
WebhookHandler *webhooks.Handler WebhookHandler *webhooks.Handler
SchedulesHanlder *schedules.Handler
} }
// ServeHTTP delegates a request to the appropriate subhandler. // ServeHTTP delegates a request to the appropriate subhandler.
@ -73,6 +77,8 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
} }
case strings.HasPrefix(r.URL.Path, "/api/motd"): case strings.HasPrefix(r.URL.Path, "/api/motd"):
http.StripPrefix("/api", h.MOTDHandler).ServeHTTP(w, r) http.StripPrefix("/api", h.MOTDHandler).ServeHTTP(w, r)
case strings.HasPrefix(r.URL.Path, "/api/extensions"):
http.StripPrefix("/api", h.ExtensionHandler).ServeHTTP(w, r)
case strings.HasPrefix(r.URL.Path, "/api/registries"): case strings.HasPrefix(r.URL.Path, "/api/registries"):
http.StripPrefix("/api", h.RegistryHandler).ServeHTTP(w, r) http.StripPrefix("/api", h.RegistryHandler).ServeHTTP(w, r)
case strings.HasPrefix(r.URL.Path, "/api/resource_controls"): case strings.HasPrefix(r.URL.Path, "/api/resource_controls"):
@ -99,6 +105,8 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
http.StripPrefix("/api", h.WebSocketHandler).ServeHTTP(w, r) http.StripPrefix("/api", h.WebSocketHandler).ServeHTTP(w, r)
case strings.HasPrefix(r.URL.Path, "/api/webhooks"): case strings.HasPrefix(r.URL.Path, "/api/webhooks"):
http.StripPrefix("/api", h.WebhookHandler).ServeHTTP(w, r) http.StripPrefix("/api", h.WebhookHandler).ServeHTTP(w, r)
case strings.HasPrefix(r.URL.Path, "/api/schedules"):
http.StripPrefix("/api", h.SchedulesHanlder).ServeHTTP(w, r)
case strings.HasPrefix(r.URL.Path, "/"): case strings.HasPrefix(r.URL.Path, "/"):
h.FileHandler.ServeHTTP(w, r) h.FileHandler.ServeHTTP(w, r)
} }

View file

@ -16,9 +16,9 @@ type motdResponse struct {
func (handler *Handler) motd(w http.ResponseWriter, r *http.Request) { func (handler *Handler) motd(w http.ResponseWriter, r *http.Request) {
motd, err := client.Get(portainer.MessageOfTheDayURL) motd, err := client.Get(portainer.MessageOfTheDayURL, 0)
if err != nil { if err != nil {
w.WriteHeader(http.StatusInternalServerError) response.JSON(w, &motdResponse{Message: ""})
return return
} }

View file

@ -1,23 +1,27 @@
package registries package registries
import ( import (
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/portainer"
"github.com/portainer/portainer/http/security"
"net/http" "net/http"
"github.com/gorilla/mux" "github.com/gorilla/mux"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/portainer"
"github.com/portainer/portainer/http/proxy"
"github.com/portainer/portainer/http/security"
) )
func hideFields(registry *portainer.Registry) { func hideFields(registry *portainer.Registry) {
registry.Password = "" registry.Password = ""
registry.ManagementConfiguration = nil
} }
// Handler is the HTTP handler used to handle registry operations. // Handler is the HTTP handler used to handle registry operations.
type Handler struct { type Handler struct {
*mux.Router *mux.Router
RegistryService portainer.RegistryService RegistryService portainer.RegistryService
ExtensionService portainer.ExtensionService
FileService portainer.FileService
ProxyManager *proxy.Manager
} }
// NewHandler creates a handler to manage registry operations. // NewHandler creates a handler to manage registry operations.
@ -36,8 +40,12 @@ func NewHandler(bouncer *security.RequestBouncer) *Handler {
bouncer.AdministratorAccess(httperror.LoggerHandler(h.registryUpdate))).Methods(http.MethodPut) bouncer.AdministratorAccess(httperror.LoggerHandler(h.registryUpdate))).Methods(http.MethodPut)
h.Handle("/registries/{id}/access", h.Handle("/registries/{id}/access",
bouncer.AdministratorAccess(httperror.LoggerHandler(h.registryUpdateAccess))).Methods(http.MethodPut) bouncer.AdministratorAccess(httperror.LoggerHandler(h.registryUpdateAccess))).Methods(http.MethodPut)
h.Handle("/registries/{id}/configure",
bouncer.AdministratorAccess(httperror.LoggerHandler(h.registryConfigure))).Methods(http.MethodPost)
h.Handle("/registries/{id}", h.Handle("/registries/{id}",
bouncer.AdministratorAccess(httperror.LoggerHandler(h.registryDelete))).Methods(http.MethodDelete) bouncer.AdministratorAccess(httperror.LoggerHandler(h.registryDelete))).Methods(http.MethodDelete)
h.PathPrefix("/registries/{id}/v2").Handler(
bouncer.AdministratorAccess(httperror.LoggerHandler(h.proxyRequestsToRegistryAPI)))
return h return h
} }

View file

@ -0,0 +1,78 @@
package registries
import (
"encoding/json"
"net/http"
"strconv"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/portainer"
)
// request on /api/registries/:id/v2
// proxyRequestsToRegistryAPI forwards registry API requests to the registry
// management extension. The registry's details and management configuration
// are passed to the extension through X-RegistryManagement-* request headers.
func (handler *Handler) proxyRequestsToRegistryAPI(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	registryID, err := request.RetrieveNumericRouteVariableValue(r, "id")
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid registry identifier route variable", err}
	}

	registry, err := handler.RegistryService.Registry(portainer.RegistryID(registryID))
	if err == portainer.ErrObjectNotFound {
		return &httperror.HandlerError{http.StatusNotFound, "Unable to find a registry with the specified identifier inside the database", err}
	} else if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find a registry with the specified identifier inside the database", err}
	}

	// The registry management extension must be present for this route to work;
	// its license key is forwarded to the extension below.
	extension, err := handler.ExtensionService.Extension(portainer.RegistryManagementExtension)
	if err == portainer.ErrObjectNotFound {
		return &httperror.HandlerError{http.StatusNotFound, "Registry management extension is not enabled", err}
	} else if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find a extension with the specified identifier inside the database", err}
	}

	// Reuse the cached extension proxy when available, otherwise create it lazily.
	var proxy http.Handler
	proxy = handler.ProxyManager.GetExtensionProxy(portainer.RegistryManagementExtension)
	if proxy == nil {
		proxy, err = handler.ProxyManager.CreateExtensionProxy(portainer.RegistryManagementExtension)
		if err != nil {
			return &httperror.HandlerError{http.StatusInternalServerError, "Unable to register registry proxy", err}
		}
	}

	// Fall back to a configuration derived from the registry record itself when
	// no explicit management configuration has been saved yet.
	managementConfiguration := registry.ManagementConfiguration
	if managementConfiguration == nil {
		managementConfiguration = createDefaultManagementConfiguration(registry)
	}

	encodedConfiguration, err := json.Marshal(managementConfiguration)
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to encode management configuration", err}
	}

	// Forward the registry context via headers, then strip the
	// /registries/<id> prefix so the extension receives the bare sub-path.
	id := strconv.Itoa(int(registryID))
	r.Header.Set("X-RegistryManagement-Key", id)
	r.Header.Set("X-RegistryManagement-URI", registry.URL)
	r.Header.Set("X-RegistryManagement-Config", string(encodedConfiguration))
	r.Header.Set("X-PortainerExtension-License", extension.License.LicenseKey)

	http.StripPrefix("/registries/"+id, proxy).ServeHTTP(w, r)
	return nil
}
// createDefaultManagementConfiguration derives a management configuration from
// the registry record itself: TLS disabled, and the registry's credentials
// copied over only when authentication is enabled on the registry.
func createDefaultManagementConfiguration(registry *portainer.Registry) *portainer.RegistryManagementConfiguration {
	configuration := &portainer.RegistryManagementConfiguration{
		Type:      registry.Type,
		TLSConfig: portainer.TLSConfiguration{TLS: false},
	}

	if registry.Authentication {
		configuration.Authentication = true
		configuration.Username = registry.Username
		configuration.Password = registry.Password
	}

	return configuration
}

View file

@ -0,0 +1,137 @@
package registries
import (
"net/http"
"strconv"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
)
// registryConfigurePayload describes the multipart form fields accepted by the
// registry configure endpoint.
type registryConfigurePayload struct {
	// Authentication indicates whether credentials must be used to query the registry.
	Authentication bool
	Username       string
	Password       string
	// TLS enables TLS; the certificate fields below are only populated when TLS
	// is enabled and TLSSkipVerify is false.
	TLS           bool
	TLSSkipVerify bool
	TLSCertFile   []byte
	TLSKeyFile    []byte
	TLSCACertFile []byte
}
// Validate extracts the multipart form values of a configure request into the
// payload and validates them. Credentials are only required when
// Authentication is enabled; certificate files are only required when TLS is
// enabled without TLSSkipVerify.
func (payload *registryConfigurePayload) Validate(r *http.Request) error {
	useAuthentication, _ := request.RetrieveBooleanMultiPartFormValue(r, "Authentication", true)
	payload.Authentication = useAuthentication

	if useAuthentication {
		username, err := request.RetrieveMultiPartFormValue(r, "Username", false)
		if err != nil {
			return portainer.Error("Invalid username")
		}
		payload.Username = username

		// Password is optional: the handler reuses the stored password when it
		// is left empty and the username is unchanged.
		password, _ := request.RetrieveMultiPartFormValue(r, "Password", true)
		payload.Password = password
	}

	useTLS, _ := request.RetrieveBooleanMultiPartFormValue(r, "TLS", true)
	payload.TLS = useTLS

	skipTLSVerify, _ := request.RetrieveBooleanMultiPartFormValue(r, "TLSSkipVerify", true)
	payload.TLSSkipVerify = skipTLSVerify

	if useTLS && !skipTLSVerify {
		cert, _, err := request.RetrieveMultiPartFormFile(r, "TLSCertFile")
		if err != nil {
			return portainer.Error("Invalid certificate file. Ensure that the file is uploaded correctly")
		}
		payload.TLSCertFile = cert

		key, _, err := request.RetrieveMultiPartFormFile(r, "TLSKeyFile")
		if err != nil {
			return portainer.Error("Invalid key file. Ensure that the file is uploaded correctly")
		}
		payload.TLSKeyFile = key

		ca, _, err := request.RetrieveMultiPartFormFile(r, "TLSCACertFile")
		if err != nil {
			return portainer.Error("Invalid CA certificate file. Ensure that the file is uploaded correctly")
		}
		payload.TLSCACertFile = ca
	}

	return nil
}
// POST request on /api/registries/:id/configure
// registryConfigure stores a dedicated management configuration (credentials
// and TLS material) on a registry record; the configuration is read when
// proxying registry API requests to the registry management extension.
func (handler *Handler) registryConfigure(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	registryID, err := request.RetrieveNumericRouteVariableValue(r, "id")
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid registry identifier route variable", err}
	}

	payload := &registryConfigurePayload{}
	err = payload.Validate(r)
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid request payload", err}
	}

	registry, err := handler.RegistryService.Registry(portainer.RegistryID(registryID))
	if err == portainer.ErrObjectNotFound {
		return &httperror.HandlerError{http.StatusNotFound, "Unable to find a registry with the specified identifier inside the database", err}
	} else if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find a registry with the specified identifier inside the database", err}
	}

	// Any previously stored management configuration is replaced wholesale.
	registry.ManagementConfiguration = &portainer.RegistryManagementConfiguration{
		Type: registry.Type,
	}

	if payload.Authentication {
		registry.ManagementConfiguration.Authentication = true
		registry.ManagementConfiguration.Username = payload.Username
		// An empty password with an unchanged username means "keep the
		// registry's stored password".
		if payload.Username == registry.Username && payload.Password == "" {
			registry.ManagementConfiguration.Password = registry.Password
		} else {
			registry.ManagementConfiguration.Password = payload.Password
		}
	}

	if payload.TLS {
		registry.ManagementConfiguration.TLSConfig = portainer.TLSConfiguration{
			TLS:           true,
			TLSSkipVerify: payload.TLSSkipVerify,
		}

		// Certificate material is only persisted to disk when verification is
		// enabled; the stored paths are kept in the configuration.
		if !payload.TLSSkipVerify {
			folder := strconv.Itoa(int(registry.ID))

			certPath, err := handler.FileService.StoreRegistryManagementFileFromBytes(folder, "cert.pem", payload.TLSCertFile)
			if err != nil {
				return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist TLS certificate file on disk", err}
			}
			registry.ManagementConfiguration.TLSConfig.TLSCertPath = certPath

			keyPath, err := handler.FileService.StoreRegistryManagementFileFromBytes(folder, "key.pem", payload.TLSKeyFile)
			if err != nil {
				return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist TLS key file on disk", err}
			}
			registry.ManagementConfiguration.TLSConfig.TLSKeyPath = keyPath

			cacertPath, err := handler.FileService.StoreRegistryManagementFileFromBytes(folder, "ca.pem", payload.TLSCACertFile)
			if err != nil {
				return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist TLS CA certificate file on disk", err}
			}
			registry.ManagementConfiguration.TLSConfig.TLSCACertPath = cacertPath
		}
	}

	err = handler.RegistryService.UpdateRegistry(registry.ID, registry)
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist registry changes inside the database", err}
	}

	return response.Empty(w)
}

View file

@ -12,6 +12,7 @@ import (
type registryCreatePayload struct { type registryCreatePayload struct {
Name string Name string
Type int
URL string URL string
Authentication bool Authentication bool
Username string Username string
@ -28,6 +29,9 @@ func (payload *registryCreatePayload) Validate(r *http.Request) error {
if payload.Authentication && (govalidator.IsNull(payload.Username) || govalidator.IsNull(payload.Password)) { if payload.Authentication && (govalidator.IsNull(payload.Username) || govalidator.IsNull(payload.Password)) {
return portainer.Error("Invalid credentials. Username and password must be specified when authentication is enabled") return portainer.Error("Invalid credentials. Username and password must be specified when authentication is enabled")
} }
if payload.Type != 1 && payload.Type != 2 && payload.Type != 3 {
return portainer.Error("Invalid registry type. Valid values are: 1 (Quay.io), 2 (Azure container registry) or 3 (custom registry)")
}
return nil return nil
} }
@ -49,6 +53,7 @@ func (handler *Handler) registryCreate(w http.ResponseWriter, r *http.Request) *
} }
registry := &portainer.Registry{ registry := &portainer.Registry{
Type: portainer.RegistryType(payload.Type),
Name: payload.Name, Name: payload.Name,
URL: payload.URL, URL: payload.URL,
Authentication: payload.Authentication, Authentication: payload.Authentication,

View file

@ -0,0 +1,44 @@
package schedules
import (
"net/http"
"github.com/gorilla/mux"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/portainer"
"github.com/portainer/portainer/http/security"
)
// Handler is the HTTP handler used to handle schedule operations.
type Handler struct {
	*mux.Router
	// ScheduleService persists schedule records.
	ScheduleService portainer.ScheduleService
	// EndpointService and JobService are passed to the script-execution job context.
	EndpointService portainer.EndpointService
	// SettingsService is used to check the EnableHostManagementFeatures flag.
	SettingsService portainer.SettingsService
	// FileService stores and retrieves schedule script files on disk.
	FileService portainer.FileService
	JobService  portainer.JobService
	// JobScheduler registers and unregisters cron jobs.
	JobScheduler portainer.JobScheduler
}
// NewHandler creates a handler to manage schedule operations.
// Every route is restricted to administrators; the exported service fields are
// expected to be set by the caller before the handler serves requests.
func NewHandler(bouncer *security.RequestBouncer) *Handler {
	h := &Handler{
		Router: mux.NewRouter(),
	}
	h.Handle("/schedules",
		bouncer.AdministratorAccess(httperror.LoggerHandler(h.scheduleList))).Methods(http.MethodGet)
	h.Handle("/schedules",
		bouncer.AdministratorAccess(httperror.LoggerHandler(h.scheduleCreate))).Methods(http.MethodPost)
	h.Handle("/schedules/{id}",
		bouncer.AdministratorAccess(httperror.LoggerHandler(h.scheduleInspect))).Methods(http.MethodGet)
	h.Handle("/schedules/{id}",
		bouncer.AdministratorAccess(httperror.LoggerHandler(h.scheduleUpdate))).Methods(http.MethodPut)
	h.Handle("/schedules/{id}",
		bouncer.AdministratorAccess(httperror.LoggerHandler(h.scheduleDelete))).Methods(http.MethodDelete)
	h.Handle("/schedules/{id}/file",
		bouncer.AdministratorAccess(httperror.LoggerHandler(h.scheduleFile))).Methods(http.MethodGet)
	h.Handle("/schedules/{id}/tasks",
		bouncer.AdministratorAccess(httperror.LoggerHandler(h.scheduleTasks))).Methods(http.MethodGet)
	return h
}

View file

@ -0,0 +1,238 @@
package schedules
import (
"errors"
"net/http"
"strconv"
"time"
"github.com/asaskevich/govalidator"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
"github.com/portainer/portainer/cron"
)
// scheduleCreateFromFilePayload describes the multipart form fields accepted
// when creating a schedule with method=file (script uploaded as a file).
type scheduleCreateFromFilePayload struct {
	Name           string
	Image          string
	CronExpression string
	Recurring      bool
	Endpoints      []portainer.EndpointID
	// File holds the raw content of the uploaded script.
	File          []byte
	RetryCount    int
	RetryInterval int
}

// scheduleCreateFromFileContentPayload describes the JSON payload accepted
// when creating a schedule with method=string (script passed inline).
type scheduleCreateFromFileContentPayload struct {
	Name           string
	CronExpression string
	Recurring      bool
	Image          string
	Endpoints      []portainer.EndpointID
	// FileContent holds the script content as a string.
	FileContent   string
	RetryCount    int
	RetryInterval int
}
// Validate extracts the multipart form values of a method=file schedule
// creation request into the payload and validates them. Name, Image,
// CronExpression, Endpoints and the script file are mandatory; retry settings
// are optional but a non-zero RetryCount requires a non-zero RetryInterval.
func (payload *scheduleCreateFromFilePayload) Validate(r *http.Request) error {
	name, err := request.RetrieveMultiPartFormValue(r, "Name", false)
	if err != nil {
		return errors.New("Invalid schedule name")
	}
	if !govalidator.Matches(name, `^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`) {
		return errors.New("Invalid schedule name format. Allowed characters are: [a-zA-Z0-9_.-]")
	}
	payload.Name = name

	image, err := request.RetrieveMultiPartFormValue(r, "Image", false)
	if err != nil {
		return errors.New("Invalid schedule image")
	}
	payload.Image = image

	cronExpression, err := request.RetrieveMultiPartFormValue(r, "CronExpression", false)
	if err != nil {
		return errors.New("Invalid cron expression")
	}
	payload.CronExpression = cronExpression

	var endpoints []portainer.EndpointID
	err = request.RetrieveMultiPartFormJSONValue(r, "Endpoints", &endpoints, false)
	if err != nil {
		return errors.New("Invalid endpoints")
	}
	payload.Endpoints = endpoints

	file, _, err := request.RetrieveMultiPartFormFile(r, "file")
	if err != nil {
		return portainer.Error("Invalid script file. Ensure that the file is uploaded correctly")
	}
	payload.File = file

	retryCount, _ := request.RetrieveNumericMultiPartFormValue(r, "RetryCount", true)
	payload.RetryCount = retryCount

	retryInterval, _ := request.RetrieveNumericMultiPartFormValue(r, "RetryInterval", true)
	payload.RetryInterval = retryInterval

	// Keep retry semantics consistent with scheduleCreateFromFileContentPayload:
	// a retry count without an interval is rejected.
	if payload.RetryCount != 0 && payload.RetryInterval == 0 {
		return portainer.Error("RetryInterval must be set")
	}

	return nil
}
// Validate checks the JSON payload of a method=string schedule creation
// request: Name (restricted character set), Image, CronExpression, Endpoints
// and FileContent are mandatory; a non-zero RetryCount requires a non-zero
// RetryInterval.
func (payload *scheduleCreateFromFileContentPayload) Validate(r *http.Request) error {
	if govalidator.IsNull(payload.Name) {
		return portainer.Error("Invalid schedule name")
	}
	if !govalidator.Matches(payload.Name, `^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`) {
		return errors.New("Invalid schedule name format. Allowed characters are: [a-zA-Z0-9_.-]")
	}
	if govalidator.IsNull(payload.Image) {
		return portainer.Error("Invalid schedule image")
	}
	if govalidator.IsNull(payload.CronExpression) {
		return portainer.Error("Invalid cron expression")
	}
	// len() is defined on a nil slice, so this single check covers both the
	// missing and the empty case.
	if len(payload.Endpoints) == 0 {
		return portainer.Error("Invalid endpoints payload")
	}
	if govalidator.IsNull(payload.FileContent) {
		return portainer.Error("Invalid script file content")
	}
	if payload.RetryCount != 0 && payload.RetryInterval == 0 {
		return portainer.Error("RetryInterval must be set")
	}
	return nil
}
// POST /api/schedules?method=file/string
// scheduleCreate dispatches schedule creation to the multipart (method=file)
// or JSON (method=string) variant. The endpoint is refused when host
// management features are disabled in the settings.
func (handler *Handler) scheduleCreate(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	settings, err := handler.SettingsService.Settings()
	if err != nil {
		return &httperror.HandlerError{http.StatusServiceUnavailable, "Unable to retrieve settings", err}
	}
	if !settings.EnableHostManagementFeatures {
		return &httperror.HandlerError{http.StatusServiceUnavailable, "Host management features are disabled", portainer.ErrHostManagementFeaturesDisabled}
	}

	method, err := request.RetrieveQueryParameter(r, "method", false)
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid query parameter: method. Valid values are: file or string", err}
	}

	switch method {
	case "string":
		return handler.createScheduleFromFileContent(w, r)
	case "file":
		return handler.createScheduleFromFile(w, r)
	default:
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid query parameter: method. Valid values are: file or string", errors.New(request.ErrInvalidQueryParameter)}
	}
}
// createScheduleFromFileContent handles the method=string variant: the script
// is provided inline as a string inside a JSON payload.
func (handler *Handler) createScheduleFromFileContent(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	var payload scheduleCreateFromFileContentPayload
	if err := request.DecodeAndValidateJSONPayload(r, &payload); err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid request payload", err}
	}

	schedule := handler.createScheduleObjectFromFileContentPayload(&payload)

	if err := handler.addAndPersistSchedule(schedule, []byte(payload.FileContent)); err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to schedule script job", err}
	}

	return response.JSON(w, schedule)
}
// createScheduleFromFile handles the method=file variant: the script is
// uploaded as a multipart form file.
func (handler *Handler) createScheduleFromFile(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	payload := &scheduleCreateFromFilePayload{}
	err := payload.Validate(r)
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid request payload", err}
	}

	schedule := handler.createScheduleObjectFromFilePayload(payload)

	err = handler.addAndPersistSchedule(schedule, payload.File)
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to schedule script job", err}
	}

	return response.JSON(w, schedule)
}
// createScheduleObjectFromFilePayload builds an (unpersisted) script-execution
// schedule from a validated method=file payload. ScriptPath is filled in later
// by addAndPersistSchedule.
func (handler *Handler) createScheduleObjectFromFilePayload(payload *scheduleCreateFromFilePayload) *portainer.Schedule {
	return &portainer.Schedule{
		ID:             portainer.ScheduleID(handler.ScheduleService.GetNextIdentifier()),
		Name:           payload.Name,
		CronExpression: payload.CronExpression,
		Recurring:      payload.Recurring,
		JobType:        portainer.ScriptExecutionJobType,
		ScriptExecutionJob: &portainer.ScriptExecutionJob{
			Endpoints:     payload.Endpoints,
			Image:         payload.Image,
			RetryCount:    payload.RetryCount,
			RetryInterval: payload.RetryInterval,
		},
		Created: time.Now().Unix(),
	}
}
// createScheduleObjectFromFileContentPayload builds an (unpersisted)
// script-execution schedule from a validated method=string payload.
// ScriptPath is filled in later by addAndPersistSchedule.
func (handler *Handler) createScheduleObjectFromFileContentPayload(payload *scheduleCreateFromFileContentPayload) *portainer.Schedule {
	return &portainer.Schedule{
		ID:             portainer.ScheduleID(handler.ScheduleService.GetNextIdentifier()),
		Name:           payload.Name,
		CronExpression: payload.CronExpression,
		Recurring:      payload.Recurring,
		JobType:        portainer.ScriptExecutionJobType,
		ScriptExecutionJob: &portainer.ScriptExecutionJob{
			Endpoints:     payload.Endpoints,
			Image:         payload.Image,
			RetryCount:    payload.RetryCount,
			RetryInterval: payload.RetryInterval,
		},
		Created: time.Now().Unix(),
	}
}
// addAndPersistSchedule stores the script on disk, registers the job with the
// cron scheduler and finally persists the schedule in the database.
// The schedule's ScriptExecutionJob.ScriptPath is set as a side effect.
func (handler *Handler) addAndPersistSchedule(schedule *portainer.Schedule, file []byte) error {
	scriptPath, err := handler.FileService.StoreScheduledJobFileFromBytes(strconv.Itoa(int(schedule.ID)), file)
	if err != nil {
		return err
	}
	schedule.ScriptExecutionJob.ScriptPath = scriptPath

	jobContext := cron.NewScriptExecutionJobContext(handler.JobService, handler.EndpointService, handler.FileService)
	jobRunner := cron.NewScriptExecutionJobRunner(schedule, jobContext)
	err = handler.JobScheduler.ScheduleJob(jobRunner)
	if err != nil {
		return err
	}

	return handler.ScheduleService.CreateSchedule(schedule)
}

View file

@ -0,0 +1,53 @@
package schedules
import (
"errors"
"net/http"
"strconv"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
)
// DELETE request on /api/schedules/:id
// scheduleDelete removes a user-created schedule: its files on disk, its cron
// registration and finally its database record. System schedules (snapshot,
// endpoint sync) cannot be removed.
func (handler *Handler) scheduleDelete(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	settings, err := handler.SettingsService.Settings()
	if err != nil {
		return &httperror.HandlerError{http.StatusServiceUnavailable, "Unable to retrieve settings", err}
	}
	if !settings.EnableHostManagementFeatures {
		return &httperror.HandlerError{http.StatusServiceUnavailable, "Host management features are disabled", portainer.ErrHostManagementFeaturesDisabled}
	}

	scheduleID, err := request.RetrieveNumericRouteVariableValue(r, "id")
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid schedule identifier route variable", err}
	}

	schedule, err := handler.ScheduleService.Schedule(portainer.ScheduleID(scheduleID))
	if err == portainer.ErrObjectNotFound {
		return &httperror.HandlerError{http.StatusNotFound, "Unable to find a schedule with the specified identifier inside the database", err}
	} else if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find a schedule with the specified identifier inside the database", err}
	}

	// Built-in system jobs must never be deleted through the API.
	if schedule.JobType == portainer.SnapshotJobType || schedule.JobType == portainer.EndpointSyncJobType {
		return &httperror.HandlerError{http.StatusBadRequest, "Cannot remove system schedules", errors.New("Cannot remove system schedule")}
	}

	scheduleFolder := handler.FileService.GetScheduleFolder(strconv.Itoa(scheduleID))
	err = handler.FileService.RemoveDirectory(scheduleFolder)
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to remove the files associated to the schedule on the filesystem", err}
	}

	handler.JobScheduler.UnscheduleJob(schedule.ID)

	err = handler.ScheduleService.DeleteSchedule(portainer.ScheduleID(scheduleID))
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to remove the schedule from the database", err}
	}

	return response.Empty(w)
}

View file

@ -0,0 +1,49 @@
package schedules
import (
"errors"
"net/http"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
)
// scheduleFileResponse is the JSON body returned by scheduleFile.
type scheduleFileResponse struct {
	// ScheduleFileContent is the script content as a plain string.
	ScheduleFileContent string `json:"ScheduleFileContent"`
}

// GET request on /api/schedules/:id/file
// scheduleFile returns the content of the script associated with a
// script-execution schedule. Other schedule types have no script file.
func (handler *Handler) scheduleFile(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	settings, err := handler.SettingsService.Settings()
	if err != nil {
		return &httperror.HandlerError{http.StatusServiceUnavailable, "Unable to retrieve settings", err}
	}
	if !settings.EnableHostManagementFeatures {
		return &httperror.HandlerError{http.StatusServiceUnavailable, "Host management features are disabled", portainer.ErrHostManagementFeaturesDisabled}
	}

	scheduleID, err := request.RetrieveNumericRouteVariableValue(r, "id")
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid schedule identifier route variable", err}
	}

	schedule, err := handler.ScheduleService.Schedule(portainer.ScheduleID(scheduleID))
	if err == portainer.ErrObjectNotFound {
		return &httperror.HandlerError{http.StatusNotFound, "Unable to find a schedule with the specified identifier inside the database", err}
	} else if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find a schedule with the specified identifier inside the database", err}
	}

	if schedule.JobType != portainer.ScriptExecutionJobType {
		return &httperror.HandlerError{http.StatusBadRequest, "Unable to retrieve script file", errors.New("This type of schedule do not have any associated script file")}
	}

	scheduleFileContent, err := handler.FileService.GetFileContent(schedule.ScriptExecutionJob.ScriptPath)
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve schedule script file from disk", err}
	}

	return response.JSON(w, &scheduleFileResponse{ScheduleFileContent: string(scheduleFileContent)})
}

View file

@ -0,0 +1,35 @@
package schedules
import (
"net/http"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
)
// GET request on /api/schedules/:id
// scheduleInspect returns a single schedule, identified by the :id route
// variable, as JSON.
func (handler *Handler) scheduleInspect(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	settings, err := handler.SettingsService.Settings()
	if err != nil {
		return &httperror.HandlerError{http.StatusServiceUnavailable, "Unable to retrieve settings", err}
	}

	if !settings.EnableHostManagementFeatures {
		return &httperror.HandlerError{http.StatusServiceUnavailable, "Host management features are disabled", portainer.ErrHostManagementFeaturesDisabled}
	}

	scheduleID, err := request.RetrieveNumericRouteVariableValue(r, "id")
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid schedule identifier route variable", err}
	}

	schedule, err := handler.ScheduleService.Schedule(portainer.ScheduleID(scheduleID))
	switch {
	case err == portainer.ErrObjectNotFound:
		return &httperror.HandlerError{http.StatusNotFound, "Unable to find a schedule with the specified identifier inside the database", err}
	case err != nil:
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find a schedule with the specified identifier inside the database", err}
	}

	return response.JSON(w, schedule)
}

View file

@ -0,0 +1,27 @@
package schedules
import (
"net/http"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
)
// GET request on /api/schedules
// scheduleList returns every schedule stored in the database as JSON.
func (handler *Handler) scheduleList(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	settings, settingsErr := handler.SettingsService.Settings()
	if settingsErr != nil {
		return &httperror.HandlerError{http.StatusServiceUnavailable, "Unable to retrieve settings", settingsErr}
	}

	// The whole schedules API is gated behind the host management feature flag.
	if !settings.EnableHostManagementFeatures {
		return &httperror.HandlerError{http.StatusServiceUnavailable, "Host management features are disabled", portainer.ErrHostManagementFeaturesDisabled}
	}

	schedules, listErr := handler.ScheduleService.Schedules()
	if listErr != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve schedules from the database", listErr}
	}

	return response.JSON(w, schedules)
}

View file

@ -0,0 +1,95 @@
package schedules
import (
"encoding/json"
"errors"
"net/http"
"strconv"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
)
// taskContainer represents a container spawned by a scheduled script
// execution job, as recorded in an endpoint snapshot.
type taskContainer struct {
	ID         string               `json:"Id"`
	EndpointID portainer.EndpointID `json:"EndpointId"`
	Status     string               `json:"Status"`
	Created    float64              `json:"Created"`
	Labels     map[string]string    `json:"Labels"`
}

// GET request on /api/schedules/:id/tasks
// scheduleTasks returns the containers created by a script execution schedule
// across all of its endpoints, based on the latest endpoint snapshots.
func (handler *Handler) scheduleTasks(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	settings, err := handler.SettingsService.Settings()
	if err != nil {
		return &httperror.HandlerError{http.StatusServiceUnavailable, "Unable to retrieve settings", err}
	}

	// The whole schedules API is gated behind the host management feature flag.
	if !settings.EnableHostManagementFeatures {
		return &httperror.HandlerError{http.StatusServiceUnavailable, "Host management features are disabled", portainer.ErrHostManagementFeaturesDisabled}
	}

	scheduleID, err := request.RetrieveNumericRouteVariableValue(r, "id")
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid schedule identifier route variable", err}
	}

	schedule, err := handler.ScheduleService.Schedule(portainer.ScheduleID(scheduleID))
	if err == portainer.ErrObjectNotFound {
		return &httperror.HandlerError{http.StatusNotFound, "Unable to find a schedule with the specified identifier inside the database", err}
	} else if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find a schedule with the specified identifier inside the database", err}
	}

	// Only script execution jobs spawn containers that can be reported as tasks.
	if schedule.JobType != portainer.ScriptExecutionJobType {
		// Grammar fix in the user-facing error ("do not have" -> "does not have").
		return &httperror.HandlerError{http.StatusBadRequest, "Unable to retrieve schedule tasks", errors.New("This type of schedule does not have any associated tasks")}
	}

	tasks := make([]taskContainer, 0)
	for _, endpointID := range schedule.ScriptExecutionJob.Endpoints {
		endpoint, err := handler.EndpointService.Endpoint(endpointID)
		if err == portainer.ErrObjectNotFound {
			// The endpoint may have been removed since the schedule was
			// created; skip it instead of failing the whole request.
			continue
		} else if err != nil {
			return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find an endpoint with the specified identifier inside the database", err}
		}

		// Typo fixed in the helper name (Snasphot -> Snapshot) and in the
		// error message ("Unable to find extract" -> "Unable to extract").
		endpointTasks, err := extractTasksFromContainerSnapshot(endpoint, schedule.ID)
		if err != nil {
			return &httperror.HandlerError{http.StatusInternalServerError, "Unable to extract schedule tasks from endpoint snapshot", err}
		}

		tasks = append(tasks, endpointTasks...)
	}

	return response.JSON(w, tasks)
}

// extractTasksFromContainerSnapshot filters the containers recorded in the
// most recent snapshot of an endpoint and keeps those labeled with the given
// schedule identifier (label "io.portainer.schedule.id").
func extractTasksFromContainerSnapshot(endpoint *portainer.Endpoint, scheduleID portainer.ScheduleID) ([]taskContainer, error) {
	endpointTasks := make([]taskContainer, 0)
	if len(endpoint.Snapshots) == 0 {
		return endpointTasks, nil
	}

	// The snapshot stores containers as raw Docker API data; round-trip
	// through JSON to project it onto the taskContainer shape.
	b, err := json.Marshal(endpoint.Snapshots[0].SnapshotRaw.Containers)
	if err != nil {
		return nil, err
	}

	var containers []taskContainer
	err = json.Unmarshal(b, &containers)
	if err != nil {
		return nil, err
	}

	// Hoisted out of the loop: the label value is the same for every container.
	scheduleLabel := strconv.Itoa(int(scheduleID))
	for _, container := range containers {
		if container.Labels["io.portainer.schedule.id"] == scheduleLabel {
			container.EndpointID = endpoint.ID
			endpointTasks = append(endpointTasks, container)
		}
	}

	return endpointTasks, nil
}

View file

@ -0,0 +1,126 @@
package schedules
import (
"errors"
"net/http"
"strconv"
"github.com/asaskevich/govalidator"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
"github.com/portainer/portainer"
"github.com/portainer/portainer/cron"
)
// scheduleUpdatePayload is the expected JSON payload for a schedule update
// request. Every field is optional: a nil pointer (or nil Endpoints slice)
// means "leave this setting unchanged".
type scheduleUpdatePayload struct {
	Name           *string
	Image          *string
	CronExpression *string
	Recurring      *bool
	Endpoints      []portainer.EndpointID
	FileContent    *string
	RetryCount     *int
	RetryInterval  *int
}

// Validate checks the update payload. Only the name is validated here: when
// provided it must start with an alphanumeric character and use only the
// characters [a-zA-Z0-9_.-].
func (payload *scheduleUpdatePayload) Validate(r *http.Request) error {
	if payload.Name != nil && !govalidator.Matches(*payload.Name, `^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`) {
		// NOTE(review): the `+` after the first character class means the regex
		// rejects single-character names, although the error message only
		// mentions the allowed character set — confirm this minimum is intended.
		return errors.New("Invalid schedule name format. Allowed characters are: [a-zA-Z0-9_.-]")
	}
	return nil
}
// PUT request on /api/schedules/:id
// scheduleUpdate updates the metadata of an existing schedule and/or the
// content of its associated script file, and re-registers the cron job when
// a scheduling-relevant field changed.
func (handler *Handler) scheduleUpdate(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
	settings, err := handler.SettingsService.Settings()
	if err != nil {
		return &httperror.HandlerError{http.StatusServiceUnavailable, "Unable to retrieve settings", err}
	}

	// The whole schedules API is gated behind the host management feature flag.
	if !settings.EnableHostManagementFeatures {
		return &httperror.HandlerError{http.StatusServiceUnavailable, "Host management features are disabled", portainer.ErrHostManagementFeaturesDisabled}
	}

	scheduleID, err := request.RetrieveNumericRouteVariableValue(r, "id")
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid schedule identifier route variable", err}
	}

	var payload scheduleUpdatePayload
	err = request.DecodeAndValidateJSONPayload(r, &payload)
	if err != nil {
		return &httperror.HandlerError{http.StatusBadRequest, "Invalid request payload", err}
	}

	schedule, err := handler.ScheduleService.Schedule(portainer.ScheduleID(scheduleID))
	if err == portainer.ErrObjectNotFound {
		return &httperror.HandlerError{http.StatusNotFound, "Unable to find a schedule with the specified identifier inside the database", err}
	} else if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find a schedule with the specified identifier inside the database", err}
	}

	// Apply the payload to the in-memory schedule; the returned flag reports
	// whether a field affecting job execution was modified.
	updateJobSchedule := updateSchedule(schedule, &payload)

	if payload.FileContent != nil {
		// Persist the new script content; a content change also requires
		// re-registering the cron job.
		_, err := handler.FileService.StoreScheduledJobFileFromBytes(strconv.Itoa(scheduleID), []byte(*payload.FileContent))
		if err != nil {
			return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist script file changes on the filesystem", err}
		}
		updateJobSchedule = true
	}

	if updateJobSchedule {
		// NOTE(review): a script-execution job runner is built unconditionally
		// here, which assumes schedule.ScriptExecutionJob is non-nil, i.e. that
		// only script execution schedules reach this endpoint — confirm other
		// job types cannot be updated through this route.
		jobContext := cron.NewScriptExecutionJobContext(handler.JobService, handler.EndpointService, handler.FileService)
		jobRunner := cron.NewScriptExecutionJobRunner(schedule, jobContext)

		err := handler.JobScheduler.UpdateJobSchedule(jobRunner)
		if err != nil {
			return &httperror.HandlerError{http.StatusInternalServerError, "Unable to update job scheduler", err}
		}
	}

	err = handler.ScheduleService.UpdateSchedule(portainer.ScheduleID(scheduleID), schedule)
	if err != nil {
		return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist schedule changes inside the database", err}
	}

	return response.JSON(w, schedule)
}
// updateSchedule copies every non-nil field of the payload onto the schedule.
// It reports whether a field that affects job scheduling (anything other than
// the name) was modified, meaning the cron job must be re-registered.
func updateSchedule(schedule *portainer.Schedule, payload *scheduleUpdatePayload) bool {
	rescheduleNeeded := false

	// A rename does not affect how or when the job runs.
	if payload.Name != nil {
		schedule.Name = *payload.Name
	}

	// Schedule-level fields.
	if payload.CronExpression != nil {
		schedule.CronExpression = *payload.CronExpression
		rescheduleNeeded = true
	}
	if payload.Recurring != nil {
		schedule.Recurring = *payload.Recurring
		rescheduleNeeded = true
	}

	// Script execution job fields.
	if payload.Endpoints != nil {
		schedule.ScriptExecutionJob.Endpoints = payload.Endpoints
		rescheduleNeeded = true
	}
	if payload.Image != nil {
		schedule.ScriptExecutionJob.Image = *payload.Image
		rescheduleNeeded = true
	}
	if payload.RetryCount != nil {
		schedule.ScriptExecutionJob.RetryCount = *payload.RetryCount
		rescheduleNeeded = true
	}
	if payload.RetryInterval != nil {
		schedule.ScriptExecutionJob.RetryInterval = *payload.RetryInterval
		rescheduleNeeded = true
	}

	return rescheduleNeeded
}

View file

@ -9,6 +9,10 @@ import (
"github.com/portainer/portainer/http/security" "github.com/portainer/portainer/http/security"
) )
// hideFields blanks out sensitive settings fields (the LDAP reader password)
// before the settings object is serialized into an API response.
func hideFields(settings *portainer.Settings) {
	settings.LDAPSettings.Password = ""
}
// Handler is the HTTP handler used to handle settings operations. // Handler is the HTTP handler used to handle settings operations.
type Handler struct { type Handler struct {
*mux.Router *mux.Router
@ -16,6 +20,7 @@ type Handler struct {
LDAPService portainer.LDAPService LDAPService portainer.LDAPService
FileService portainer.FileService FileService portainer.FileService
JobScheduler portainer.JobScheduler JobScheduler portainer.JobScheduler
ScheduleService portainer.ScheduleService
} }
// NewHandler creates a handler to manage settings operations. // NewHandler creates a handler to manage settings operations.

View file

@ -14,5 +14,6 @@ func (handler *Handler) settingsInspect(w http.ResponseWriter, r *http.Request)
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve the settings from the database", err} return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve the settings from the database", err}
} }
hideFields(settings)
return response.JSON(w, settings) return response.JSON(w, settings)
} }

View file

@ -13,6 +13,7 @@ type publicSettingsResponse struct {
AuthenticationMethod portainer.AuthenticationMethod `json:"AuthenticationMethod"` AuthenticationMethod portainer.AuthenticationMethod `json:"AuthenticationMethod"`
AllowBindMountsForRegularUsers bool `json:"AllowBindMountsForRegularUsers"` AllowBindMountsForRegularUsers bool `json:"AllowBindMountsForRegularUsers"`
AllowPrivilegedModeForRegularUsers bool `json:"AllowPrivilegedModeForRegularUsers"` AllowPrivilegedModeForRegularUsers bool `json:"AllowPrivilegedModeForRegularUsers"`
EnableHostManagementFeatures bool `json:"EnableHostManagementFeatures"`
ExternalTemplates bool `json:"ExternalTemplates"` ExternalTemplates bool `json:"ExternalTemplates"`
} }
@ -28,6 +29,7 @@ func (handler *Handler) settingsPublic(w http.ResponseWriter, r *http.Request) *
AuthenticationMethod: settings.AuthenticationMethod, AuthenticationMethod: settings.AuthenticationMethod,
AllowBindMountsForRegularUsers: settings.AllowBindMountsForRegularUsers, AllowBindMountsForRegularUsers: settings.AllowBindMountsForRegularUsers,
AllowPrivilegedModeForRegularUsers: settings.AllowPrivilegedModeForRegularUsers, AllowPrivilegedModeForRegularUsers: settings.AllowPrivilegedModeForRegularUsers,
EnableHostManagementFeatures: settings.EnableHostManagementFeatures,
ExternalTemplates: false, ExternalTemplates: false,
} }

View file

@ -18,6 +18,7 @@ type settingsUpdatePayload struct {
LDAPSettings *portainer.LDAPSettings LDAPSettings *portainer.LDAPSettings
AllowBindMountsForRegularUsers *bool AllowBindMountsForRegularUsers *bool
AllowPrivilegedModeForRegularUsers *bool AllowPrivilegedModeForRegularUsers *bool
EnableHostManagementFeatures *bool
SnapshotInterval *string SnapshotInterval *string
TemplatesURL *string TemplatesURL *string
} }
@ -76,9 +77,15 @@ func (handler *Handler) settingsUpdate(w http.ResponseWriter, r *http.Request) *
settings.AllowPrivilegedModeForRegularUsers = *payload.AllowPrivilegedModeForRegularUsers settings.AllowPrivilegedModeForRegularUsers = *payload.AllowPrivilegedModeForRegularUsers
} }
if payload.EnableHostManagementFeatures != nil {
settings.EnableHostManagementFeatures = *payload.EnableHostManagementFeatures
}
if payload.SnapshotInterval != nil && *payload.SnapshotInterval != settings.SnapshotInterval { if payload.SnapshotInterval != nil && *payload.SnapshotInterval != settings.SnapshotInterval {
settings.SnapshotInterval = *payload.SnapshotInterval err := handler.updateSnapshotInterval(settings, *payload.SnapshotInterval)
handler.JobScheduler.UpdateSnapshotJob(settings.SnapshotInterval) if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to update snapshot interval", err}
}
} }
tlsError := handler.updateTLS(settings) tlsError := handler.updateTLS(settings)
@ -94,6 +101,32 @@ func (handler *Handler) settingsUpdate(w http.ResponseWriter, r *http.Request) *
return response.JSON(w, settings) return response.JSON(w, settings)
} }
// updateSnapshotInterval stores the new snapshot interval in the settings
// and, when a snapshot system schedule exists, updates both the cron
// scheduler and the persisted schedule with the matching "@every <interval>"
// cron expression.
func (handler *Handler) updateSnapshotInterval(settings *portainer.Settings, snapshotInterval string) error {
	settings.SnapshotInterval = snapshotInterval

	schedules, err := handler.ScheduleService.SchedulesByJobType(portainer.SnapshotJobType)
	if err != nil {
		return err
	}
	if len(schedules) == 0 {
		return nil
	}

	// Only the first schedule is touched; there is expected to be a single
	// system schedule of the snapshot job type.
	snapshotSchedule := schedules[0]
	snapshotSchedule.CronExpression = "@every " + snapshotInterval

	err = handler.JobScheduler.UpdateSystemJobSchedule(portainer.SnapshotJobType, snapshotSchedule.CronExpression)
	if err != nil {
		return err
	}

	return handler.ScheduleService.UpdateSchedule(snapshotSchedule.ID, &snapshotSchedule)
}
func (handler *Handler) updateTLS(settings *portainer.Settings) *httperror.HandlerError { func (handler *Handler) updateTLS(settings *portainer.Settings) *httperror.HandlerError {
if (settings.LDAPSettings.TLSConfig.TLS || settings.LDAPSettings.StartTLS) && !settings.LDAPSettings.TLSConfig.TLSSkipVerify { if (settings.LDAPSettings.TLSConfig.TLS || settings.LDAPSettings.StartTLS) && !settings.LDAPSettings.TLSConfig.TLSSkipVerify {
caCertPath, _ := handler.FileService.GetPathForTLSFile(filesystem.LDAPStorePath, portainer.TLSFileCA) caCertPath, _ := handler.FileService.GetPathForTLSFile(filesystem.LDAPStorePath, portainer.TLSFileCA)

View file

@ -14,6 +14,7 @@ import (
type stackMigratePayload struct { type stackMigratePayload struct {
EndpointID int EndpointID int
SwarmID string SwarmID string
Name string
} }
func (payload *stackMigratePayload) Validate(r *http.Request) error { func (payload *stackMigratePayload) Validate(r *http.Request) error {
@ -89,11 +90,17 @@ func (handler *Handler) stackMigrate(w http.ResponseWriter, r *http.Request) *ht
stack.SwarmID = payload.SwarmID stack.SwarmID = payload.SwarmID
} }
oldName := stack.Name
if payload.Name != "" {
stack.Name = payload.Name
}
migrationError := handler.migrateStack(r, stack, targetEndpoint) migrationError := handler.migrateStack(r, stack, targetEndpoint)
if migrationError != nil { if migrationError != nil {
return migrationError return migrationError
} }
stack.Name = oldName
err = handler.deleteStack(stack, endpoint) err = handler.deleteStack(stack, endpoint)
if err != nil { if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, err.Error(), err} return &httperror.HandlerError{http.StatusInternalServerError, err.Error(), err}

View file

@ -26,7 +26,7 @@ func (handler *Handler) templateList(w http.ResponseWriter, r *http.Request) *ht
} }
} else { } else {
var templateData []byte var templateData []byte
templateData, err = client.Get(settings.TemplatesURL) templateData, err = client.Get(settings.TemplatesURL, 0)
if err != nil { if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve external templates", err} return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve external templates", err}
} }

View file

@ -49,7 +49,7 @@ func (handler *Handler) webhookExecute(w http.ResponseWriter, r *http.Request) *
} }
func (handler *Handler) executeServiceWebhook(w http.ResponseWriter, endpoint *portainer.Endpoint, resourceID string) *httperror.HandlerError { func (handler *Handler) executeServiceWebhook(w http.ResponseWriter, endpoint *portainer.Endpoint, resourceID string) *httperror.HandlerError {
dockerClient, err := handler.DockerClientFactory.CreateClient(endpoint) dockerClient, err := handler.DockerClientFactory.CreateClient(endpoint, "")
if err != nil { if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Error creating docker client", err} return &httperror.HandlerError{http.StatusInternalServerError, "Error creating docker client", err}
} }

View file

@ -111,12 +111,13 @@ func (handler *Handler) proxyWebsocketRequest(w http.ResponseWriter, r *http.Req
} }
} }
signature, err := handler.SignatureService.Sign(portainer.PortainerAgentSignatureMessage) signature, err := handler.SignatureService.CreateSignature(portainer.PortainerAgentSignatureMessage)
if err != nil { if err != nil {
return err return err
} }
proxy.Director = func(incoming *http.Request, out http.Header) { proxy.Director = func(incoming *http.Request, out http.Header) {
out.Set(portainer.PortainerAgentPublicKeyHeader, handler.SignatureService.EncodedPublicKey())
out.Set(portainer.PortainerAgentSignatureHeader, signature) out.Set(portainer.PortainerAgentSignatureHeader, signature)
out.Set(portainer.PortainerAgentTargetHeader, params.nodeName) out.Set(portainer.PortainerAgentTargetHeader, params.nodeName)
} }

View file

@ -43,7 +43,7 @@ func buildOperation(request *http.Request) error {
dockerfileContent = []byte(req.Content) dockerfileContent = []byte(req.Content)
} }
buffer, err := archive.TarFileInBuffer(dockerfileContent, "Dockerfile") buffer, err := archive.TarFileInBuffer(dockerfileContent, "Dockerfile", 0600)
if err != nil { if err != nil {
return err return err
} }

View file

@ -64,7 +64,7 @@ func (p *proxyTransport) proxyDockerRequest(request *http.Request) (*http.Respon
request.URL.Path = path request.URL.Path = path
if p.enableSignature { if p.enableSignature {
signature, err := p.SignatureService.Sign(portainer.PortainerAgentSignatureMessage) signature, err := p.SignatureService.CreateSignature(portainer.PortainerAgentSignatureMessage)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -25,7 +25,7 @@ type proxyFactory struct {
func (factory *proxyFactory) newHTTPProxy(u *url.URL) http.Handler { func (factory *proxyFactory) newHTTPProxy(u *url.URL) http.Handler {
u.Scheme = "http" u.Scheme = "http"
return newSingleHostReverseProxyWithHostHeader(u) return httputil.NewSingleHostReverseProxy(u)
} }
func newAzureProxy(credentials *portainer.AzureCredentials) (http.Handler, error) { func newAzureProxy(credentials *portainer.AzureCredentials) (http.Handler, error) {

View file

@ -3,18 +3,25 @@ package proxy
import ( import (
"net/http" "net/http"
"net/url" "net/url"
"strings" "strconv"
"github.com/orcaman/concurrent-map" "github.com/orcaman/concurrent-map"
"github.com/portainer/portainer" "github.com/portainer/portainer"
) )
// extensionPorts maps each known extension identifier to the localhost port
// its extension process is expected to listen on.
// TODO: contain code related to legacy extension management
var extensionPorts = map[portainer.ExtensionID]string{
	portainer.RegistryManagementExtension: "7001",
}
type ( type (
// Manager represents a service used to manage Docker proxies. // Manager represents a service used to manage Docker proxies.
Manager struct { Manager struct {
proxyFactory *proxyFactory proxyFactory *proxyFactory
proxies cmap.ConcurrentMap proxies cmap.ConcurrentMap
extensionProxies cmap.ConcurrentMap extensionProxies cmap.ConcurrentMap
legacyExtensionProxies cmap.ConcurrentMap
} }
// ManagerParams represents the required parameters to create a new Manager instance. // ManagerParams represents the required parameters to create a new Manager instance.
@ -33,6 +40,7 @@ func NewManager(parameters *ManagerParams) *Manager {
return &Manager{ return &Manager{
proxies: cmap.New(), proxies: cmap.New(),
extensionProxies: cmap.New(), extensionProxies: cmap.New(),
legacyExtensionProxies: cmap.New(),
proxyFactory: &proxyFactory{ proxyFactory: &proxyFactory{
ResourceControlService: parameters.ResourceControlService, ResourceControlService: parameters.ResourceControlService,
TeamMembershipService: parameters.TeamMembershipService, TeamMembershipService: parameters.TeamMembershipService,
@ -44,6 +52,83 @@ func NewManager(parameters *ManagerParams) *Manager {
} }
} }
// GetProxy returns the proxy registered under the given key, or nil when no
// proxy is associated to it.
func (manager *Manager) GetProxy(key string) http.Handler {
	if proxy, ok := manager.proxies.Get(key); ok {
		return proxy.(http.Handler)
	}
	return nil
}
// CreateAndRegisterProxy creates a new HTTP reverse proxy based on endpoint
// properties and adds it to the registered proxies. It can also be used to
// create a new HTTP reverse proxy and replace an already registered proxy.
func (manager *Manager) CreateAndRegisterProxy(endpoint *portainer.Endpoint) (http.Handler, error) {
	proxy, err := manager.createProxy(endpoint)
	if err != nil {
		return nil, err
	}

	// NOTE(review): string(endpoint.ID) converts the integer ID to the Unicode
	// code point of that value (e.g. 65 -> "A"), not its decimal representation.
	// This only works if every caller of GetProxy/DeleteProxy builds the key
	// with the same conversion — confirm, or migrate all sites to strconv.Itoa.
	manager.proxies.Set(string(endpoint.ID), proxy)
	return proxy, nil
}
// DeleteProxy deletes the proxy associated to a key. It is a no-op when the
// key is not registered.
func (manager *Manager) DeleteProxy(key string) {
	manager.proxies.Remove(key)
}
// GetExtensionProxy returns the proxy registered for the given extension
// identifier, or nil when no proxy has been created for it.
func (manager *Manager) GetExtensionProxy(extensionID portainer.ExtensionID) http.Handler {
	if proxy, ok := manager.extensionProxies.Get(strconv.Itoa(int(extensionID))); ok {
		return proxy.(http.Handler)
	}
	return nil
}
// CreateExtensionProxy creates a new HTTP reverse proxy for an extension and
// registers it in the extension map associated to the specified extension
// identifier.
func (handler *Manager) CreateExtensionProxy(extensionID portainer.ExtensionID) (http.Handler, error) {
	// NOTE(review): an unknown extensionID yields an empty port and the address
	// "http://localhost:", which still parses successfully — confirm callers
	// only pass identifiers present in extensionPorts.
	address := "http://localhost:" + extensionPorts[extensionID]

	extensionURL, err := url.Parse(address)
	if err != nil {
		return nil, err
	}

	proxy := manager.proxyFactory.newHTTPProxy(extensionURL)
	manager.extensionProxies.Set(strconv.Itoa(int(extensionID)), proxy)
	return proxy, nil
}
// DeleteExtensionProxy deletes the extension proxy associated to an extension
// identifier. It is a no-op when no proxy is registered for it.
func (manager *Manager) DeleteExtensionProxy(extensionID portainer.ExtensionID) {
	manager.extensionProxies.Remove(strconv.Itoa(int(extensionID)))
}
// GetLegacyExtensionProxy returns the legacy extension proxy registered under
// the given key, or nil when none exists.
func (manager *Manager) GetLegacyExtensionProxy(key string) http.Handler {
	if proxy, ok := manager.legacyExtensionProxies.Get(key); ok {
		return proxy.(http.Handler)
	}
	return nil
}
// CreateLegacyExtensionProxy creates a new HTTP reverse proxy for a legacy
// extension and registers it in the legacy extension proxy map under the
// specified key.
func (manager *Manager) CreateLegacyExtensionProxy(key, extensionAPIURL string) (http.Handler, error) {
	extensionURL, err := url.Parse(extensionAPIURL)
	if err != nil {
		return nil, err
	}

	proxy := manager.proxyFactory.newHTTPProxy(extensionURL)
	// Bug fix: the proxy was previously stored in the regular extension map
	// (extensionProxies), so GetLegacyExtensionProxy — which reads
	// legacyExtensionProxies — could never find it.
	manager.legacyExtensionProxies.Set(key, proxy)
	return proxy, nil
}
func (manager *Manager) createDockerProxy(endpointURL *url.URL, tlsConfig *portainer.TLSConfiguration) (http.Handler, error) { func (manager *Manager) createDockerProxy(endpointURL *url.URL, tlsConfig *portainer.TLSConfiguration) (http.Handler, error) {
if endpointURL.Scheme == "tcp" { if endpointURL.Scheme == "tcp" {
if tlsConfig.TLS || tlsConfig.TLSSkipVerify { if tlsConfig.TLS || tlsConfig.TLSSkipVerify {
@ -69,59 +154,3 @@ func (manager *Manager) createProxy(endpoint *portainer.Endpoint) (http.Handler,
return manager.createDockerProxy(endpointURL, &endpoint.TLSConfig) return manager.createDockerProxy(endpointURL, &endpoint.TLSConfig)
} }
} }
// CreateAndRegisterProxy creates a new HTTP reverse proxy based on endpoint properties and and adds it to the registered proxies.
// It can also be used to create a new HTTP reverse proxy and replace an already registered proxy.
func (manager *Manager) CreateAndRegisterProxy(endpoint *portainer.Endpoint) (http.Handler, error) {
proxy, err := manager.createProxy(endpoint)
if err != nil {
return nil, err
}
manager.proxies.Set(string(endpoint.ID), proxy)
return proxy, nil
}
// GetProxy returns the proxy associated to a key
func (manager *Manager) GetProxy(key string) http.Handler {
proxy, ok := manager.proxies.Get(key)
if !ok {
return nil
}
return proxy.(http.Handler)
}
// DeleteProxy deletes the proxy associated to a key
func (manager *Manager) DeleteProxy(key string) {
manager.proxies.Remove(key)
}
// CreateAndRegisterExtensionProxy creates a new HTTP reverse proxy for an extension and adds it to the registered proxies.
func (manager *Manager) CreateAndRegisterExtensionProxy(key, extensionAPIURL string) (http.Handler, error) {
extensionURL, err := url.Parse(extensionAPIURL)
if err != nil {
return nil, err
}
proxy := manager.proxyFactory.newHTTPProxy(extensionURL)
manager.extensionProxies.Set(key, proxy)
return proxy, nil
}
// GetExtensionProxy returns the extension proxy associated to a key
func (manager *Manager) GetExtensionProxy(key string) http.Handler {
proxy, ok := manager.extensionProxies.Get(key)
if !ok {
return nil
}
return proxy.(http.Handler)
}
// DeleteExtensionProxies deletes all the extension proxies associated to a key
func (manager *Manager) DeleteExtensionProxies(key string) {
for _, k := range manager.extensionProxies.Keys() {
if strings.Contains(k, key+"_") {
manager.extensionProxies.Remove(k)
}
}
}

View file

@ -114,7 +114,6 @@ func (bouncer *RequestBouncer) EndpointAccess(r *http.Request, endpoint *portain
// mwSecureHeaders provides secure headers middleware for handlers. // mwSecureHeaders provides secure headers middleware for handlers.
func mwSecureHeaders(next http.Handler) http.Handler { func mwSecureHeaders(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("X-Frame-Options", "DENY")
w.Header().Add("X-XSS-Protection", "1; mode=block") w.Header().Add("X-XSS-Protection", "1; mode=block")
w.Header().Add("X-Content-Type-Options", "nosniff") w.Header().Add("X-Content-Type-Options", "nosniff")
next.ServeHTTP(w, r) next.ServeHTTP(w, r)

View file

@ -11,10 +11,12 @@ import (
"github.com/portainer/portainer/http/handler/endpointgroups" "github.com/portainer/portainer/http/handler/endpointgroups"
"github.com/portainer/portainer/http/handler/endpointproxy" "github.com/portainer/portainer/http/handler/endpointproxy"
"github.com/portainer/portainer/http/handler/endpoints" "github.com/portainer/portainer/http/handler/endpoints"
"github.com/portainer/portainer/http/handler/extensions"
"github.com/portainer/portainer/http/handler/file" "github.com/portainer/portainer/http/handler/file"
"github.com/portainer/portainer/http/handler/motd" "github.com/portainer/portainer/http/handler/motd"
"github.com/portainer/portainer/http/handler/registries" "github.com/portainer/portainer/http/handler/registries"
"github.com/portainer/portainer/http/handler/resourcecontrols" "github.com/portainer/portainer/http/handler/resourcecontrols"
"github.com/portainer/portainer/http/handler/schedules"
"github.com/portainer/portainer/http/handler/settings" "github.com/portainer/portainer/http/handler/settings"
"github.com/portainer/portainer/http/handler/stacks" "github.com/portainer/portainer/http/handler/stacks"
"github.com/portainer/portainer/http/handler/status" "github.com/portainer/portainer/http/handler/status"
@ -40,6 +42,7 @@ type Server struct {
AuthDisabled bool AuthDisabled bool
EndpointManagement bool EndpointManagement bool
Status *portainer.Status Status *portainer.Status
ExtensionManager portainer.ExtensionManager
ComposeStackManager portainer.ComposeStackManager ComposeStackManager portainer.ComposeStackManager
CryptoService portainer.CryptoService CryptoService portainer.CryptoService
SignatureService portainer.DigitalSignatureService SignatureService portainer.DigitalSignatureService
@ -52,8 +55,10 @@ type Server struct {
GitService portainer.GitService GitService portainer.GitService
JWTService portainer.JWTService JWTService portainer.JWTService
LDAPService portainer.LDAPService LDAPService portainer.LDAPService
ExtensionService portainer.ExtensionService
RegistryService portainer.RegistryService RegistryService portainer.RegistryService
ResourceControlService portainer.ResourceControlService ResourceControlService portainer.ResourceControlService
ScheduleService portainer.ScheduleService
SettingsService portainer.SettingsService SettingsService portainer.SettingsService
StackService portainer.StackService StackService portainer.StackService
SwarmStackManager portainer.SwarmStackManager SwarmStackManager portainer.SwarmStackManager
@ -68,6 +73,7 @@ type Server struct {
SSLCert string SSLCert string
SSLKey string SSLKey string
DockerClientFactory *docker.ClientFactory DockerClientFactory *docker.ClientFactory
JobService portainer.JobService
} }
// Start starts the HTTP server // Start starts the HTTP server
@ -80,6 +86,7 @@ func (server *Server) Start() error {
AuthDisabled: server.AuthDisabled, AuthDisabled: server.AuthDisabled,
} }
requestBouncer := security.NewRequestBouncer(requestBouncerParameters) requestBouncer := security.NewRequestBouncer(requestBouncerParameters)
proxyManagerParameters := &proxy.ManagerParams{ proxyManagerParameters := &proxy.ManagerParams{
ResourceControlService: server.ResourceControlService, ResourceControlService: server.ResourceControlService,
TeamMembershipService: server.TeamMembershipService, TeamMembershipService: server.TeamMembershipService,
@ -89,6 +96,7 @@ func (server *Server) Start() error {
SignatureService: server.SignatureService, SignatureService: server.SignatureService,
} }
proxyManager := proxy.NewManager(proxyManagerParameters) proxyManager := proxy.NewManager(proxyManagerParameters)
rateLimiter := security.NewRateLimiter(10, 1*time.Second, 1*time.Hour) rateLimiter := security.NewRateLimiter(10, 1*time.Second, 1*time.Hour)
var authHandler = auth.NewHandler(requestBouncer, rateLimiter, server.AuthDisabled) var authHandler = auth.NewHandler(requestBouncer, rateLimiter, server.AuthDisabled)
@ -109,6 +117,7 @@ func (server *Server) Start() error {
endpointHandler.FileService = server.FileService endpointHandler.FileService = server.FileService
endpointHandler.ProxyManager = proxyManager endpointHandler.ProxyManager = proxyManager
endpointHandler.Snapshotter = server.Snapshotter endpointHandler.Snapshotter = server.Snapshotter
endpointHandler.JobService = server.JobService
var endpointGroupHandler = endpointgroups.NewHandler(requestBouncer) var endpointGroupHandler = endpointgroups.NewHandler(requestBouncer)
endpointGroupHandler.EndpointGroupService = server.EndpointGroupService endpointGroupHandler.EndpointGroupService = server.EndpointGroupService
@ -122,17 +131,33 @@ func (server *Server) Start() error {
var motdHandler = motd.NewHandler(requestBouncer) var motdHandler = motd.NewHandler(requestBouncer)
var extensionHandler = extensions.NewHandler(requestBouncer)
extensionHandler.ExtensionService = server.ExtensionService
extensionHandler.ExtensionManager = server.ExtensionManager
var registryHandler = registries.NewHandler(requestBouncer) var registryHandler = registries.NewHandler(requestBouncer)
registryHandler.RegistryService = server.RegistryService registryHandler.RegistryService = server.RegistryService
registryHandler.ExtensionService = server.ExtensionService
registryHandler.FileService = server.FileService
registryHandler.ProxyManager = proxyManager
var resourceControlHandler = resourcecontrols.NewHandler(requestBouncer) var resourceControlHandler = resourcecontrols.NewHandler(requestBouncer)
resourceControlHandler.ResourceControlService = server.ResourceControlService resourceControlHandler.ResourceControlService = server.ResourceControlService
var schedulesHandler = schedules.NewHandler(requestBouncer)
schedulesHandler.ScheduleService = server.ScheduleService
schedulesHandler.EndpointService = server.EndpointService
schedulesHandler.FileService = server.FileService
schedulesHandler.JobService = server.JobService
schedulesHandler.JobScheduler = server.JobScheduler
schedulesHandler.SettingsService = server.SettingsService
var settingsHandler = settings.NewHandler(requestBouncer) var settingsHandler = settings.NewHandler(requestBouncer)
settingsHandler.SettingsService = server.SettingsService settingsHandler.SettingsService = server.SettingsService
settingsHandler.LDAPService = server.LDAPService settingsHandler.LDAPService = server.LDAPService
settingsHandler.FileService = server.FileService settingsHandler.FileService = server.FileService
settingsHandler.JobScheduler = server.JobScheduler settingsHandler.JobScheduler = server.JobScheduler
settingsHandler.ScheduleService = server.ScheduleService
var stackHandler = stacks.NewHandler(requestBouncer) var stackHandler = stacks.NewHandler(requestBouncer)
stackHandler.FileService = server.FileService stackHandler.FileService = server.FileService
@ -188,6 +213,7 @@ func (server *Server) Start() error {
EndpointProxyHandler: endpointProxyHandler, EndpointProxyHandler: endpointProxyHandler,
FileHandler: fileHandler, FileHandler: fileHandler,
MOTDHandler: motdHandler, MOTDHandler: motdHandler,
ExtensionHandler: extensionHandler,
RegistryHandler: registryHandler, RegistryHandler: registryHandler,
ResourceControlHandler: resourceControlHandler, ResourceControlHandler: resourceControlHandler,
SettingsHandler: settingsHandler, SettingsHandler: settingsHandler,
@ -201,6 +227,7 @@ func (server *Server) Start() error {
UserHandler: userHandler, UserHandler: userHandler,
WebSocketHandler: websocketHandler, WebSocketHandler: websocketHandler,
WebhookHandler: webhookHandler, WebhookHandler: webhookHandler,
SchedulesHanlder: schedulesHandler,
} }
if server.SSL { if server.SSL {

View file

@ -22,11 +22,13 @@ type Service struct{}
func searchUser(username string, conn *ldap.Conn, settings []portainer.LDAPSearchSettings) (string, error) { func searchUser(username string, conn *ldap.Conn, settings []portainer.LDAPSearchSettings) (string, error) {
var userDN string var userDN string
found := false found := false
usernameEscaped := ldap.EscapeFilter(username)
for _, searchSettings := range settings { for _, searchSettings := range settings {
searchRequest := ldap.NewSearchRequest( searchRequest := ldap.NewSearchRequest(
searchSettings.BaseDN, searchSettings.BaseDN,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
fmt.Sprintf("(&%s(%s=%s))", searchSettings.Filter, searchSettings.UserNameAttribute, username), fmt.Sprintf("(&%s(%s=%s))", searchSettings.Filter, searchSettings.UserNameAttribute, usernameEscaped),
[]string{"dn"}, []string{"dn"},
nil, nil,
) )
@ -134,12 +136,13 @@ func (*Service) GetUserGroups(username string, settings *portainer.LDAPSettings)
// Get a list of group names for specified user from LDAP/AD // Get a list of group names for specified user from LDAP/AD
func getGroups(userDN string, conn *ldap.Conn, settings []portainer.LDAPGroupSearchSettings) []string { func getGroups(userDN string, conn *ldap.Conn, settings []portainer.LDAPGroupSearchSettings) []string {
groups := make([]string, 0) groups := make([]string, 0)
userDNEscaped := ldap.EscapeFilter(userDN)
for _, searchSettings := range settings { for _, searchSettings := range settings {
searchRequest := ldap.NewSearchRequest( searchRequest := ldap.NewSearchRequest(
searchSettings.GroupBaseDN, searchSettings.GroupBaseDN,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
fmt.Sprintf("(&%s(%s=%s))", searchSettings.GroupFilter, searchSettings.GroupAttribute, userDN), fmt.Sprintf("(&%s(%s=%s))", searchSettings.GroupFilter, searchSettings.GroupAttribute, userDNEscaped),
[]string{"cn"}, []string{"cn"},
nil, nil,
) )

View file

@ -47,7 +47,7 @@ type (
// LDAPSettings represents the settings used to connect to a LDAP server // LDAPSettings represents the settings used to connect to a LDAP server
LDAPSettings struct { LDAPSettings struct {
ReaderDN string `json:"ReaderDN"` ReaderDN string `json:"ReaderDN"`
Password string `json:"Password"` Password string `json:"Password,omitempty"`
URL string `json:"URL"` URL string `json:"URL"`
TLSConfig TLSConfiguration `json:"TLSConfig"` TLSConfig TLSConfiguration `json:"TLSConfig"`
StartTLS bool `json:"StartTLS"` StartTLS bool `json:"StartTLS"`
@ -89,6 +89,7 @@ type (
AllowPrivilegedModeForRegularUsers bool `json:"AllowPrivilegedModeForRegularUsers"` AllowPrivilegedModeForRegularUsers bool `json:"AllowPrivilegedModeForRegularUsers"`
SnapshotInterval string `json:"SnapshotInterval"` SnapshotInterval string `json:"SnapshotInterval"`
TemplatesURL string `json:"TemplatesURL"` TemplatesURL string `json:"TemplatesURL"`
EnableHostManagementFeatures bool `json:"EnableHostManagementFeatures"`
// Deprecated fields // Deprecated fields
DisplayDonationHeader bool DisplayDonationHeader bool
@ -164,10 +165,14 @@ type (
// RegistryID represents a registry identifier // RegistryID represents a registry identifier
RegistryID int RegistryID int
// RegistryType represents a type of registry
RegistryType int
// Registry represents a Docker registry with all the info required // Registry represents a Docker registry with all the info required
// to connect to it // to connect to it
Registry struct { Registry struct {
ID RegistryID `json:"Id"` ID RegistryID `json:"Id"`
Type RegistryType `json:"Type"`
Name string `json:"Name"` Name string `json:"Name"`
URL string `json:"URL"` URL string `json:"URL"`
Authentication bool `json:"Authentication"` Authentication bool `json:"Authentication"`
@ -175,6 +180,17 @@ type (
Password string `json:"Password,omitempty"` Password string `json:"Password,omitempty"`
AuthorizedUsers []UserID `json:"AuthorizedUsers"` AuthorizedUsers []UserID `json:"AuthorizedUsers"`
AuthorizedTeams []TeamID `json:"AuthorizedTeams"` AuthorizedTeams []TeamID `json:"AuthorizedTeams"`
ManagementConfiguration *RegistryManagementConfiguration `json:"ManagementConfiguration"`
}
// RegistryManagementConfiguration represents a configuration that can be used to query
// the registry API via the registry management extension.
RegistryManagementConfiguration struct {
Type RegistryType `json:"Type"`
Authentication bool `json:"Authentication"`
Username string `json:"Username"`
Password string `json:"Password"`
TLSConfig TLSConfiguration `json:"TLSConfig"`
} }
// DockerHub represents all the required information to connect and use the // DockerHub represents all the required information to connect and use the
@ -220,7 +236,44 @@ type (
TLSKeyPath string `json:"TLSKey,omitempty"` TLSKeyPath string `json:"TLSKey,omitempty"`
} }
// WebhookID represents an webhook identifier. // ScheduleID represents a schedule identifier.
ScheduleID int
// JobType represents a job type
JobType int
// ScriptExecutionJob represents a scheduled job that can execute a script via a privileged container
ScriptExecutionJob struct {
Endpoints []EndpointID
Image string
ScriptPath string
RetryCount int
RetryInterval int
}
// SnapshotJob represents a scheduled job that can create endpoint snapshots
SnapshotJob struct{}
// EndpointSyncJob represents a scheduled job that synchronize endpoints based on an external file
EndpointSyncJob struct{}
// Schedule represents a scheduled job.
// It only contains a pointer to one of the JobRunner implementations
// based on the JobType.
// NOTE: The Recurring option is only used by ScriptExecutionJob at the moment
Schedule struct {
ID ScheduleID `json:"Id"`
Name string
CronExpression string
Recurring bool
Created int64
JobType JobType
ScriptExecutionJob *ScriptExecutionJob
SnapshotJob *SnapshotJob
EndpointSyncJob *EndpointSyncJob
}
// WebhookID represents a webhook identifier.
WebhookID int WebhookID int
// WebhookType represents the type of resource a webhook is related to // WebhookType represents the type of resource a webhook is related to
@ -256,6 +309,17 @@ type (
ImageCount int `json:"ImageCount"` ImageCount int `json:"ImageCount"`
ServiceCount int `json:"ServiceCount"` ServiceCount int `json:"ServiceCount"`
StackCount int `json:"StackCount"` StackCount int `json:"StackCount"`
SnapshotRaw SnapshotRaw `json:"SnapshotRaw"`
}
// SnapshotRaw represents all the information related to a snapshot as returned by the Docker API
SnapshotRaw struct {
Containers interface{} `json:"Containers"`
Volumes interface{} `json:"Volumes"`
Networks interface{} `json:"Networks"`
Images interface{} `json:"Images"`
Info interface{} `json:"Info"`
Version interface{} `json:"Version"`
} }
// EndpointGroupID represents an endpoint group identifier // EndpointGroupID represents an endpoint group identifier
@ -274,7 +338,8 @@ type (
Labels []Pair `json:"Labels"` Labels []Pair `json:"Labels"`
} }
// EndpointExtension represents a extension associated to an endpoint // EndpointExtension represents a deprecated form of Portainer extension
// TODO: legacy extension management
EndpointExtension struct { EndpointExtension struct {
Type EndpointExtensionType `json:"Type"` Type EndpointExtensionType `json:"Type"`
URL string `json:"URL"` URL string `json:"URL"`
@ -410,6 +475,36 @@ type (
// It can be either a TLS CA file, a TLS certificate file or a TLS key file // It can be either a TLS CA file, a TLS certificate file or a TLS key file
TLSFileType int TLSFileType int
// ExtensionID represents a extension identifier
ExtensionID int
// Extension represents a Portainer extension
Extension struct {
ID ExtensionID `json:"Id"`
Enabled bool `json:"Enabled"`
Name string `json:"Name,omitempty"`
ShortDescription string `json:"ShortDescription,omitempty"`
Description string `json:"Description,omitempty"`
DescriptionURL string `json:"DescriptionURL,omitempty"`
Price string `json:"Price,omitempty"`
PriceDescription string `json:"PriceDescription,omitempty"`
Deal bool `json:"Deal,omitempty"`
Available bool `json:"Available,omitempty"`
License LicenseInformation `json:"License,omitempty"`
Version string `json:"Version"`
UpdateAvailable bool `json:"UpdateAvailable"`
ShopURL string `json:"ShopURL,omitempty"`
Images []string `json:"Images,omitempty"`
Logo string `json:"Logo,omitempty"`
}
// LicenseInformation represents information about an extension license
LicenseInformation struct {
LicenseKey string `json:"LicenseKey,omitempty"`
Company string `json:"Company,omitempty"`
Expiration string `json:"Expiration,omitempty"`
}
// CLIService represents a service for managing CLI // CLIService represents a service for managing CLI
CLIService interface { CLIService interface {
ParseFlags(version string) (*CLIFlags, error) ParseFlags(version string) (*CLIFlags, error)
@ -541,6 +636,17 @@ type (
DeleteResourceControl(ID ResourceControlID) error DeleteResourceControl(ID ResourceControlID) error
} }
// ScheduleService represents a service for managing schedule data
ScheduleService interface {
Schedule(ID ScheduleID) (*Schedule, error)
Schedules() ([]Schedule, error)
SchedulesByJobType(jobType JobType) ([]Schedule, error)
CreateSchedule(schedule *Schedule) error
UpdateSchedule(ID ScheduleID, schedule *Schedule) error
DeleteSchedule(ID ScheduleID) error
GetNextIdentifier() int
}
// TagService represents a service for managing tag data // TagService represents a service for managing tag data
TagService interface { TagService interface {
Tags() ([]Tag, error) Tags() ([]Tag, error)
@ -557,6 +663,14 @@ type (
DeleteTemplate(ID TemplateID) error DeleteTemplate(ID TemplateID) error
} }
// ExtensionService represents a service for managing extension data
ExtensionService interface {
Extension(ID ExtensionID) (*Extension, error)
Extensions() ([]Extension, error)
Persist(extension *Extension) error
DeleteExtension(ID ExtensionID) error
}
// CryptoService represents a service for encrypting/hashing data // CryptoService represents a service for encrypting/hashing data
CryptoService interface { CryptoService interface {
Hash(data string) (string, error) Hash(data string) (string, error)
@ -569,7 +683,7 @@ type (
GenerateKeyPair() ([]byte, []byte, error) GenerateKeyPair() ([]byte, []byte, error)
EncodedPublicKey() string EncodedPublicKey() string
PEMHeaders() (string, string) PEMHeaders() (string, string)
Sign(message string) (string, error) CreateSignature(message string) (string, error)
} }
// JWTService represents a service for managing JWT tokens // JWTService represents a service for managing JWT tokens
@ -589,11 +703,16 @@ type (
DeleteTLSFiles(folder string) error DeleteTLSFiles(folder string) error
GetStackProjectPath(stackIdentifier string) string GetStackProjectPath(stackIdentifier string) string
StoreStackFileFromBytes(stackIdentifier, fileName string, data []byte) (string, error) StoreStackFileFromBytes(stackIdentifier, fileName string, data []byte) (string, error)
StoreRegistryManagementFileFromBytes(folder, fileName string, data []byte) (string, error)
KeyPairFilesExist() (bool, error) KeyPairFilesExist() (bool, error)
StoreKeyPair(private, public []byte, privatePEMHeader, publicPEMHeader string) error StoreKeyPair(private, public []byte, privatePEMHeader, publicPEMHeader string) error
LoadKeyPair() ([]byte, []byte, error) LoadKeyPair() ([]byte, []byte, error)
WriteJSONToFile(path string, content interface{}) error WriteJSONToFile(path string, content interface{}) error
FileExists(path string) (bool, error) FileExists(path string) (bool, error)
StoreScheduledJobFileFromBytes(identifier string, data []byte) (string, error)
GetScheduleFolder(identifier string) string
ExtractExtensionArchive(data []byte) error
GetBinaryFolder() string
} }
// GitService represents a service for managing Git // GitService represents a service for managing Git
@ -604,12 +723,19 @@ type (
// JobScheduler represents a service to run jobs on a periodic basis // JobScheduler represents a service to run jobs on a periodic basis
JobScheduler interface { JobScheduler interface {
ScheduleEndpointSyncJob(endpointFilePath, interval string) error ScheduleJob(runner JobRunner) error
ScheduleSnapshotJob(interval string) error UpdateJobSchedule(runner JobRunner) error
UpdateSnapshotJob(interval string) UpdateSystemJobSchedule(jobType JobType, newCronExpression string) error
UnscheduleJob(ID ScheduleID)
Start() Start()
} }
// JobRunner represents a service that can be used to run a job
JobRunner interface {
Run()
GetSchedule() *Schedule
}
// Snapshotter represents a service used to create endpoint snapshots // Snapshotter represents a service used to create endpoint snapshots
Snapshotter interface { Snapshotter interface {
CreateSnapshot(endpoint *Endpoint) (*Snapshot, error) CreateSnapshot(endpoint *Endpoint) (*Snapshot, error)
@ -635,15 +761,32 @@ type (
Up(stack *Stack, endpoint *Endpoint) error Up(stack *Stack, endpoint *Endpoint) error
Down(stack *Stack, endpoint *Endpoint) error Down(stack *Stack, endpoint *Endpoint) error
} }
// JobService represents a service to manage job execution on hosts
JobService interface {
ExecuteScript(endpoint *Endpoint, nodeName, image string, script []byte, schedule *Schedule) error
}
// ExtensionManager represents a service used to manage extensions
ExtensionManager interface {
FetchExtensionDefinitions() ([]Extension, error)
EnableExtension(extension *Extension, licenseKey string) error
DisableExtension(extension *Extension) error
UpdateExtension(extension *Extension, version string) error
}
) )
const ( const (
// APIVersion is the version number of the Portainer API // APIVersion is the version number of the Portainer API
APIVersion = "1.19.2" APIVersion = "1.20.0"
// DBVersion is the version number of the Portainer database // DBVersion is the version number of the Portainer database
DBVersion = 14 DBVersion = 15
// AssetsServerURL represents the URL of the Portainer asset server
AssetsServerURL = "https://portainer-io-assets.sfo2.digitaloceanspaces.com"
// MessageOfTheDayURL represents the URL where Portainer MOTD message can be retrieved // MessageOfTheDayURL represents the URL where Portainer MOTD message can be retrieved
MessageOfTheDayURL = "https://raw.githubusercontent.com/portainer/motd/master/message.html" MessageOfTheDayURL = AssetsServerURL + "/motd.html"
// ExtensionDefinitionsURL represents the URL where Portainer extension definitions can be retrieved
ExtensionDefinitionsURL = AssetsServerURL + "/extensions.json"
// PortainerAgentHeader represents the name of the header available in any agent response // PortainerAgentHeader represents the name of the header available in any agent response
PortainerAgentHeader = "Portainer-Agent" PortainerAgentHeader = "Portainer-Agent"
// PortainerAgentTargetHeader represent the name of the header containing the target node name // PortainerAgentTargetHeader represent the name of the header containing the target node name
@ -763,3 +906,31 @@ const (
// ServiceWebhook is a webhook for restarting a docker service // ServiceWebhook is a webhook for restarting a docker service
ServiceWebhook ServiceWebhook
) )
const (
_ ExtensionID = iota
// RegistryManagementExtension represents the registry management extension
RegistryManagementExtension
)
const (
_ JobType = iota
// ScriptExecutionJobType is a non-system job used to execute a script against a list of
// endpoints via privileged containers
ScriptExecutionJobType
// SnapshotJobType is a system job used to create endpoint snapshots
SnapshotJobType
// EndpointSyncJobType is a system job used to synchronize endpoints from
// an external definition store
EndpointSyncJobType
)
const (
_ RegistryType = iota
// QuayRegistry represents a Quay.io registry
QuayRegistry
// AzureRegistry represents an ACR registry
AzureRegistry
// CustomRegistry represents a custom registry
CustomRegistry
)

View file

@ -54,7 +54,7 @@ info:
**NOTE**: You can find more information on how to query the Docker API in the [Docker official documentation](https://docs.docker.com/engine/api/v1.30/) as well as in [this Portainer example](https://gist.github.com/deviantony/77026d402366b4b43fa5918d41bc42f8). **NOTE**: You can find more information on how to query the Docker API in the [Docker official documentation](https://docs.docker.com/engine/api/v1.30/) as well as in [this Portainer example](https://gist.github.com/deviantony/77026d402366b4b43fa5918d41bc42f8).
version: "1.19.2" version: "1.20.0"
title: "Portainer API" title: "Portainer API"
contact: contact:
email: "info@portainer.io" email: "info@portainer.io"
@ -153,6 +153,8 @@ paths:
operationId: "DockerHubInspect" operationId: "DockerHubInspect"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: [] parameters: []
responses: responses:
200: 200:
@ -175,6 +177,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- in: "body" - in: "body"
name: "body" name: "body"
@ -211,6 +215,8 @@ paths:
operationId: "EndpointList" operationId: "EndpointList"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: [] parameters: []
responses: responses:
200: 200:
@ -233,6 +239,8 @@ paths:
- "multipart/form-data" - "multipart/form-data"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "Name" - name: "Name"
in: "formData" in: "formData"
@ -265,7 +273,11 @@ paths:
- name: "TLSSkipVerify" - name: "TLSSkipVerify"
in: "formData" in: "formData"
type: "string" type: "string"
description: "Skip server verification when using TLS" (example: false) description: "Skip server verification when using TLS (example: false)"
- name: "TLSSkipClientVerify"
in: "formData"
type: "string"
description: "Skip client verification when using TLS (example: false)"
- name: "TLSCACertFile" - name: "TLSCACertFile"
in: "formData" in: "formData"
type: "file" type: "file"
@ -324,6 +336,8 @@ paths:
operationId: "EndpointInspect" operationId: "EndpointInspect"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -365,6 +379,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -413,6 +429,8 @@ paths:
Remove an endpoint. Remove an endpoint.
**Access policy**: administrator **Access policy**: administrator
operationId: "EndpointDelete" operationId: "EndpointDelete"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -460,6 +478,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -495,6 +515,78 @@ paths:
description: "Server error" description: "Server error"
schema: schema:
$ref: "#/definitions/GenericError" $ref: "#/definitions/GenericError"
/endpoints/{id}/job:
post:
tags:
- "endpoints"
summary: "Execute a job on the endpoint host"
description: |
Execute a job (script) on the underlying host of the endpoint.
**Access policy**: administrator
operationId: "EndpointJob"
consumes:
- "application/json"
produces:
- "application/json"
security:
- jwt: []
parameters:
- name: "id"
in: "path"
description: "Endpoint identifier"
required: true
type: "integer"
- name: "method"
in: "query"
description: "Job execution method. Possible values: file or string."
required: true
type: "string"
- name: "nodeName"
in: "query"
description: "Optional. Hostname of a node when targeting a Portainer agent cluster."
required: true
type: "string"
- in: "body"
name: "body"
description: "Job details. Required when method equals string."
required: true
schema:
$ref: "#/definitions/EndpointJobRequest"
- name: "Image"
in: "formData"
type: "string"
description: "Container image which will be used to execute the job. Required when method equals file."
- name: "file"
in: "formData"
type: "file"
description: "Job script file. Required when method equals file."
responses:
200:
description: "Success"
schema:
$ref: "#/definitions/Endpoint"
400:
description: "Invalid request"
schema:
$ref: "#/definitions/GenericError"
examples:
application/json:
err: "Invalid request data format"
403:
description: "Unauthorized"
schema:
$ref: "#/definitions/GenericError"
404:
description: "Endpoint not found"
schema:
$ref: "#/definitions/GenericError"
examples:
application/json:
err: "Endpoint not found"
500:
description: "Server error"
schema:
$ref: "#/definitions/GenericError"
/endpoint_groups: /endpoint_groups:
get: get:
tags: tags:
@ -508,6 +600,8 @@ paths:
operationId: "EndpointGroupList" operationId: "EndpointGroupList"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: [] parameters: []
responses: responses:
200: 200:
@ -530,6 +624,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- in: "body" - in: "body"
name: "body" name: "body"
@ -564,6 +660,8 @@ paths:
operationId: "EndpointGroupInspect" operationId: "EndpointGroupInspect"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -605,6 +703,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -655,6 +755,8 @@ paths:
Remove an endpoint group. Remove an endpoint group.
**Access policy**: administrator **Access policy**: administrator
operationId: "EndpointGroupDelete" operationId: "EndpointGroupDelete"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -702,6 +804,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -750,6 +854,8 @@ paths:
operationId: "RegistryList" operationId: "RegistryList"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: [] parameters: []
responses: responses:
200: 200:
@ -772,6 +878,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- in: "body" - in: "body"
name: "body" name: "body"
@ -813,6 +921,8 @@ paths:
operationId: "RegistryInspect" operationId: "RegistryInspect"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -854,6 +964,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -904,6 +1016,8 @@ paths:
Remove a registry. Remove a registry.
**Access policy**: administrator **Access policy**: administrator
operationId: "RegistryDelete" operationId: "RegistryDelete"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -944,6 +1058,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -992,6 +1108,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- in: "body" - in: "body"
name: "body" name: "body"
@ -1042,6 +1160,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -1092,6 +1212,8 @@ paths:
Remove a resource control. Remove a resource control.
**Access policy**: restricted **Access policy**: restricted
operationId: "ResourceControlDelete" operationId: "ResourceControlDelete"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -1137,6 +1259,8 @@ paths:
operationId: "SettingsInspect" operationId: "SettingsInspect"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: [] parameters: []
responses: responses:
200: 200:
@ -1159,6 +1283,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- in: "body" - in: "body"
name: "body" name: "body"
@ -1193,6 +1319,8 @@ paths:
operationId: "PublicSettingsInspect" operationId: "PublicSettingsInspect"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: [] parameters: []
responses: responses:
200: 200:
@ -1216,6 +1344,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- in: "body" - in: "body"
name: "body" name: "body"
@ -1248,6 +1378,8 @@ paths:
operationId: "StatusInspect" operationId: "StatusInspect"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: [] parameters: []
responses: responses:
200: 200:
@ -1271,6 +1403,8 @@ paths:
operationId: "StackList" operationId: "StackList"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "filters" - name: "filters"
in: "query" in: "query"
@ -1303,6 +1437,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "type" - name: "type"
in: "query" in: "query"
@ -1382,6 +1518,8 @@ paths:
operationId: "StackInspect" operationId: "StackInspect"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -1427,6 +1565,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -1479,6 +1619,8 @@ paths:
Remove a stack. Remove a stack.
**Access policy**: restricted **Access policy**: restricted
operationId: "StackDelete" operationId: "StackDelete"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -1529,6 +1671,8 @@ paths:
operationId: "StackFileInspect" operationId: "StackFileInspect"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -1574,6 +1718,8 @@ paths:
operationId: "StackMigrate" operationId: "StackMigrate"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -1628,6 +1774,8 @@ paths:
operationId: "UserList" operationId: "UserList"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: [] parameters: []
responses: responses:
200: 200:
@ -1651,6 +1799,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- in: "body" - in: "body"
name: "body" name: "body"
@ -1699,6 +1849,8 @@ paths:
operationId: "UserInspect" operationId: "UserInspect"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -1740,6 +1892,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -1790,6 +1944,8 @@ paths:
Remove a user. Remove a user.
**Access policy**: administrator **Access policy**: administrator
operationId: "UserDelete" operationId: "UserDelete"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -1828,6 +1984,8 @@ paths:
operationId: "UserMembershipsInspect" operationId: "UserMembershipsInspect"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -1871,6 +2029,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -1918,6 +2078,8 @@ paths:
operationId: "UserAdminCheck" operationId: "UserAdminCheck"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: [] parameters: []
responses: responses:
204: 204:
@ -1947,6 +2109,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- in: "body" - in: "body"
name: "body" name: "body"
@ -1991,6 +2155,8 @@ paths:
- multipart/form-data - multipart/form-data
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- in: "path" - in: "path"
name: "certificate" name: "certificate"
@ -2032,6 +2198,8 @@ paths:
operationId: "TagList" operationId: "TagList"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: [] parameters: []
responses: responses:
200: 200:
@ -2054,6 +2222,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- in: "body" - in: "body"
name: "body" name: "body"
@ -2093,6 +2263,8 @@ paths:
Remove a tag. Remove a tag.
**Access policy**: administrator **Access policy**: administrator
operationId: "TagDelete" operationId: "TagDelete"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -2125,6 +2297,8 @@ paths:
operationId: "TeamList" operationId: "TeamList"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: [] parameters: []
responses: responses:
200: 200:
@ -2147,6 +2321,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- in: "body" - in: "body"
name: "body" name: "body"
@ -2195,6 +2371,8 @@ paths:
operationId: "TeamInspect" operationId: "TeamInspect"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -2243,6 +2421,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -2284,6 +2464,8 @@ paths:
Remove a team. Remove a team.
**Access policy**: administrator **Access policy**: administrator
operationId: "TeamDelete" operationId: "TeamDelete"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -2323,6 +2505,8 @@ paths:
operationId: "TeamMembershipsInspect" operationId: "TeamMembershipsInspect"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -2364,6 +2548,8 @@ paths:
operationId: "TeamMembershipList" operationId: "TeamMembershipList"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: [] parameters: []
responses: responses:
200: 200:
@ -2393,6 +2579,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- in: "body" - in: "body"
name: "body" name: "body"
@ -2443,6 +2631,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -2493,6 +2683,8 @@ paths:
Remove a team membership. Access is only available to administrators leaders of the associated team. Remove a team membership. Access is only available to administrators leaders of the associated team.
**Access policy**: restricted **Access policy**: restricted
operationId: "TeamMembershipDelete" operationId: "TeamMembershipDelete"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -2539,6 +2731,8 @@ paths:
operationId: "TemplateList" operationId: "TemplateList"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
responses: responses:
200: 200:
@ -2561,6 +2755,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- in: "body" - in: "body"
name: "body" name: "body"
@ -2602,6 +2798,8 @@ paths:
operationId: "TemplateInspect" operationId: "TemplateInspect"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -2650,6 +2848,8 @@ paths:
- "application/json" - "application/json"
produces: produces:
- "application/json" - "application/json"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -2698,6 +2898,8 @@ paths:
Remove a template. Remove a template.
**Access policy**: administrator **Access policy**: administrator
operationId: "TemplateDelete" operationId: "TemplateDelete"
security:
- jwt: []
parameters: parameters:
- name: "id" - name: "id"
in: "path" in: "path"
@ -2816,7 +3018,7 @@ definitions:
description: "Is analytics enabled" description: "Is analytics enabled"
Version: Version:
type: "string" type: "string"
example: "1.19.2" example: "1.20.0"
description: "Portainer API version" description: "Portainer API version"
PublicSettingsInspectResponse: PublicSettingsInspectResponse:
type: "object" type: "object"
@ -4160,6 +4362,24 @@ definitions:
type: "string" type: "string"
example: "jpofkc0i9uo9wtx1zesuk649w" example: "jpofkc0i9uo9wtx1zesuk649w"
description: "Swarm cluster identifier, must match the identifier of the cluster where the stack will be relocated" description: "Swarm cluster identifier, must match the identifier of the cluster where the stack will be relocated"
Name:
type: "string"
example: "new-stack"
description: "If provided will rename the migrated stack"
EndpointJobRequest:
type: "object"
required:
- "Image"
- "FileContent"
properties:
Image:
type: "string"
example: "ubuntu:latest"
description: "Container image which will be used to execute the job"
FileContent:
type: "string"
example: "ls -lah /host/tmp"
description: "Content of the job script"
StackCreateRequest: StackCreateRequest:
type: "object" type: "object"
required: required:
@ -4269,7 +4489,7 @@ definitions:
Prune: Prune:
type: "boolean" type: "boolean"
example: false example: false
description: "Prune services that are no longer referenced" description: "Prune services that are no longer referenced (only available for Swarm stacks)"
StackFileInspectResponse: StackFileInspectResponse:
type: "object" type: "object"
properties: properties:

5
api/swagger_config.json Normal file
View file

@ -0,0 +1,5 @@
{
"packageName": "portainer",
"packageVersion": "1.20.0",
"projectName": "portainer"
}

View file

@ -22,5 +22,8 @@ angular.module('portainer', [
'portainer.agent', 'portainer.agent',
'portainer.azure', 'portainer.azure',
'portainer.docker', 'portainer.docker',
'portainer.extensions',
'extension.storidge', 'extension.storidge',
'rzModule']); 'rzModule',
'moment-picker'
]);

View file

@ -0,0 +1,23 @@
angular.module('portainer.agent').controller('FileUploaderController', [
  '$q',
  function FileUploaderController($q) {
    // Tracks whether an upload is running so the view can render a spinner
    // on the upload button.
    var controller = this;

    controller.state = {
      uploadInProgress: false
    };

    controller.onFileSelected = onFileSelected;

    // Invoked by the template when the user picks a file. Delegates to the
    // bound uploadFile callback and toggles the in-progress flag around the
    // (possibly asynchronous) upload. $q.when normalizes callbacks that do
    // not return a promise.
    function onFileSelected(file) {
      if (!file) {
        return;
      }

      controller.state.uploadInProgress = true;
      $q.when(controller.uploadFile(file)).finally(function onUploadSettled() {
        controller.state.uploadInProgress = false;
      });
    }
  }
]);

View file

@ -0,0 +1,6 @@
<!-- Upload button used by the file-uploader component: shows a spinner while
     $ctrl.state.uploadInProgress is true and hands the picked file to the
     controller via ngf-select. -->
<button
ngf-select="$ctrl.onFileSelected($file)"
class="btn ng-scope"
button-spinner="$ctrl.state.uploadInProgress">
<i style="margin:0" class="fa fa-upload" ng-if="!$ctrl.state.uploadInProgress"></i>
</button>

View file

@ -0,0 +1,7 @@
// Registers the <file-uploader> component: a button that lets the user pick a
// file and forwards it to a caller-supplied upload handler.
angular.module('portainer.agent').component('fileUploader', {
  templateUrl: 'app/agent/components/file-uploader/file-uploader.html',
  controller: 'FileUploaderController',
  bindings: {
    // One-way binding exposed to callers as `on-file-selected`; the bound
    // function receives the selected file and may return a promise.
    uploadFile: '<onFileSelected'
  }
});

View file

@ -1,14 +1,14 @@
<div class="datatable"> <div class="datatable">
<rd-widget> <rd-widget>
<rd-widget-header icon="{{$ctrl.titleIcon}}" title-text="{{ $ctrl.titleText }}">
<file-uploader ng-if="$ctrl.isUploadAllowed" on-file-selected="$ctrl.onFileSelectedForUpload">
</file-uploader>
</rd-widget-header>
<rd-widget-body classes="no-padding"> <rd-widget-body classes="no-padding">
<div class="toolBar">
<div class="toolBarTitle">
<i class="fa" ng-class="$ctrl.titleIcon" aria-hidden="true" style="margin-right: 2px;"></i> {{ $ctrl.titleText }}
</div>
</div>
<div class="searchBar"> <div class="searchBar">
<i class="fa fa-search searchIcon" aria-hidden="true"></i> <i class="fa fa-search searchIcon" aria-hidden="true"></i>
<input type="text" class="searchInput" ng-model="$ctrl.state.textFilter" placeholder="Search..." auto-focus> <input type="text" class="searchInput" ng-model="$ctrl.state.textFilter"
ng-change="$ctrl.onTextFilterChange()" placeholder="Search..." auto-focus>
</div> </div>
<div class="table-responsive"> <div class="table-responsive">
<table class="table"> <table class="table">
@ -41,23 +41,29 @@
</tr> </tr>
</thead> </thead>
<tbody> <tbody>
<tr ng-if="$ctrl.volumeBrowser.state.path !== '/'"> <tr ng-if="!$ctrl.isRoot">
<td colspan="4"> <td colspan="4">
<a ng-click="$ctrl.volumeBrowser.up()"><i class="fa fa-level-up-alt space-right"></i>Go to parent</a> <a ng-click="$ctrl.goToParent()"><i class="fa fa-level-up-alt space-right"></i>Go
to parent</a>
</td> </td>
</tr> </tr>
<tr ng-repeat="item in ($ctrl.state.filteredDataSet = ($ctrl.dataset | filter:$ctrl.state.textFilter | orderBy:$ctrl.state.orderBy:$ctrl.state.reverseOrder))"> <tr ng-repeat="item in ($ctrl.state.filteredDataSet = ($ctrl.dataset | filter:$ctrl.state.textFilter | orderBy:$ctrl.state.orderBy:$ctrl.state.reverseOrder))">
<td> <td>
<span ng-if="item.edit"> <span ng-if="item.edit">
<input class="input-sm" type="text" ng-model="item.newName" on-enter-key="$ctrl.volumeBrowser.rename(item.Name, item.newName); item.edit = false;" auto-focus /> <input class="input-sm" type="text" ng-model="item.newName"
on-enter-key="$ctrl.rename({name: item.Name, newName: item.newName}); item.edit = false;"
auto-focus />
<a class="interactive" ng-click="item.edit = false;"><i class="fa fa-times"></i></a> <a class="interactive" ng-click="item.edit = false;"><i class="fa fa-times"></i></a>
<a class="interactive" ng-click="$ctrl.volumeBrowser.rename(item.Name, item.newName); item.edit = false;"><i class="fa fa-check-square"></i></a> <a class="interactive" ng-click="$ctrl.rename({name: item.Name, newName: item.newName}); item.edit = false;"><i
class="fa fa-check-square"></i></a>
</span> </span>
<span ng-if="!item.edit && item.Dir"> <span ng-if="!item.edit && item.Dir">
<a ng-click="$ctrl.volumeBrowser.browse(item.Name)"><i class="fa fa-folder space-right" aria-hidden="true"></i>{{ item.Name }}</a> <a ng-click="$ctrl.browse({name: item.Name})"><i class="fa fa-folder space-right"
aria-hidden="true"></i>{{ item.Name }}</a>
</span> </span>
<span ng-if="!item.edit && !item.Dir"> <span ng-if="!item.edit && !item.Dir">
<i class="fa fa-file space-right" aria-hidden="true"></i>{{ item.Name }} <i class="fa fa-file space-right" aria-hidden="true"></i>{{
item.Name }}
</span> </span>
</td> </td>
<td>{{ item.Size | humansize }}</td> <td>{{ item.Size | humansize }}</td>
@ -65,13 +71,14 @@
{{ item.ModTime | getisodatefromtimestamp }} {{ item.ModTime | getisodatefromtimestamp }}
</td> </td>
<td> <td>
<btn class="btn btn-xs btn-primary space-right" ng-click="$ctrl.volumeBrowser.download(item.Name)" ng-if="!item.Dir"> <btn class="btn btn-xs btn-primary space-right" ng-click="$ctrl.download({ name: item.Name })"
ng-if="!item.Dir">
<i class="fa fa-download" aria-hidden="true"></i> Download <i class="fa fa-download" aria-hidden="true"></i> Download
</btn> </btn>
<btn class="btn btn-xs btn-primary space-right" ng-click="item.newName = item.Name; item.edit = true"> <btn class="btn btn-xs btn-primary space-right" ng-click="item.newName = item.Name; item.edit = true">
<i class="fa fa-edit" aria-hidden="true"></i> Rename <i class="fa fa-edit" aria-hidden="true"></i> Rename
</btn> </btn>
<btn class="btn btn-xs btn-danger" ng-click="$ctrl.volumeBrowser.delete(item.Name)"> <btn class="btn btn-xs btn-danger" ng-click="$ctrl.delete({ name: item.Name })">
<i class="fa fa-trash" aria-hidden="true"></i> Delete <i class="fa fa-trash" aria-hidden="true"></i> Delete
</btn> </btn>
</td> </td>

View file

@ -0,0 +1,22 @@
// Registers the <files-datatable> component: a generic file listing table
// shared by the host browser and the volume browser. All user actions are
// delegated to the parent component through `&` expression bindings.
angular.module('portainer.agent').component('filesDatatable', {
  templateUrl: 'app/agent/components/files-datatable/files-datatable.html',
  controller: 'GenericDatatableController',
  bindings: {
    titleText: '@',       // widget header title
    titleIcon: '@',       // Font Awesome icon class for the header
    dataset: '<',         // array of file/directory entries to display
    tableKey: '@',        // key identifying this table's persisted settings
    orderBy: '@',         // default sort column
    reverseOrder: '<',    // whether the default sort is descending
    isRoot: '<',          // hides the "Go to parent" row when true
    goToParent: '&',      // navigate one directory up
    browse: '&',          // navigate into a directory; called with {name}
    rename: '&',          // rename an entry; called with {name, newName}
    download: '&',        // download a file; called with {name}
    delete: '&',          // delete an entry; called with {name}
    isUploadAllowed: '<', // shows the upload button when true
    onFileSelectedForUpload: '<' // handler passed down to <file-uploader>
  }
});

View file

@ -0,0 +1,147 @@
angular.module('portainer.agent').controller('HostBrowserController', [
  'HostBrowserService', 'Notifications', 'FileSaver', 'ModalService',
  function HostBrowserController(HostBrowserService, Notifications, FileSaver, ModalService) {
    var ctrl = this;
    // Root of the host filesystem as exposed by the agent.
    var ROOT_PATH = '/host';

    ctrl.state = {
      path: ROOT_PATH // current directory; always prefixed with ROOT_PATH
    };

    ctrl.goToParent = goToParent;
    ctrl.browse = browse;
    ctrl.renameFile = renameFile;
    ctrl.downloadFile = downloadFile;
    ctrl.deleteFile = confirmDeleteFile;
    ctrl.isRoot = isRoot;
    ctrl.onFileSelectedForUpload = onFileSelectedForUpload;
    ctrl.$onInit = $onInit;
    ctrl.getRelativePath = getRelativePath;

    // Strips the ROOT_PATH prefix so paths shown to the user start at '/'.
    function getRelativePath(path) {
      path = path || ctrl.state.path;
      var rootPathRegex = new RegExp('^' + ROOT_PATH + '\/?');
      var relativePath = path.replace(rootPathRegex, '/');
      return relativePath;
    }

    function goToParent() {
      // Bug fix: use `ctrl` instead of `this` so the function does not depend
      // on its call-site receiver (template expressions may invoke it with a
      // different `this`).
      getFilesForPath(parentPath(ctrl.state.path));
    }

    function isRoot() {
      return ctrl.state.path === ROOT_PATH;
    }

    // Navigates into a sub-directory of the current path.
    function browse(folder) {
      getFilesForPath(buildPath(ctrl.state.path, folder));
    }

    // Lists `path` and, on success, makes it the current directory.
    function getFilesForPath(path) {
      HostBrowserService.ls(path)
        .then(function onFilesLoaded(files) {
          ctrl.state.path = path;
          ctrl.files = files;
        })
        .catch(function onLoadingFailed(err) {
          Notifications.error('Failure', err, 'Unable to browse');
        });
    }

    // Renames an entry within the current directory, then reloads the listing.
    function renameFile(name, newName) {
      var filePath = buildPath(ctrl.state.path, name);
      var newFilePath = buildPath(ctrl.state.path, newName);

      HostBrowserService.rename(filePath, newFilePath)
        .then(function onRenameSuccess() {
          Notifications.success('File successfully renamed', getRelativePath(newFilePath));
          return HostBrowserService.ls(ctrl.state.path);
        })
        .then(function onFilesLoaded(files) {
          ctrl.files = files;
        })
        .catch(function notifyOnError(err) {
          Notifications.error('Failure', err, 'Unable to rename file');
        });
    }

    // Downloads a file from the current directory via FileSaver.
    function downloadFile(file) {
      var filePath = buildPath(ctrl.state.path, file);
      HostBrowserService.get(filePath)
        .then(function onFileReceived(data) {
          var downloadData = new Blob([data.file], {
            type: 'text/plain;charset=utf-8'
          });
          FileSaver.saveAs(downloadData, file);
        })
        .catch(function notifyOnError(err) {
          Notifications.error('Failure', err, 'Unable to download file');
        });
    }

    // Asks the user for confirmation before deleting an entry.
    function confirmDeleteFile(name) {
      var filePath = buildPath(ctrl.state.path, name);

      ModalService.confirmDeletion(
        'Are you sure that you want to delete ' + getRelativePath(filePath) + ' ?',
        function onConfirm(confirmed) {
          if (!confirmed) {
            return;
          }
          return deleteFile(filePath);
        }
      );
    }

    // Deletes `path`, then reloads the current directory listing.
    function deleteFile(path) {
      HostBrowserService.delete(path)
        .then(function onDeleteSuccess() {
          Notifications.success('File successfully deleted', getRelativePath(path));
          return HostBrowserService.ls(ctrl.state.path);
        })
        .then(function onFilesLoaded(data) {
          ctrl.files = data;
        })
        .catch(function notifyOnError(err) {
          Notifications.error('Failure', err, 'Unable to delete file');
        });
    }

    function $onInit() {
      getFilesForPath(ROOT_PATH);
    }

    // Returns the parent directory of `path`; the root is its own parent.
    // NOTE: relies on the global lodash `_` already used across this module.
    function parentPath(path) {
      if (path === ROOT_PATH) {
        return ROOT_PATH;
      }

      var split = _.split(path, '/');
      return _.join(_.slice(split, 0, split.length - 1), '/');
    }

    // Joins a directory and an entry name without doubling the separator.
    function buildPath(parent, file) {
      if (parent.lastIndexOf('/') === parent.length - 1) {
        return parent + file;
      }
      return parent + '/' + file;
    }

    // Uploads a file into the current directory, then refreshes the listing.
    function onFileSelectedForUpload(file) {
      HostBrowserService.upload(ctrl.state.path, file)
        .then(function onFileUploadSuccess() {
          onFileUploaded();
        })
        .catch(function onFileUploadError(err) {
          Notifications.error('Failure', err, 'Unable to upload file');
        });
    }

    function onFileUploaded() {
      refreshList();
    }

    function refreshList() {
      getFilesForPath(ctrl.state.path);
    }
  }
]);

View file

@ -0,0 +1,16 @@
<!-- Host filesystem browser: renders the current directory via the shared
     files-datatable component, delegating all operations to
     HostBrowserController. Uploads are always enabled here. -->
<files-datatable
title-text="Host browser - {{$ctrl.getRelativePath()}}" title-icon="fa-file"
dataset="$ctrl.files" table-key="host_browser"
order-by="Dir"
is-root="$ctrl.isRoot()"
go-to-parent="$ctrl.goToParent()"
browse="$ctrl.browse(name)"
rename="$ctrl.renameFile(name, newName)"
download="$ctrl.downloadFile(name)"
delete="$ctrl.deleteFile(name)"
is-upload-allowed="true"
on-file-selected-for-upload="$ctrl.onFileSelectedForUpload"
>
</files-datatable>

View file

@ -0,0 +1,5 @@
// Registers the <host-browser> component: a standalone browser for the host
// filesystem exposed by the agent. Takes no bindings; all state lives in
// HostBrowserController.
angular.module('portainer.agent').component('hostBrowser', {
  controller: 'HostBrowserController',
  templateUrl: 'app/agent/components/host-browser/host-browser.html',
  bindings: {}
});

View file

@ -1,15 +0,0 @@
angular.module('portainer.agent').component('volumeBrowserDatatable', {
templateUrl: 'app/agent/components/volume-browser/volume-browser-datatable/volumeBrowserDatatable.html',
controller: 'GenericDatatableController',
bindings: {
titleText: '@',
titleIcon: '@',
dataset: '<',
tableKey: '@',
orderBy: '@',
reverseOrder: '<'
},
require: {
volumeBrowser: '^^volumeBrowser'
}
});

View file

@ -3,6 +3,7 @@ angular.module('portainer.agent').component('volumeBrowser', {
controller: 'VolumeBrowserController', controller: 'VolumeBrowserController',
bindings: { bindings: {
volumeId: '<', volumeId: '<',
nodeName: '<' nodeName: '<',
isUploadEnabled: '<'
} }
}); });

View file

@ -1,5 +1,14 @@
<volume-browser-datatable <files-datatable
title-text="Volume browser" title-icon="fa-file" title-text="Volume browser" title-icon="fa-file"
dataset="$ctrl.files" table-key="volume_browser" dataset="$ctrl.files" table-key="volume_browser"
order-by="Dir" order-by="Dir"
></volume-browser-datatable> is-root="$ctrl.state.path === '/'"
go-to-parent="$ctrl.up()"
browse="$ctrl.browse(name)"
rename="$ctrl.rename(name, newName)"
download="$ctrl.download(name)"
delete="$ctrl.delete(name)"
is-upload-allowed="$ctrl.isUploadEnabled"
on-file-selected-for-upload="$ctrl.onFileSelectedForUpload"
></files-datatable>

View file

@ -84,6 +84,16 @@ function (HttpRequestHelper, VolumeBrowserService, FileSaver, Blob, ModalService
}); });
} }
this.onFileSelectedForUpload = function onFileSelectedForUpload(file) {
VolumeBrowserService.upload(ctrl.state.path, file, ctrl.volumeId)
.then(function onFileUpload() {
onFileUploaded();
})
.catch(function onFileUpload(err) {
Notifications.error('Failure', err, 'Unable to upload file');
});
};
function parentPath(path) { function parentPath(path) {
if (path.lastIndexOf('/') === 0) { if (path.lastIndexOf('/') === 0) {
return '/'; return '/';
@ -112,4 +122,14 @@ function (HttpRequestHelper, VolumeBrowserService, FileSaver, Blob, ModalService
}); });
}; };
function onFileUploaded() {
refreshList();
}
function refreshList() {
browse(ctrl.state.path);
}
}]); }]);

View file

@ -1,8 +1,10 @@
angular.module('portainer.agent') angular.module('portainer.agent')
.factory('Agent', ['$resource', 'API_ENDPOINT_ENDPOINTS', 'EndpointProvider', function AgentFactory($resource, API_ENDPOINT_ENDPOINTS, EndpointProvider) { .factory('Agent', ['$resource', 'API_ENDPOINT_ENDPOINTS', 'EndpointProvider', 'StateManager',
function AgentFactory($resource, API_ENDPOINT_ENDPOINTS, EndpointProvider, StateManager) {
'use strict'; 'use strict';
return $resource(API_ENDPOINT_ENDPOINTS + '/:endpointId/docker/agents', { return $resource(API_ENDPOINT_ENDPOINTS + '/:endpointId/docker/v:version/agents', {
endpointId: EndpointProvider.endpointID endpointId: EndpointProvider.endpointID,
version: StateManager.getAgentApiVersion
}, },
{ {
query: { method: 'GET', isArray: true } query: { method: 'GET', isArray: true }

View file

@ -1,22 +1,24 @@
angular.module('portainer.agent') angular.module('portainer.agent')
.factory('Browse', ['$resource', 'API_ENDPOINT_ENDPOINTS', 'EndpointProvider', function BrowseFactory($resource, API_ENDPOINT_ENDPOINTS, EndpointProvider) { .factory('Browse', ['$resource', 'API_ENDPOINT_ENDPOINTS', 'EndpointProvider', 'StateManager',
function BrowseFactory($resource, API_ENDPOINT_ENDPOINTS, EndpointProvider, StateManager) {
'use strict'; 'use strict';
return $resource(API_ENDPOINT_ENDPOINTS + '/:endpointId/docker/browse/:id/:action', { return $resource(API_ENDPOINT_ENDPOINTS + '/:endpointId/docker/v:version/browse/:action', {
endpointId: EndpointProvider.endpointID endpointId: EndpointProvider.endpointID,
version: StateManager.getAgentApiVersion
}, },
{ {
ls: { ls: {
method: 'GET', isArray: true, params: { id: '@id', action: 'ls' } method: 'GET', isArray: true, params: { action: 'ls' }
}, },
get: { get: {
method: 'GET', params: { id: '@id', action: 'get' }, method: 'GET', params: { action: 'get' },
transformResponse: browseGetResponse transformResponse: browseGetResponse
}, },
delete: { delete: {
method: 'DELETE', params: { id: '@id', action: 'delete' } method: 'DELETE', params: { action: 'delete' }
}, },
rename: { rename: {
method: 'PUT', params: { id: '@id', action: 'rename' } method: 'PUT', params: { action: 'rename' }
} }
}); });
}]); }]);

16
app/agent/rest/host.js Normal file
View file

@ -0,0 +1,16 @@
// Host exposes agent routes that operate on the underlying host (currently
// only /host/info), proxied through the Portainer API for the current
// endpoint and versioned agent API.
angular.module('portainer.agent').factory('Host', [
  '$resource', 'API_ENDPOINT_ENDPOINTS', 'EndpointProvider', 'StateManager',
  // Fixed copy-paste: the factory function was named AgentFactory although it
  // builds the Host resource. DI is positional, so renaming is safe.
  function HostFactory($resource, API_ENDPOINT_ENDPOINTS, EndpointProvider, StateManager) {
    'use strict';
    return $resource(
      API_ENDPOINT_ENDPOINTS + '/:endpointId/docker/v:version/host/:action',
      {
        // Function references: resolved on each request so the current
        // endpoint and agent API version are always used.
        endpointId: EndpointProvider.endpointID,
        version: StateManager.getAgentApiVersion
      },
      {
        info: { method: 'GET', params: { action: 'info' } }
      }
    );
  }
]);

33
app/agent/rest/ping.js Normal file
View file

@ -0,0 +1,33 @@
// AgentPing probes the agent behind the current endpoint to detect whether an
// agent is present and which agent API version it speaks.
angular.module('portainer.agent').factory('AgentPing', [
  '$resource', 'API_ENDPOINT_ENDPOINTS', 'EndpointProvider', '$q',
  function AgentPingFactory($resource, API_ENDPOINT_ENDPOINTS, EndpointProvider, $q) {
    'use strict';
    return $resource(
      API_ENDPOINT_ENDPOINTS + '/:endpointId/docker/ping',
      {
        // Function reference: resolved on each request so the current
        // endpoint is always used.
        endpointId: EndpointProvider.endpointID
      },
      {
        ping: {
          method: 'GET',
          interceptor: {
            // Attach the agent API version (from the response header, default
            // 1 when the header is absent) to the resolved resource.
            response: function versionInterceptor(response) {
              var instance = response.resource;
              var version =
                response.headers('Portainer-Agent-Api-Version') || 1;
              instance.version = Number(version);
              return instance;
            },
            responseError: function versionResponseError(error) {
              // 404 - agent is up - set version to 1
              if (error.status === 404) {
                return { version: 1 };
              }
              return $q.reject(error);
            }
          }
        }
      }
    );
  }
]);

View file

@ -0,0 +1,10 @@
// AgentVersion1 queries the list of agents through the legacy (v1) agent API
// route, which carries no version segment in the URL.
angular.module('portainer.agent')
.factory('AgentVersion1', ['$resource', 'API_ENDPOINT_ENDPOINTS', 'EndpointProvider', function AgentFactory($resource, API_ENDPOINT_ENDPOINTS, EndpointProvider) {
  'use strict';
  return $resource(API_ENDPOINT_ENDPOINTS + '/:endpointId/docker/agents', {
    // Function reference: resolved on each request so the current endpoint
    // is always used.
    endpointId: EndpointProvider.endpointID
  },
  {
    query: { method: 'GET', isArray: true }
  });
}]);

View file

@ -0,0 +1,22 @@
// BrowseVersion1 exposes the legacy (v1) agent file-browsing API, whose
// routes embed the volume identifier in the URL path rather than passing it
// as a query parameter (as the versioned Browse factory does).
angular.module('portainer.agent')
.factory('BrowseVersion1', ['$resource', 'API_ENDPOINT_ENDPOINTS', 'EndpointProvider', function BrowseFactory($resource, API_ENDPOINT_ENDPOINTS, EndpointProvider) {
  'use strict';
  return $resource(API_ENDPOINT_ENDPOINTS + '/:endpointId/docker/browse/:volumeID/:action', {
    // Function reference: resolved on each request so the current endpoint
    // is always used.
    endpointId: EndpointProvider.endpointID
  },
  {
    ls: {
      method: 'GET', isArray: true, params: { action: 'ls' }
    },
    get: {
      method: 'GET', params: { action: 'get' },
      // NOTE(review): browseGetResponse is not defined in this fragment;
      // presumably a shared transform declared elsewhere in this file - verify.
      transformResponse: browseGetResponse
    },
    delete: {
      method: 'DELETE', params: { action: 'delete' }
    },
    rename: {
      method: 'PUT', params: { action: 'rename' }
    }
  });
}]);

View file

@ -1,14 +1,31 @@
angular.module('portainer.agent') angular.module('portainer.agent').factory('AgentService', [
.factory('AgentService', ['$q', 'Agent', function AgentServiceFactory($q, Agent) { '$q', 'Agent', 'AgentVersion1', 'HttpRequestHelper', 'Host', 'StateManager',
function AgentServiceFactory($q, Agent, AgentVersion1, HttpRequestHelper, Host, StateManager) {
'use strict'; 'use strict';
var service = {}; var service = {};
service.agents = function() { service.agents = agents;
service.hostInfo = hostInfo;
function getAgentApiVersion() {
var state = StateManager.getState();
return state.endpoint.agentApiVersion;
}
function hostInfo(nodeName) {
HttpRequestHelper.setPortainerAgentTargetHeader(nodeName);
return Host.info().$promise;
}
function agents() {
var deferred = $q.defer(); var deferred = $q.defer();
Agent.query({}).$promise var agentVersion = getAgentApiVersion();
.then(function success(data) { var service = agentVersion > 1 ? Agent : AgentVersion1;
var agents = data.map(function (item) {
service.query({ version: agentVersion })
.$promise.then(function success(data) {
var agents = data.map(function(item) {
return new AgentViewModel(item); return new AgentViewModel(item);
}); });
deferred.resolve(agents); deferred.resolve(agents);
@ -18,7 +35,8 @@ angular.module('portainer.agent')
}); });
return deferred.promise; return deferred.promise;
}; }
return service; return service;
}]); }
]);

View file

@ -0,0 +1,52 @@
angular.module('portainer.agent').factory('HostBrowserService', [
  'Browse', 'Upload', 'API_ENDPOINT_ENDPOINTS', 'EndpointProvider', '$q', 'StateManager',
  function HostBrowserServiceFactory(Browse, Upload, API_ENDPOINT_ENDPOINTS, EndpointProvider, $q, StateManager) {
    // Promise-based wrapper around the agent's /browse routes, operating on
    // host filesystem paths. Function declarations are hoisted, so the
    // service object can be returned up front.
    return {
      ls: ls,
      get: get,
      delete: deletePath,
      rename: rename,
      upload: upload
    };

    // Lists the directory at `path`.
    function ls(path) {
      return Browse.ls({ path: path }).$promise;
    }

    // Retrieves the contents of the file at `path`.
    function get(path) {
      return Browse.get({ path: path }).$promise;
    }

    // Deletes the entry at `path`. Named deletePath because `delete` is a
    // reserved word in identifier position.
    function deletePath(path) {
      return Browse.delete({ path: path }).$promise;
    }

    // Moves/renames an entry from `path` to `newPath`.
    function rename(path, newPath) {
      return Browse.rename({}, {
        CurrentFilePath: path,
        NewFilePath: newPath
      }).$promise;
    }

    // Uploads `file` into the directory at `path`, reporting progress through
    // the optional onProgress notification callback.
    function upload(path, file, onProgress) {
      var deferred = $q.defer();
      var agentVersion = StateManager.getAgentApiVersion();
      // Agent API v2+ nests its routes under a /v<version> segment.
      var versionSegment = agentVersion > 1 ? '/v' + agentVersion : '';
      var url = API_ENDPOINT_ENDPOINTS + '/' + EndpointProvider.endpointID() +
        '/docker' + versionSegment + '/browse/put';

      Upload.upload({
        url: url,
        data: { file: file, Path: path }
      }).then(deferred.resolve, deferred.reject, onProgress);
      return deferred.promise;
    }
  }
]);

View file

@ -0,0 +1,14 @@
angular.module('portainer.agent').service('AgentPingService', [
  'AgentPing',
  function AgentPingService(AgentPing) {
    // Returning an object from the constructor makes it the service instance.
    return {
      // Pings the agent for the current endpoint; resolves with the response
      // produced by the AgentPing resource.
      ping: function ping() {
        return AgentPing.ping().$promise;
      }
    };
  }
]);

View file

@ -1,18 +1,29 @@
angular.module('portainer.agent') angular.module('portainer.agent').factory('VolumeBrowserService', [
.factory('VolumeBrowserService', ['$q', 'Browse', function VolumeBrowserServiceFactory($q, Browse) { 'StateManager', 'Browse', 'BrowseVersion1', '$q', 'API_ENDPOINT_ENDPOINTS', 'EndpointProvider', 'Upload',
function VolumeBrowserServiceFactory(StateManager, Browse, BrowseVersion1, $q, API_ENDPOINT_ENDPOINTS, EndpointProvider, Upload) {
'use strict'; 'use strict';
var service = {}; var service = {};
function getAgentApiVersion() {
var state = StateManager.getState();
return state.endpoint.agentApiVersion;
}
function getBrowseService() {
var agentVersion = getAgentApiVersion();
return agentVersion > 1 ? Browse : BrowseVersion1;
}
service.ls = function(volumeId, path) { service.ls = function(volumeId, path) {
return Browse.ls({ 'id': volumeId, 'path': path }).$promise; return getBrowseService().ls({ volumeID: volumeId, path: path, version: getAgentApiVersion() }).$promise;
}; };
service.get = function(volumeId, path) { service.get = function(volumeId, path) {
return Browse.get({ 'id': volumeId, 'path': path }).$promise; return getBrowseService().get({ volumeID: volumeId, path: path, version: getAgentApiVersion() }).$promise;
}; };
service.delete = function(volumeId, path) { service.delete = function(volumeId, path) {
return Browse.delete({ 'id': volumeId, 'path': path }).$promise; return getBrowseService().delete({ volumeID: volumeId, path: path, version: getAgentApiVersion() }).$promise;
}; };
service.rename = function(volumeId, path, newPath) { service.rename = function(volumeId, path, newPath) {
@ -20,8 +31,32 @@ angular.module('portainer.agent')
CurrentFilePath: path, CurrentFilePath: path,
NewFilePath: newPath NewFilePath: newPath
}; };
return Browse.rename({ 'id': volumeId }, payload).$promise; return getBrowseService().rename({ volumeID: volumeId, version: getAgentApiVersion() }, payload).$promise;
};
service.upload = function upload(path, file, volumeId, onProgress) {
var deferred = $q.defer();
var agentVersion = StateManager.getAgentApiVersion();
if (agentVersion <2) {
deferred.reject('upload is not supported on this agent version');
return;
}
var url =
API_ENDPOINT_ENDPOINTS +
'/' +
EndpointProvider.endpointID() +
'/docker' +
'/v' + agentVersion +
'/browse/put?volumeID=' +
volumeId;
Upload.upload({
url: url,
data: { file: file, Path: path }
}).then(deferred.resolve, deferred.reject, onProgress);
return deferred.promise;
}; };
return service; return service;
}]); }
]);

View file

@ -37,10 +37,16 @@ function ($rootScope, $state, Authentication, authManager, StateManager, Endpoin
function initAuthentication(authManager, Authentication, $rootScope, $state) { function initAuthentication(authManager, Authentication, $rootScope, $state) {
authManager.checkAuthOnRefresh(); authManager.checkAuthOnRefresh();
authManager.redirectWhenUnauthenticated();
Authentication.init(); Authentication.init();
$rootScope.$on('tokenHasExpired', function() {
// The unauthenticated event is broadcasted by the jwtInterceptor when
// hitting a 401. We're using this instead of the usual combination of
// authManager.redirectWhenUnauthenticated() + unauthenticatedRedirector
// to have more controls on which URL should trigger the unauthenticated state.
$rootScope.$on('unauthenticated', function (event, data) {
if (!_.includes(data.config.url, '/v2/')) {
$state.go('portainer.auth', {error: 'Your session has expired'}); $state.go('portainer.auth', {error: 'Your session has expired'});
}
}); });
} }

View file

@ -17,7 +17,7 @@
</div> </div>
<div class="searchBar"> <div class="searchBar">
<i class="fa fa-search searchIcon" aria-hidden="true"></i> <i class="fa fa-search searchIcon" aria-hidden="true"></i>
<input type="text" class="searchInput" ng-model="$ctrl.state.textFilter" placeholder="Search..." auto-focus> <input type="text" class="searchInput" ng-model="$ctrl.state.textFilter" ng-change="$ctrl.onTextFilterChange()" placeholder="Search..." auto-focus>
</div> </div>
<div class="table-responsive"> <div class="table-responsive">
<table class="table table-hover table-filters nowrap-cells"> <table class="table table-hover table-filters nowrap-cells">

View file

@ -14,12 +14,10 @@ angular.module('portainer')
jwtOptionsProvider.config({ jwtOptionsProvider.config({
tokenGetter: ['LocalStorage', function(LocalStorage) { tokenGetter: ['LocalStorage', function(LocalStorage) {
return LocalStorage.getJWT(); return LocalStorage.getJWT();
}],
unauthenticatedRedirector: ['$state', function($state) {
$state.go('portainer.auth', {error: 'Your session has expired'});
}] }]
}); });
$httpProvider.interceptors.push('jwtInterceptor'); $httpProvider.interceptors.push('jwtInterceptor');
$httpProvider.interceptors.push('EndpointStatusInterceptor');
$httpProvider.defaults.headers.post['Content-Type'] = 'application/json'; $httpProvider.defaults.headers.post['Content-Type'] = 'application/json';
$httpProvider.defaults.headers.put['Content-Type'] = 'application/json'; $httpProvider.defaults.headers.put['Content-Type'] = 'application/json';
$httpProvider.defaults.headers.patch['Content-Type'] = 'application/json'; $httpProvider.defaults.headers.patch['Content-Type'] = 'application/json';
@ -51,6 +49,7 @@ angular.module('portainer')
cfpLoadingBarProvider.includeSpinner = false; cfpLoadingBarProvider.includeSpinner = false;
cfpLoadingBarProvider.parentSelector = '#loadingbar-placeholder'; cfpLoadingBarProvider.parentSelector = '#loadingbar-placeholder';
cfpLoadingBarProvider.latencyThreshold = 600;
$urlRouterProvider.otherwise('/auth'); $urlRouterProvider.otherwise('/auth');
}]); }]);

Some files were not shown because too many files have changed in this diff Show more