Mirror of https://github.com/portainer/portainer.git · synced 2025-07-21 06:19:41 +02:00

Squashed commit: extract the boltdb-specific code behind a datastore interface, plus assorted fixes. The refactoring commits below are each Signed-off-by: Sven Dowideit <sven.dowideit@portainer.io>:

* use the Store interface IsErrObjectNotFound() to avoid revealing internal errors
* see what happens when you extract the datastore interfaces into their own package
* start renaming Storage methods
* extract the boltdb-specific code from the Portainer storage code (one example; the others need the same)
* further extract bolt.Tx from the datastore code
* minimise imports by moving the struct definition into the file that needs the Service imports
* more extraction of boltdb.Tx
* extract the use of bucket.SetSequence
* almost done - just endpoint.Synchronise :/
* endpoint.Synchronize looks hard, but I can't find where we use it, so 'delete first' refactoring
* fix test compile errors
* test compile fixes after rebase
* fix a mis-remembering I had wrt deserialisation - last time I used AnyData; jsoniter's bindTo looks interesting for the same reason
* set us up to make the connection an interface
* make the db connection a datastore interface, and separate our datastore services from the bolt ones
* rename methods to something less boltdb-internals specific
* these errors are not boltdb specific
* start using the db-backend factory method too
* export boltdb raw in case we can't export from the service layer
* add a raw export from boltdb to yaml for broken dbs, and an export of services to yaml in backup
* add the version info by hand for now
* actually, the export from services can be fully typed - it's the import that needs to do more work
* redo raw export, and make import capable of using it
* add DockerHub
* migration from anything older than v1.21.0 has been broken for quite a while; delete the untested code
* fix go test ./... again
* my goland wasn't set up to gofmt
* move the two extremely dubious migration tests down into store, so they can use the test store code
* the migrator is now free of boltdb
* reverse goland's overzealous replacement of internal with boltdb
* more undoing of over-zealous goland internal->boltdb
* yay, now bolt is only mentioned inside the api/database/ dir
* and this might be the last of the boltdb references?
* add todo
* extract the store code into a separate module too
* don't need the fileService in boltdb anymore
* use IsErrObjectNotFound()
* use a string to select which database backend we use
* make the store's isNew an ephemeral bool that doesn't stay true after we've initialised it
* move the import.json WIP to a separate file so it's more obvious - we'll be using it for testing, emergency fixups, and in the next part of the store work, when we improve migrations and data model lifecycles
* undo vscode formatting of html

Merged alongside:

* fix app templates symbol (#6221)
* feat(webhook) EE-2125 send registry auth header when updating a swarm service via webhook (#6220): add registry-util helpers; persist registryID when creating a webhook; send the registry auth header when executing a webhook; send registryID to the backend when creating a service with a webhook; use the initial registry ID to create the webhook on the edit-service screen; update the webhook when the registry is updated; add an endpoint for updating a webhook; code cleanup; fix a typo; fix a circular-import issue in a unit test (Co-authored-by: Simon Meng <simon.meng@portainer.io>)
* fix(kubeconfig): show kubeconfig download button for non-admin users [EE-2123] (#6204) (Co-authored-by: Simon Meng <simon.meng@portainer.io>)
* fix data-cy for k8s cluster menu (#6226)
* feat(stack): make stacks created from app templates editable EE-1941 (#6104)
* fix(container): disable the Duplicate/Edit button when the container is Portainer (#6223)
* fix/ee-1909/show-pull-image-error (#6195) (Co-authored-by: sunportainer <ericsun@SG1.local>)
* feat(cy): add data-cy to the helm install button (#6241)
* feat(cy): add data-cy to the add-registry button (#6242)
* refactor(app): convert root folder files to es6 (#4159)
* refactor(app): duplicate constants as es6 exports (#4158)
* fix(docker): provide a workaround to save the network name variable (#6080) - fix/EE-1862/unable-to-stop-or-remove-stack: work around a variable without a default value in the yaml file; check the yaml file; fix func and var names; wrap errors and use a bool-backed StringSet; unit tests for createNetworkEnvFile and the %s=%s case; wrap the error when extracting the network name; wrap all error returns; store more env vars and write them to an env file; make the default value None
* feat: gzip static resources (#6258)
* fix(ssl): handle --sslcert and --sslkey ee-2106 (#6203) (Co-authored-by: sunportainer <ericsun@SG1.local>)
* fix(server): support disabling forced https ee-2068 (#6232)
* feat(store): implement store tests EE-2112 (#6224): add store tests; add some more tests; update missing helm user repo methods; remove redundant comments; add webhook export; update webhooks
* fix build issues after rebasing
* move migratorparams
* remove unneeded integer type conversions
* disable the db import/export for now

Co-authored-by: Richard Wei <54336863+WaysonWei@users.noreply.github.com>, cong meng <mcpacino@gmail.com>, Simon Meng <simon.meng@portainer.io>, Marcelo Rydel <marcelorydel26@gmail.com>, Hao Zhang <hao.zhang@portainer.io>, sunportainer <93502624+sunportainer@users.noreply.github.com>, sunportainer <ericsun@SG1.local>, wheresolivia <78844659+wheresolivia@users.noreply.github.com>, Chaim Lev-Ari <chiptus@users.noreply.github.com>, Chao Geng <93526589+chaogeng77977@users.noreply.github.com>, Dmitry Salakhov <to@dimasalakhov.com>, Matt Hook <hookenz@gmail.com>

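The first bullet above is the core of the refactor: callers probe the store's IsErrObjectNotFound() predicate instead of matching boltdb error values, so storage internals never leak out of the service layer. A minimal sketch of the pattern, assuming the dataservices Settings service keeps its Settings()/UpdateSettings() shape (ensureSettings is a hypothetical helper; retrievePrivateKeySeed further down this file is the real in-tree example):

func ensureSettings(dataStore dataservices.DataStore) (*portainer.Settings, error) {
	settings, err := dataStore.Settings().Settings()
	if dataStore.IsErrObjectNotFound(err) {
		// "Not found" is an expected condition: create defaults instead of
		// surfacing a backend-specific error to the caller.
		settings = &portainer.Settings{}
		return settings, dataStore.Settings().UpdateSettings(settings)
	} else if err != nil {
		return nil, err
	}
	return settings, nil
}
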
246 lines · 7.9 KiB · Go

package chisel

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"strconv"
	"time"

	"github.com/dchest/uniuri"
	chserver "github.com/jpillora/chisel/server"
	cmap "github.com/orcaman/concurrent-map"
	portainer "github.com/portainer/portainer/api"
	"github.com/portainer/portainer/api/dataservices"
	"github.com/portainer/portainer/api/http/proxy"
)

const (
	// tunnelCleanupInterval is how often checkTunnels runs, and how often
	// KeepTunnelAlive pings the agent.
	tunnelCleanupInterval = 10 * time.Second
	// requiredTimeout is how long a tunnel may stay in the REQUIRED state
	// without activity before it is set back to idle.
	requiredTimeout = 15 * time.Second
	// activeTimeout is how long a tunnel may stay in the ACTIVE state
	// without activity before it is snapshotted and set back to idle.
	activeTimeout = 4*time.Minute + 30*time.Second
)

// Service represents a service to manage the state of multiple reverse tunnels.
// It is used to start a reverse tunnel server and to manage the connection status
// of each tunnel connected to the tunnel server.
type Service struct {
	serverFingerprint string
	serverPort        string
	tunnelDetailsMap  cmap.ConcurrentMap
	dataStore         dataservices.DataStore
	snapshotService   portainer.SnapshotService
	chiselServer      *chserver.Server
	shutdownCtx       context.Context
	ProxyManager      *proxy.Manager
}

// NewService returns a pointer to a new instance of Service
func NewService(dataStore dataservices.DataStore, shutdownCtx context.Context) *Service {
	return &Service{
		tunnelDetailsMap: cmap.New(),
		dataStore:        dataStore,
		shutdownCtx:      shutdownCtx,
	}
}

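// For orientation, a sketch of how this service is typically wired at startup.
// The real call site lives outside this file, in Portainer's server bootstrap;
// the variable names and the "0.0.0.0"/"8000" listen address below are
// illustrative assumptions, not verbatim source:
//
//	chiselService := chisel.NewService(dataStore, shutdownCtx)
//	if err := chiselService.StartTunnelServer("0.0.0.0", "8000", snapshotService); err != nil {
//		log.Fatal(err)
//	}
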
// pingAgent pings the given agent so that the agent can keep the tunnel alive.
func (service *Service) pingAgent(endpointID portainer.EndpointID) error {
	tunnel := service.GetTunnelDetails(endpointID)
	requestURL := fmt.Sprintf("http://127.0.0.1:%d/ping", tunnel.Port)
	req, err := http.NewRequest(http.MethodHead, requestURL, nil)
	if err != nil {
		return err
	}

	httpClient := &http.Client{
		Timeout: 3 * time.Second,
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return err
	}
	// Close the response body to avoid leaking the underlying connection.
	defer resp.Body.Close()

	return nil
}

// KeepTunnelAlive keeps the tunnel of the given environment open for the
// maxAlive duration, or until ctx is done, by pinging the agent on every
// cleanup interval.
func (service *Service) KeepTunnelAlive(endpointID portainer.EndpointID, ctx context.Context, maxAlive time.Duration) {
	go func() {
		log.Printf("[DEBUG] [chisel,KeepTunnelAlive] [endpoint_id: %d] [message: start for %.0f minutes]\n", endpointID, maxAlive.Minutes())
		maxAliveTicker := time.NewTicker(maxAlive)
		defer maxAliveTicker.Stop()
		pingTicker := time.NewTicker(tunnelCleanupInterval)
		defer pingTicker.Stop()

		for {
			select {
			case <-pingTicker.C:
				service.SetTunnelStatusToActive(endpointID)
				err := service.pingAgent(endpointID)
				if err != nil {
					log.Printf("[DEBUG] [chisel,KeepTunnelAlive] [endpoint_id: %d] [warning: ping agent err=%s]\n", endpointID, err)
				}
			case <-maxAliveTicker.C:
				log.Printf("[DEBUG] [chisel,KeepTunnelAlive] [endpoint_id: %d] [message: stop as %.0f minutes timeout]\n", endpointID, maxAlive.Minutes())
				return
			case <-ctx.Done():
				err := ctx.Err()
				log.Printf("[DEBUG] [chisel,KeepTunnelAlive] [endpoint_id: %d] [message: stop as err=%s]\n", endpointID, err)
				return
			}
		}
	}()
}

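// A hedged usage sketch: the real callers sit in Portainer's HTTP handlers
// elsewhere in the codebase, and the 5-minute budget below is an illustrative
// assumption, not the source's value:
//
//	// Keep the tunnel up long enough for an async operation to complete,
//	// but stop early if the request context is cancelled.
//	service.KeepTunnelAlive(endpoint.ID, r.Context(), 5*time.Minute)
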
// StartTunnelServer starts a tunnel server on the specified addr and port.
// It uses a seed to generate a new private/public key pair. If the seed cannot
// be found inside the database, it will generate a new one randomly and persist it.
// It starts the tunnel status verification process in the background.
// The snapshotter is used in the tunnel status verification process.
func (service *Service) StartTunnelServer(addr, port string, snapshotService portainer.SnapshotService) error {
	keySeed, err := service.retrievePrivateKeySeed()
	if err != nil {
		return err
	}

	config := &chserver.Config{
		Reverse: true,
		KeySeed: keySeed,
	}

	chiselServer, err := chserver.NewServer(config)
	if err != nil {
		return err
	}

	service.serverFingerprint = chiselServer.GetFingerprint()
	service.serverPort = port

	err = chiselServer.Start(addr, port)
	if err != nil {
		return err
	}
	service.chiselServer = chiselServer

	// TODO: work-around Chisel default behavior.
	// By default, Chisel will allow anyone to connect if no user exists.
	username, password := generateRandomCredentials()
	err = service.chiselServer.AddUser(username, password, "127.0.0.1")
	if err != nil {
		return err
	}

	service.snapshotService = snapshotService
	go service.startTunnelVerificationLoop()

	return nil
}

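// generateRandomCredentials is defined elsewhere in this package. A minimal
// sketch of a compatible implementation, assuming it reuses the uniuri
// generator already imported above (an assumption, not the verbatim source):
//
//	func generateRandomCredentials() (string, string) {
//		// Random username and password for the single local chisel user.
//		return uniuri.NewLen(8), uniuri.NewLen(8)
//	}
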
// StopTunnelServer stops the tunnel HTTP server.
func (service *Service) StopTunnelServer() error {
	// Guard against a server that was never started.
	if service.chiselServer == nil {
		return nil
	}
	return service.chiselServer.Close()
}

// retrievePrivateKeySeed loads the tunnel server's private key seed from the
// datastore, generating and persisting a new random seed if none exists yet.
// Note the datastore's IsErrObjectNotFound() contract: callers never see a
// backend-specific error for the expected "not found" case.
func (service *Service) retrievePrivateKeySeed() (string, error) {
	var serverInfo *portainer.TunnelServerInfo

	serverInfo, err := service.dataStore.TunnelServer().Info()
	if service.dataStore.IsErrObjectNotFound(err) {
		keySeed := uniuri.NewLen(16)

		serverInfo = &portainer.TunnelServerInfo{
			PrivateKeySeed: keySeed,
		}

		err := service.dataStore.TunnelServer().UpdateInfo(serverInfo)
		if err != nil {
			return "", err
		}
	} else if err != nil {
		return "", err
	}

	return serverInfo.PrivateKeySeed, nil
}

func (service *Service) startTunnelVerificationLoop() {
	log.Printf("[DEBUG] [chisel,monitoring] [check_interval_seconds: %f] [message: starting tunnel management process]", tunnelCleanupInterval.Seconds())
	ticker := time.NewTicker(tunnelCleanupInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			service.checkTunnels()
		case <-service.shutdownCtx.Done():
			log.Println("[DEBUG] Shutting down tunnel service")
			if err := service.StopTunnelServer(); err != nil {
				log.Printf("[ERROR] Failed to stop tunnel server: %s", err)
			}
			return
		}
	}
}

func (service *Service) checkTunnels() {
	for item := range service.tunnelDetailsMap.IterBuffered() {
		tunnel := item.Val.(*portainer.TunnelDetails)

		if tunnel.LastActivity.IsZero() || tunnel.Status == portainer.EdgeAgentIdle {
			continue
		}

		elapsed := time.Since(tunnel.LastActivity)
		log.Printf("[DEBUG] [chisel,monitoring] [endpoint_id: %s] [status: %s] [status_time_seconds: %f] [message: environment tunnel monitoring]", item.Key, tunnel.Status, elapsed.Seconds())

		if tunnel.Status == portainer.EdgeAgentManagementRequired {
			if elapsed < requiredTimeout {
				continue
			}
			log.Printf("[DEBUG] [chisel,monitoring] [endpoint_id: %s] [status: %s] [status_time_seconds: %f] [timeout_seconds: %f] [message: REQUIRED state timeout exceeded]", item.Key, tunnel.Status, elapsed.Seconds(), requiredTimeout.Seconds())
		}

		// Parse the endpoint identifier once; skip the entry if it is invalid.
		endpointID, err := strconv.Atoi(item.Key)
		if err != nil {
			log.Printf("[ERROR] [chisel,conversion] Invalid environment identifier (id: %s): %s", item.Key, err)
			continue
		}

		if tunnel.Status == portainer.EdgeAgentActive {
			if elapsed < activeTimeout {
				continue
			}
			log.Printf("[DEBUG] [chisel,monitoring] [endpoint_id: %s] [status: %s] [status_time_seconds: %f] [timeout_seconds: %f] [message: ACTIVE state timeout exceeded]", item.Key, tunnel.Status, elapsed.Seconds(), activeTimeout.Seconds())

			// An expired ACTIVE tunnel is snapshotted one last time before
			// being set back to idle.
			err = service.snapshotEnvironment(portainer.EndpointID(endpointID), tunnel.Port)
			if err != nil {
				log.Printf("[ERROR] [snapshot] Unable to snapshot Edge environment (id: %s): %s", item.Key, err)
			}
		}

		service.SetTunnelStatusToIdle(portainer.EndpointID(endpointID))
	}
}

// snapshotEnvironment snapshots the environment through the tunnel: it
// temporarily points the endpoint URL at the local tunnel port, runs the
// snapshot, then restores the original URL before persisting the endpoint.
func (service *Service) snapshotEnvironment(endpointID portainer.EndpointID, tunnelPort int) error {
	endpoint, err := service.dataStore.Endpoint().Endpoint(endpointID)
	if err != nil {
		return err
	}

	endpointURL := endpoint.URL

	endpoint.URL = fmt.Sprintf("tcp://127.0.0.1:%d", tunnelPort)
	err = service.snapshotService.SnapshotEndpoint(endpoint)
	if err != nil {
		return err
	}

	endpoint.URL = endpointURL
	return service.dataStore.Endpoint().UpdateEndpoint(endpoint.ID, endpoint)
}